Dataset schema (one record per source file; the `content` column holds the full file text):

| column | dtype | value range / classes |
|---|---|---|
| repo_name | string | lengths 5–92 |
| path | string | lengths 4–232 |
| copies | string | 19 classes |
| size | string | lengths 4–7 |
| content | string | lengths 721–1.04M |
| license | string | 15 classes |
| hash | int64 | -9,223,277,421,539,062,000 to 9,223,102,107B |
| line_mean | float64 | 6.51 to 99.9 |
| line_max | int64 | 15 to 997 |
| alpha_frac | float64 | 0.25 to 0.97 |
| autogenerated | bool | 1 class |
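
The records below follow that schema. As an illustrative sketch only (it is not part of the original dump, and the dataset path "someone/python-code-corpus" is a made-up placeholder), a dump like this could be loaded and filtered on its metadata columns with the Hugging Face `datasets` library:

```python
from datasets import load_dataset

# Hypothetical dataset path; substitute the real one.
ds = load_dataset("someone/python-code-corpus", split="train")

# Keep permissively licensed, human-written files with moderate line lengths,
# using the license / autogenerated / line_max columns described above.
subset = ds.filter(
    lambda row: row["license"] in ("mit", "bsd-3-clause", "apache-2.0")
    and not row["autogenerated"]
    and row["line_max"] <= 120
)

for row in subset.select(range(3)):
    print(row["repo_name"], row["path"], row["size"])
```
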
repo_name: pagekite/PyPagekite | path: pagekite/ui/nullui.py | copies: 1 | size: 10192
"""
This is a basic "Null" user interface which does nothing at all.
"""
##############################################################################
from __future__ import absolute_import
from __future__ import division
LICENSE = """\
This file is part of pagekite.py.
Copyright 2010-2020, the Beanstalks Project ehf. and Bjarni Runar Einarsson
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU Affero General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your option) any
later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see: <http://www.gnu.org/licenses/>
"""
##############################################################################
import sys
from pagekite.compat import *
from pagekite.common import *
import pagekite.logging as logging
class NullUi(object):
"""This is a UI that always returns default values or raises errors."""
DAEMON_FRIENDLY = True
ALLOWS_INPUT = False
WANTS_STDERR = False
REJECTED_REASONS = {
'quota': 'You are out of quota',
'nodays': 'Your subscription has expired',
'noquota': 'You are out of quota',
'noconns': 'You are flying too many kites',
'unauthorized': 'Invalid account or shared secret'
}
def __init__(self, welcome=None, wfile=sys.stderr, rfile=sys.stdin):
if sys.platform[:3] in ('win', 'os2'):
self.CLEAR = '\n\n%s\n\n' % ('=' * 79)
self.NORM = self.WHITE = self.GREY = self.GREEN = self.YELLOW = ''
self.BLUE = self.RED = self.MAGENTA = self.CYAN = ''
else:
self.CLEAR = '\033[H\033[J'
self.NORM = '\033[0m'
self.WHITE = '\033[1m'
self.GREY = '\033[0m' #'\033[30;1m'
self.RED = '\033[31;1m'
self.GREEN = '\033[32;1m'
self.YELLOW = '\033[33;1m'
self.BLUE = '\033[34;1m'
self.MAGENTA = '\033[35;1m'
self.CYAN = '\033[36;1m'
self.wfile = wfile
self.rfile = rfile
self.welcome = welcome
if hasattr(self.wfile, 'buffer'):
self.wfile = self.wfile.buffer
self.Reset()
self.Splash()
def write(self, data):
self.wfile.write(b(data))
self.wfile.flush()
def Reset(self):
self.in_wizard = False
self.wizard_tell = None
self.last_tick = 0
self.notify_history = {}
self.status_tag = ''
self.status_col = self.NORM
self.status_msg = ''
self.tries = 200
self.server_info = None
def Splash(self): pass
def Welcome(self): pass
def StartWizard(self, title): pass
def EndWizard(self, quietly=False): pass
def Spacer(self): pass
def Browse(self, url):
import webbrowser
self.Tell(['Opening %s in your browser...' % url])
webbrowser.open(url)
def DefaultOrFail(self, question, default):
if default is not None: return default
raise ConfigError('Unanswerable question: %s' % question)
def AskLogin(self, question, default=None, email=None,
wizard_hint=False, image=None, back=None):
return self.DefaultOrFail(question, default)
def AskEmail(self, question, default=None, pre=None,
wizard_hint=False, image=None, back=None):
return self.DefaultOrFail(question, default)
def AskYesNo(self, question, default=None, pre=None, yes='Yes', no='No',
wizard_hint=False, image=None, back=None):
return self.DefaultOrFail(question, default)
def AskQuestion(self, question, pre=[], default=None, prompt=None,
wizard_hint=False, image=None, back=None):
return self.DefaultOrFail(question, default)
def AskKiteName(self, domains, question, pre=[], default=None,
wizard_hint=False, image=None, back=None):
return self.DefaultOrFail(question, default)
def AskMultipleChoice(self, choices, question, pre=[], default=None,
wizard_hint=False, image=None, back=None):
return self.DefaultOrFail(question, default)
def AskBackends(self, kitename, protos, ports, rawports, question, pre=[],
default=None, wizard_hint=False, image=None, back=None):
return self.DefaultOrFail(question, default)
def Working(self, message): pass
def Tell(self, lines, error=False, back=None):
if error:
logging.LogError(' '.join(lines))
raise ConfigError(' '.join(lines))
else:
logging.Log([('message', ' '.join(lines))])
return True
def Notify(self, message, prefix=' ',
popup=False, color=None, now=None, alignright=''):
if popup: logging.Log([('info', '%s%s%s' % (message,
alignright and ' ' or '',
alignright))])
def NotifyMOTD(self, frontend, message):
pass
def NotifyKiteRejected(self, proto, domain, reason, crit=False):
if reason in self.REJECTED_REASONS:
reason = self.REJECTED_REASONS[reason]
self.Notify('REJECTED: %s:%s (%s)' % (proto, domain, reason),
prefix='!', color=(crit and self.RED or self.YELLOW))
def NotifyList(self, prefix, items, color):
items = items[:]
while items:
show = []
while items and len(prefix) + len(' '.join(show)) < 65:
show.append(items.pop(0))
self.Notify(' - %s: %s' % (prefix, ' '.join(show)), color=color)
def NotifyServer(self, obj, server_info):
self.server_info = server_info
self.Notify(
'Connecting to front-end relay %s ...' % server_info[obj.S_NAME],
color=self.GREY)
self.Notify(
' - Relay supports %d protocols on %d public ports.'
% (len(server_info[obj.S_PROTOS]), len(server_info[obj.S_PORTS])),
color=self.GREY)
if 'raw' in server_info[obj.S_PROTOS]:
self.Notify(
' - Raw TCP/IP (HTTP proxied) kites are available.',
color=self.GREY)
self.Notify(
' - To enable more logging, add option: --logfile=/path/to/logfile',
color=self.GREY)
def NotifyQuota(self, quota, q_days, q_conns):
q, qMB = [], float(quota) / 1024 # Float division
if qMB < 1024:
q.append('%.2f MB' % qMB)
if q_days is not None and q_days < 400:
q.append('%d days' % q_days)
if q_conns is not None and q_conns < 10:
q.append('%s tunnels' % q_conns)
if not q:
q = ['plenty of time and bandwidth']
self.Notify('Quota: You have %s left.' % ', '.join(q),
prefix=(int(quota) < qMB) and '!' or ' ',
color=self.MAGENTA)
def NotifyIPsPerSec(self, ips, secs):
self.Notify(
'Abuse/DDOS protection: Relaying traffic for up to %d clients per %ds.'
% (ips, secs),
prefix=' ',
color=self.MAGENTA)
def NotifyFlyingFE(self, proto, port, domain, be=None):
self.Notify(('Flying: %s://%s%s/'
) % (proto, domain, port and ':'+port or ''),
prefix='~<>', color=self.CYAN)
def StartListingBackEnds(self): pass
def EndListingBackEnds(self): pass
def NotifyBE(self, bid, be, has_ssl, dpaths,
is_builtin=False, fingerprint=None):
domain, port, proto = be[BE_DOMAIN], be[BE_PORT], be[BE_PROTO]
prox = (proto == 'raw') and ' (HTTP proxied)' or ''
if proto == 'raw' and port in ('22', 22): proto = 'ssh'
if has_ssl and proto == 'http':
proto = 'https'
url = '%s://%s%s' % (proto, domain, port and (':%s' % port) or '')
if be[BE_STATUS] == BE_STATUS_UNKNOWN: return
if be[BE_STATUS] & BE_STATUS_OK:
if be[BE_STATUS] & BE_STATUS_ERR_ANY:
status = 'Trying'
color = self.YELLOW
prefix = ' '
else:
status = 'Flying'
color = self.CYAN
prefix = '~<>'
else:
return
if is_builtin:
backend = 'builtin HTTPD'
else:
backend = '%s:%s' % (be[BE_BHOST], be[BE_BPORT])
self.Notify(('%s %s as %s/%s'
) % (status, backend, url, prox),
prefix=prefix, color=color)
if status == 'Flying':
for dp in sorted(dpaths.keys()):
self.Notify(' - %s%s' % (url, dp), color=self.BLUE)
if fingerprint and proto.startswith('https'):
self.Notify(' - Fingerprint=%s' % fingerprint,
color=self.WHITE)
self.Notify((' IMPORTANT: For maximum security, use a secure channel'
' to inform your'),
color=self.YELLOW)
self.Notify(' guests what fingerprint to expect.',
color=self.YELLOW)
def Status(self, tag, message=None, color=None): pass
def ExplainError(self, error, title, subject=None):
if error == 'pleaselogin':
self.Tell([title, '', 'You already have an account. Log in to continue.'
], error=True)
elif error == 'email':
self.Tell([title, '', 'Invalid e-mail address. Please try again?'
], error=True)
elif error == 'honey':
self.Tell([title, '', 'Hmm. Somehow, you triggered the spam-filter.'
], error=True)
elif error in ('domaintaken', 'domain', 'subdomain'):
self.Tell([title, '',
'Sorry, that domain (%s) is unavailable.' % subject,
'',
'If you registered it already, perhaps you need to log on with',
'a different e-mail address?',
''
], error=True)
elif error == 'checkfailed':
self.Tell([title, '',
'That domain (%s) is not correctly set up.' % subject
], error=True)
elif error == 'network':
self.Tell([title, '',
'There was a problem communicating with %s.' % subject, '',
'Please verify that you have a working'
' Internet connection and try again!'
], error=True)
else:
self.Tell([title, 'Error code: %s' % error, 'Try again later?'
], error=True)
license: agpl-3.0 | hash: 2,314,659,761,549,735,400 | line_mean: 34.512195 | line_max: 81 | alpha_frac: 0.586637 | autogenerated: false

repo_name: scholer/cadnano2.5 | path: cadnano/strand/insertioncmd.py | copies: 2 | size: 5135
# -*- coding: utf-8 -*-
from cadnano.proxies.cnproxy import UndoCommand
from cadnano.decorators.insertion import Insertion
from cadnano.cntypes import (
StrandT
)
class AddInsertionCommand(UndoCommand):
def __init__(self, strand: StrandT, idx: int, length: int):
super(AddInsertionCommand, self).__init__("add insertion")
self._strand = strand
id_num = strand.idNum()
self._insertions = strand.part().insertions()[id_num]
self._idx = idx
self._length = length
self._insertion = Insertion(idx, length)
self._comp_strand = \
strand.strandSet().complementStrandSet().getStrand(idx)
# end def
def redo(self):
strand = self._strand
c_strand = self._comp_strand
inst = self._insertion
self._insertions[self._idx] = inst
strand.oligo()._incrementLength(inst.length(), emit_signals=True)
strand.strandInsertionAddedSignal.emit(strand, inst)
if c_strand:
c_strand.oligo()._incrementLength(inst.length(), emit_signals=True)
c_strand.strandInsertionAddedSignal.emit(c_strand, inst)
# end def
def undo(self):
strand = self._strand
c_strand = self._comp_strand
inst = self._insertion
strand.oligo()._decrementLength(inst.length(), emit_signals=True)
if c_strand:
c_strand.oligo()._decrementLength(inst.length(), emit_signals=True)
idx = self._idx
del self._insertions[idx]
strand.strandInsertionRemovedSignal.emit(strand, idx)
if c_strand:
c_strand.strandInsertionRemovedSignal.emit(c_strand, idx)
# end def
# end class
class RemoveInsertionCommand(UndoCommand):
def __init__(self, strand, idx):
super(RemoveInsertionCommand, self).__init__("remove insertion")
self._strand = strand
self._idx = idx
id_num = strand.idNum()
self._insertions = strand.part().insertions()[id_num]
self._insertion = self._insertions[idx]
self._comp_strand = \
strand.strandSet().complementStrandSet().getStrand(idx)
# end def
def redo(self):
strand = self._strand
c_strand = self._comp_strand
inst = self._insertion
strand.oligo()._decrementLength(inst.length(), emit_signals=True)
if c_strand:
c_strand.oligo()._decrementLength(inst.length(), emit_signals=True)
idx = self._idx
del self._insertions[idx]
strand.strandInsertionRemovedSignal.emit(strand, idx)
if c_strand:
c_strand.strandInsertionRemovedSignal.emit(c_strand, idx)
# end def
def undo(self):
strand = self._strand
c_strand = self._comp_strand
inst = self._insertion
strand.oligo()._incrementLength(inst.length(), emit_signals=True)
self._insertions[self._idx] = inst
strand.strandInsertionAddedSignal.emit(strand, inst)
if c_strand:
c_strand.oligo()._incrementLength(inst.length(), emit_signals=True)
c_strand.strandInsertionAddedSignal.emit(c_strand, inst)
# end def
# end class
class ChangeInsertionCommand(UndoCommand):
"""
Changes the length of an insertion to a non-zero value
the caller of this needs to handle the case where a zero length
is required and call RemoveInsertionCommand
"""
def __init__(self, strand, idx, new_length):
super(ChangeInsertionCommand, self).__init__("change insertion")
self._strand = strand
id_num = strand.idNum()
self._insertions = strand.part().insertions()[id_num]
self._idx = idx
self._new_length = new_length
self._old_length = self._insertions[idx].length()
self._comp_strand = \
strand.strandSet().complementStrandSet().getStrand(idx)
# end def
def redo(self):
strand = self._strand
c_strand = self._comp_strand
inst = self._insertions[self._idx]
inst.setLength(self._new_length, emit_signals=True)
strand.oligo()._incrementLength(self._new_length - self._old_length,
emit_signals=True)
strand.strandInsertionChangedSignal.emit(strand, inst)
if c_strand:
c_strand.oligo()._incrementLength(
self._new_length - self._old_length,
emit_signals=True)
c_strand.strandInsertionChangedSignal.emit(c_strand, inst)
# end def
def undo(self):
strand = self._strand
c_strand = self._comp_strand
inst = self._insertions[self._idx]
inst.setLength(self._old_length)
strand.oligo()._decrementLength(self._new_length - self._old_length,
emit_signals=True)
strand.strandInsertionChangedSignal.emit(strand, inst)
if c_strand:
c_strand.oligo()._decrementLength(
self._new_length - self._old_length,
emit_signals=True)
c_strand.strandInsertionChangedSignal.emit(c_strand, inst)
# end def
# end class
license: mit | hash: -6,313,271,576,578,294,000 | line_mean: 36.481752 | line_max: 79 | alpha_frac: 0.614216 | autogenerated: false

repo_name: stevenwudi/Kernelized_Correlation_Filter | path: OBT_50_RNN_train.py | copies: 1 | size: 6825
"""
author: DI WU
stevenwudi@gmail.com
"""
import getopt
import sys
# some configurations files for OBT experiments, originally, I would never do that this way of importing,
# it's simple way too ugly
from config import *
from scripts import *
from KCF_CNN_RNN import KCFTracker
def main(argv):
trackers = [KCFTracker(feature_type='cnn', load_model=False)]
#evalTypes = ['OPE', 'SRE', 'TRE']
evalTypes = ['OPE']
loadSeqs = 'TB50'
try:
opts, args = getopt.getopt(argv, "ht:e:s:", ["tracker=", "evaltype=", "sequence="])
except getopt.GetoptError:
print 'usage : run_trackers.py -t <trackers> -s <sequences>' + '-e <evaltypes>'
sys.exit(1)
for opt, arg in opts:
if opt == '-h':
print 'usage : run_trackers.py -t <trackers> -s <sequences>' + '-e <evaltypes>'
sys.exit(0)
elif opt in ("-t", "--tracker"):
trackers = [x.strip() for x in arg.split(',')]
# trackers = [arg]
elif opt in ("-s", "--sequence"):
loadSeqs = arg
if loadSeqs != 'All' and loadSeqs != 'all' and \
loadSeqs != 'tb50' and loadSeqs != 'tb100' and \
loadSeqs != 'cvpr13':
loadSeqs = [x.strip() for x in arg.split(',')]
elif opt in ("-e", "--evaltype"):
evalTypes = [x.strip() for x in arg.split(',')]
if SETUP_SEQ:
print 'Setup sequences ...'
butil.setup_seqs(loadSeqs)
print 'Starting benchmark for {0} trackers, evalTypes : {1}'.format(
len(trackers), evalTypes)
for evalType in evalTypes:
seqNames = butil.get_seq_names(loadSeqs)
seqs = butil.load_seq_configs(seqNames)
######################################################################
trackerResults = run_trackers(trackers, seqs, evalType, shiftTypeSet)
######################################################################
for tracker in trackers:
results = trackerResults[tracker]
if len(results) > 0:
######################################################################
evalResults, attrList = butil.calc_result(tracker, seqs, results, evalType)
######################################################################
print "Result of Sequences\t -- '{0}'".format(tracker.name)
for seq in seqs:
try:
print '\t\'{0}\'{1}'.format(
seq.name, " " * (12 - len(seq.name))),
print "\taveCoverage : {0:.3f}%".format(
sum(seq.aveCoverage) / len(seq.aveCoverage) * 100),
print "\taveErrCenter : {0:.3f}".format(
sum(seq.aveErrCenter) / len(seq.aveErrCenter))
except:
print '\t\'{0}\' ERROR!!'.format(seq.name)
print "Result of attributes\t -- '{0}'".format(tracker.name)
for attr in attrList:
print "\t\'{0}\'".format(attr.name),
print "\toverlap : {0:02.1f}%".format(attr.overlap),
print "\tfailures : {0:.1f}".format(attr.error)
if SAVE_RESULT:
butil.save_scores(attrList)
def run_trackers(trackers, seqs, evalType, shiftTypeSet):
tmpRes_path = RESULT_SRC.format('tmp/{0}/'.format(evalType))
if not os.path.exists(tmpRes_path):
os.makedirs(tmpRes_path)
numSeq = len(seqs)
trackerResults = dict((t, list()) for t in trackers)
##################################################
# chose sequence to run from below
##################################################
# we also collect data fro training here
import h5py
f = h5py.File("OBT50_scale_correct.hdf5", "w")
X_train = f.create_dataset("x_train", (26922, 60, 40), dtype='float', chunks=True)
y_train = f.create_dataset("y_train", (26922, 4), dtype='float', chunks=True)
count = 0
for idxSeq in range(0, numSeq):
s = seqs[idxSeq]
subSeqs, subAnno = butil.get_sub_seqs(s, 20.0, evalType)
for idxTrk in range(len(trackers)):
t = trackers[idxTrk]
if not OVERWRITE_RESULT:
trk_src = os.path.join(RESULT_SRC.format(evalType), t.name)
result_src = os.path.join(trk_src, s.name + '.json')
if os.path.exists(result_src):
seqResults = butil.load_seq_result(evalType, t, s.name)
trackerResults[t].append(seqResults)
continue
seqLen = len(subSeqs)
for idx in range(seqLen):
print '{0}_{1}, {2}_{3}:{4}/{5} - {6}'.format(
idxTrk + 1, t.feature_type, idxSeq + 1, s.name, idx + 1, seqLen, evalType)
rp = tmpRes_path + '_' + t.feature_type + '_' + str(idx + 1) + '/'
if SAVE_IMAGE and not os.path.exists(rp):
os.makedirs(rp)
subS = subSeqs[idx]
subS.name = s.name + '_' + str(idx)
####################
X_train, y_train, count = run_KCF_variant(t, subS, X_train, y_train, count)
####################
print("count %d"%count)
####################
X_train.resize(count - 1, axis=0)
y_train.resize(count - 1, axis=0)
f.close()
return trackerResults
def run_KCF_variant(tracker, seq, X_train, y_train, count):
from keras.preprocessing import image
start_time = time.time()
for frame in range(seq.endFrame - seq.startFrame):
if frame > 0:
img_rgb = img_rgb_next.copy()
else:
image_filename = seq.s_frames[frame]
image_path = os.path.join(seq.path, image_filename)
img_rgb = image.load_img(image_path)
img_rgb = image.img_to_array(img_rgb)
image_filename_next = seq.s_frames[frame+1]
image_path_next = os.path.join(seq.path, image_filename_next)
img_rgb_next = image.load_img(image_path_next)
img_rgb_next = image.img_to_array(img_rgb_next)
X_train, y_train, count = tracker.train_cnn(frame,
img_rgb,
seq.gtRect[frame],
img_rgb_next,
seq.gtRect[frame+1],
X_train, y_train, count
)
total_time = time.time() - start_time
tracker.fps = len(range(seq.endFrame - seq.startFrame)) / total_time
print("Frames-per-second:", tracker.fps)
return X_train, y_train, count
if __name__ == "__main__":
main(sys.argv[1:])
license: gpl-3.0 | hash: -8,334,159,504,333,091,000 | line_mean: 38.680233 | line_max: 105 | alpha_frac: 0.488205 | autogenerated: false

repo_name: lreis2415/SEIMS | path: seims/preprocess/sp_extraction.py | copies: 1 | size: 1437
"""Extract spatial parameters for soil, landuse, and terrain related.
@author : Liangjun Zhu
@changelog:
- 16-12-07 lj - rewrite for version 2.0
- 17-06-23 lj - reorganize according to pylint and google style
- 18-02-08 lj - compatible with Python3.
"""
from __future__ import absolute_import, unicode_literals
import os
import sys
if os.path.abspath(os.path.join(sys.path[0], '..')) not in sys.path:
sys.path.insert(0, os.path.abspath(os.path.join(sys.path[0], '..')))
from preprocess.sp_landuse import LanduseUtilClass
from preprocess.sp_soil import SoilUtilClass
from preprocess.sp_terrain import TerrainUtilClass
def extract_spatial_parameters(cfg, maindb):
"""Main entrance for spatial parameters extraction."""
# 1. Soil related
SoilUtilClass.parameters_extraction(cfg)
# 2. Landuse/Landcover related
LanduseUtilClass.parameters_extraction(cfg, maindb)
# 3. Terrain related and other spatial parameters
TerrainUtilClass.parameters_extraction(cfg, maindb)
def main():
"""TEST CODE"""
from preprocess.config import parse_ini_configuration
from preprocess.db_mongodb import ConnectMongoDB
seims_cfg = parse_ini_configuration()
client = ConnectMongoDB(seims_cfg.hostname, seims_cfg.port)
conn = client.get_conn()
main_db = conn[seims_cfg.spatial_db]
extract_spatial_parameters(seims_cfg, main_db)
if __name__ == "__main__":
main()
license: gpl-3.0 | hash: -8,365,632,222,240,427,000 | line_mean: 30.933333 | line_max: 72 | alpha_frac: 0.713292 | autogenerated: false

repo_name: ealter/vim_turing_machine | path: vim_turing_machine/struct.py | copies: 1 | size: 1051
from collections import namedtuple
from vim_turing_machine.constants import BACKWARDS
from vim_turing_machine.constants import DO_NOT_MOVE
from vim_turing_machine.constants import FORWARDS
from vim_turing_machine.constants import INVALID_STATE_CHARACTERS
from vim_turing_machine.constants import VALID_CHARACTERS
class StateTransition(namedtuple('StateTransition', [
'previous_state',
'previous_character',
'next_state',
'next_character',
'tape_pointer_direction',
])):
def validate(self):
assert self.tape_pointer_direction in (FORWARDS, DO_NOT_MOVE, BACKWARDS)
assert self.previous_character in VALID_CHARACTERS
assert self.next_character in VALID_CHARACTERS
for invalid_char in INVALID_STATE_CHARACTERS:
if invalid_char in self.previous_state:
raise AssertionError('{} is in {}'.format(invalid_char, self.previous_state))
if invalid_char in self.next_state:
raise AssertionError('{} is in {}'.format(invalid_char, self.next_state))
license: mit | hash: -2,245,830,219,414,987,300 | line_mean: 39.423077 | line_max: 93 | alpha_frac: 0.714558 | autogenerated: false

repo_name: hdemeyer/king-phisher | path: king_phisher/scrubber.py | copies: 1 | size: 3276
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/scrubber.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import sys
import tempfile
import xml.etree.ElementTree as ElementTree
import zipfile
def remove_office_metadata(file_name):
"""
Remove all metadata from Microsoft Office 2007+ file types such as docx,
pptx, and xlsx.
:param str file_name: The path to the file whose metadata is to be removed.
"""
ns = {
'cp': 'http://schemas.openxmlformats.org/package/2006/metadata/core-properties',
'dc': 'http://purl.org/dc/elements/1.1/',
'dcterms': 'http://purl.org/dc/terms/',
'dcmitype': 'http://purl.org/dc/dcmitype/',
'xsi': 'http://www.w3.org/2001/XMLSchema-instance'
}
for prefix, uri in ns.items():
ElementTree.register_namespace(prefix, uri)
_, file_ext = os.path.splitext(file_name)
tmpfd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name), suffix=file_ext)
os.close(tmpfd)
with zipfile.ZipFile(file_name, 'r') as zin:
with zipfile.ZipFile(tmpname, 'w') as zout:
zout.comment = zin.comment
for item in zin.infolist():
data = zin.read(item.filename)
if item.filename == 'docProps/core.xml':
root = ElementTree.fromstring(data)
root.clear()
data = ElementTree.tostring(root, 'UTF-8')
zout.writestr(item, data)
os.remove(file_name)
os.rename(tmpname, file_name)
def main():
if len(sys.argv) < 2:
print("usage: {0} [path to document]".format(os.path.basename(sys.argv[0])))
return 0
file_path = sys.argv[1]
if not os.path.isfile(file_path):
print('[-] the specified path is not a file')
return 1
if not os.access(file_path, os.R_OK | os.W_OK):
print('[-] insufficient permissions to the specified file')
return 1
remove_office_metadata(file_path)
return 0
if __name__ == '__main__':
sys.exit(main())
license: bsd-3-clause | hash: 4,907,353,813,057,486,000 | line_mean: 36.227273 | line_max: 83 | alpha_frac: 0.724054 | autogenerated: false

repo_name: christophmark/bayesloop | path: tests/test_transitionmodels.py | copies: 1 | size: 4570
#!/usr/bin/env python
from __future__ import print_function, division
import bayesloop as bl
import numpy as np
class TestBuiltin:
def test_static(self):
S = bl.Study()
S.loadData(np.array([1, 2, 3, 4, 5]))
L = bl.om.Poisson('rate', bl.oint(0, 6, 100))
T = bl.tm.Static()
S.set(L, T)
S.fit()
# test model evidence value
np.testing.assert_almost_equal(S.logEvidence, -10.372209708143769, decimal=5,
err_msg='Erroneous log-evidence value.')
def test_deterministic(self):
S = bl.HyperStudy()
S.loadData(np.array([1, 2, 3, 4, 5]))
def linear(t, a=[1, 2]):
return 0.5 + 0.2*a*t
L = bl.om.Poisson('rate', bl.oint(0, 6, 100))
T = bl.tm.Deterministic(linear, target='rate')
S.set(L, T)
S.fit()
# test model evidence value
np.testing.assert_almost_equal(S.logEvidence, -9.4050089375418136, decimal=5,
err_msg='Erroneous log-evidence value.')
def test_gaussianrandomwalk(self):
S = bl.Study()
S.loadData(np.array([1, 2, 3, 4, 5]))
L = bl.om.Poisson('rate', bl.oint(0, 6, 100))
T = bl.tm.GaussianRandomWalk('sigma', 0.2, target='rate')
S.set(L, T)
S.fit()
# test model evidence value
np.testing.assert_almost_equal(S.logEvidence, -10.323144246611964, decimal=5,
err_msg='Erroneous log-evidence value.')
def test_alphastablerandomwalk(self):
S = bl.Study()
S.loadData(np.array([1, 2, 3, 4, 5]))
L = bl.om.Poisson('rate', bl.oint(0, 6, 100))
T = bl.tm.AlphaStableRandomWalk('c', 0.2, 'alpha', 1.5, target='rate')
S.set(L, T)
S.fit()
# test model evidence value
np.testing.assert_almost_equal(S.logEvidence, -10.122384638661309, decimal=5,
err_msg='Erroneous log-evidence value.')
def test_changepoint(self):
S = bl.Study()
S.loadData(np.array([1, 2, 3, 4, 5]))
L = bl.om.Poisson('rate', bl.oint(0, 6, 100))
T = bl.tm.ChangePoint('t_change', 2)
S.set(L, T)
S.fit()
# test model evidence value
np.testing.assert_almost_equal(S.logEvidence, -12.894336092378385, decimal=5,
err_msg='Erroneous log-evidence value.')
def test_regimeswitch(self):
S = bl.Study()
S.loadData(np.array([1, 2, 3, 4, 5]))
L = bl.om.Poisson('rate', bl.oint(0, 6, 100))
T = bl.tm.RegimeSwitch('p_min', -3)
S.set(L, T)
S.fit()
# test model evidence value
np.testing.assert_almost_equal(S.logEvidence, -10.372866559561402, decimal=5,
err_msg='Erroneous log-evidence value.')
def test_independent(self):
S = bl.Study()
S.loadData(np.array([1, 2, 3, 4, 5]))
L = bl.om.Poisson('rate', bl.oint(0, 6, 100))
T = bl.tm.Independent()
S.set(L, T)
S.fit()
# test model evidence value
np.testing.assert_almost_equal(S.logEvidence, -11.087360077190617, decimal=5,
err_msg='Erroneous log-evidence value.')
def test_notequal(self):
S = bl.Study()
S.loadData(np.array([1, 2, 3, 4, 5]))
L = bl.om.Poisson('rate', bl.oint(0, 6, 100))
T = bl.tm.NotEqual('p_min', -3)
S.set(L, T)
S.fit()
# test model evidence value
np.testing.assert_almost_equal(S.logEvidence, -10.569099863134156, decimal=5,
err_msg='Erroneous log-evidence value.')
class TestNested:
def test_nested(self):
S = bl.Study()
S.loadData(np.array([1, 2, 3, 4, 5]))
L = bl.om.Poisson('rate', bl.oint(0, 6, 100))
T = bl.tm.SerialTransitionModel(
bl.tm.Static(),
bl.tm.ChangePoint('t_change', 1),
bl.tm.CombinedTransitionModel(
bl.tm.GaussianRandomWalk('sigma', 0.2, target='rate'),
bl.tm.RegimeSwitch('p_min', -3)
),
bl.tm.BreakPoint('t_break', 3),
bl.tm.Independent()
)
S.set(L, T)
S.fit()
# test model evidence value
np.testing.assert_almost_equal(S.logEvidence, -13.269918024215237, decimal=5,
err_msg='Erroneous log-evidence value.')
license: mit | hash: 5,765,564,429,500,397,000 | line_mean: 30.088435 | line_max: 85 | alpha_frac: 0.521444 | autogenerated: false

repo_name: dicksontsai/sudoku | path: sudoku_checker.py | copies: 1 | size: 1170
import io, sys
class Sudoku():
def __init__(self, textfile):
puzzle = open(textfile).readlines()
class Position():
def __init__(self, row, column, box):
self.row = row
self.column = column
self.box = box
if __name__ == '__main__':
puzzle = open(sys.argv[1]).readlines()
rows = []
columns = [set() for i in range(9)]
boxes = [[set() for i in range(3)] for j in range(3)]
i = 0
for line in puzzle:
data = line.split()
data = [int(x) for x in data]
rows.append(set(data))
for j in range(9):
columns[j].add(data[j])
boxes[i//3][j // 3].add(data[j])
i += 1
row_results = [len(row_set) == 9 for row_set in rows]
column_results = [len(col_set) == 9 for col_set in columns]
if not all(row_results):
print("False, row")
sys.exit(0)
elif not all(column_results):
print("False, col")
sys.exit(0)
for box_set in boxes:
box_results = [len(box) == 9 for box in box_set]
if not all(box_results):
print(False)
sys.exit(0)
print(True)
sys.exit(0)
license: gpl-3.0 | hash: -5,252,328,654,009,277,000 | line_mean: 23.375 | line_max: 63 | alpha_frac: 0.517949 | autogenerated: false

repo_name: arthur-wsw/pinax-ratings | path: runtests.py | copies: 1 | size: 1258
#!/usr/bin/env python
import os
import sys
import django
from django.conf import settings
DEFAULT_SETTINGS = dict(
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"pinax.ratings",
"pinax.ratings.tests"
],
MIDDLEWARE_CLASSES=[],
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
},
SITE_ID=1,
ROOT_URLCONF="pinax.ratings.tests.urls",
SECRET_KEY="notasecret",
)
def runtests(*test_args):
if not settings.configured:
settings.configure(**DEFAULT_SETTINGS)
django.setup()
parent = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent)
try:
from django.test.runner import DiscoverRunner
runner_class = DiscoverRunner
test_args = ["pinax.ratings.tests"]
except ImportError:
from django.test.simple import DjangoTestSuiteRunner
runner_class = DjangoTestSuiteRunner
test_args = ["tests"]
failures = runner_class(verbosity=1, interactive=True, failfast=False).run_tests(test_args)
sys.exit(failures)
if __name__ == "__main__":
runtests(*sys.argv[1:])
license: mit | hash: -6,897,648,623,100,419,000 | line_mean: 22.296296 | line_max: 95 | alpha_frac: 0.620032 | autogenerated: false

repo_name: sharmaeklavya2/zulip | path: zerver/lib/ccache.py | copies: 1 | size: 7539
from __future__ import absolute_import
from typing import Any, Dict, Optional, Text
# This file is adapted from samples/shellinabox/ssh-krb-wrapper in
# https://github.com/davidben/webathena, which has the following
# license:
#
# Copyright (c) 2013 David Benjamin and Alan Huang
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import base64
import struct
import six
# Some DER encoding stuff. Bleh. This is because the ccache contains a
# DER-encoded krb5 Ticket structure, whereas Webathena deserializes
# into the various fields. Re-encoding in the client would be easy as
# there is already an ASN.1 implementation, but in the interest of
# limiting MIT Kerberos's exposure to malformed ccaches, encode it
# ourselves. To that end, here's the laziest DER encoder ever.
def der_encode_length(length):
# type: (int) -> str
if length <= 127:
return chr(length)
out = ""
while length > 0:
out = chr(length & 0xff) + out
length >>= 8
out = chr(len(out) | 0x80) + out
return out
def der_encode_tlv(tag, value):
# type: (int, str) -> str
return chr(tag) + der_encode_length(len(value)) + value
def der_encode_integer_value(val):
# type: (int) -> str
if not isinstance(val, six.integer_types):
raise TypeError("int")
# base 256, MSB first, two's complement, minimum number of octets
# necessary. This has a number of annoying edge cases:
# * 0 and -1 are 0x00 and 0xFF, not the empty string.
# * 255 is 0x00 0xFF, not 0xFF
# * -256 is 0xFF 0x00, not 0x00
# Special-case to avoid an empty encoding.
if val == 0:
return "\x00"
sign = 0 # What you would get if you sign-extended the current high bit.
out = ""
# We can stop once sign-extension matches the remaining value.
while val != sign:
byte = val & 0xff
out = chr(byte) + out
sign = -1 if byte & 0x80 == 0x80 else 0
val >>= 8
return out
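# Illustrative check, not part of the original file: with the edge-case rules
# described above, der_encode_integer_value(0) == "\x00", (-1) == "\xff",
# (255) == "\x00\xff", and (-256) == "\xff\x00".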
def der_encode_integer(val):
# type: (int) -> str
return der_encode_tlv(0x02, der_encode_integer_value(val))
def der_encode_int32(val):
# type: (int) -> str
if val < -2147483648 or val > 2147483647:
raise ValueError("Bad value")
return der_encode_integer(val)
def der_encode_uint32(val):
# type: (int) -> str
if val < 0 or val > 4294967295:
raise ValueError("Bad value")
return der_encode_integer(val)
def der_encode_string(val):
# type: (Text) -> str
if not isinstance(val, Text):
raise TypeError("unicode")
return der_encode_tlv(0x1b, val.encode("utf-8"))
def der_encode_octet_string(val):
# type: (str) -> str
if not isinstance(val, str):
raise TypeError("str")
return der_encode_tlv(0x04, val)
def der_encode_sequence(tlvs, tagged=True):
# type: (List[str], Optional[bool]) -> str
body = []
for i, tlv in enumerate(tlvs):
# Missing optional elements represented as None.
if not tlv:
continue
if tagged:
# Assume kerberos-style explicit tagging of components.
tlv = der_encode_tlv(0xa0 | i, tlv)
body.append(tlv)
return der_encode_tlv(0x30, "".join(body))
def der_encode_ticket(tkt):
# type: (Dict[str, Any]) -> str
return der_encode_tlv(
0x61, # Ticket
der_encode_sequence(
[der_encode_integer(5), # tktVno
der_encode_string(tkt["realm"]),
der_encode_sequence( # PrincipalName
[der_encode_int32(tkt["sname"]["nameType"]),
der_encode_sequence([der_encode_string(c)
for c in tkt["sname"]["nameString"]],
tagged=False)]),
der_encode_sequence( # EncryptedData
[der_encode_int32(tkt["encPart"]["etype"]),
(der_encode_uint32(tkt["encPart"]["kvno"])
if "kvno" in tkt["encPart"]
else None),
der_encode_octet_string(
base64.b64decode(tkt["encPart"]["cipher"]))])]))
# Kerberos ccache writing code. Using format documentation from here:
# http://www.gnu.org/software/shishi/manual/html_node/The-Credential-Cache-Binary-File-Format.html
def ccache_counted_octet_string(data):
# type: (str) -> bytes
if not isinstance(data, str):
raise TypeError("str")
return struct.pack("!I", len(data)) + data
def ccache_principal(name, realm):
# type: (Dict[str, str], str) -> str
header = struct.pack("!II", name["nameType"], len(name["nameString"]))
return (header + ccache_counted_octet_string(realm.encode("utf-8")) +
"".join(ccache_counted_octet_string(c.encode("utf-8"))
for c in name["nameString"]))
def ccache_key(key):
# type: (Dict[str, str]) -> bytes
return (struct.pack("!H", key["keytype"]) +
ccache_counted_octet_string(base64.b64decode(key["keyvalue"])))
def flags_to_uint32(flags):
# type: (List[str]) -> int
ret = 0
for i, v in enumerate(flags):
if v:
ret |= 1 << (31 - i)
return ret
def ccache_credential(cred):
# type: (Dict[str, Any]) -> str
out = ccache_principal(cred["cname"], cred["crealm"])
out += ccache_principal(cred["sname"], cred["srealm"])
out += ccache_key(cred["key"])
out += struct.pack("!IIII",
cred["authtime"] // 1000,
cred.get("starttime", cred["authtime"]) // 1000,
cred["endtime"] // 1000,
cred.get("renewTill", 0) // 1000)
out += struct.pack("!B", 0)
out += struct.pack("!I", flags_to_uint32(cred["flags"]))
# TODO: Care about addrs or authdata? Former is "caddr" key.
out += struct.pack("!II", 0, 0)
out += ccache_counted_octet_string(der_encode_ticket(cred["ticket"]))
# No second_ticket.
out += ccache_counted_octet_string("")
return out
def make_ccache(cred):
# type: (Dict[str, Any]) -> str
# Do we need a DeltaTime header? The ccache I get just puts zero
# in there, so do the same.
out = struct.pack("!HHHHII",
0x0504, # file_format_version
12, # headerlen
1, # tag (DeltaTime)
8, # taglen (two uint32_ts)
0, 0, # time_offset / usec_offset
)
out += ccache_principal(cred["cname"], cred["crealm"])
out += ccache_credential(cred)
return out
license: apache-2.0 | hash: -5,120,918,209,832,883,000 | line_mean: 37.075758 | line_max: 98 | alpha_frac: 0.613477 | autogenerated: false

repo_name: qxcv/comp2560 | path: project/tests/test_pairwise_relations.py | copies: 1 | size: 2409
import numpy as np
from datasets import Joints
from pairwise_relations import from_dataset
def generate_fake_locations(num, means, stddev=5):
"""Generate a matrix with four rows (one for each "point") and three
columns (x-coord, y-coord and visibility). Means is a 3x2 matrix giving
mean locations for each point."""
per_joint = []
for joint_mean in means:
locations = np.random.multivariate_normal(
joint_mean, stddev * np.eye(2), num
)
with_visibility = np.append(locations, np.ones((num, 1)), axis=1)
per_joint.append(with_visibility)
warped_array = np.array(per_joint)
# Now we need to swap the first and second dimensions
return warped_array.transpose((1, 0, 2))
def test_clustering():
"""Test learning of clusters for joint types."""
first_means = np.asarray([
(10, 70),
(58, 94),
(66, 58),
(95, 62)
])
second_means = np.asarray([
(88, 12),
(56, 15),
(25, 21),
(24, 89)
])
fake_locations = np.concatenate([
generate_fake_locations(100, first_means),
generate_fake_locations(100, second_means),
], axis=0)
np.random.shuffle(fake_locations)
fake_pairs = [
(0, 1),
(1, 2),
(2, 3)
]
fake_joints = Joints(fake_locations, fake_pairs)
# Make two clusters for each relationship type. Yes, passing in zeros as
# your scale is stupid, and poor testing practice.
centers = from_dataset(fake_joints, 2, np.zeros(len(fake_locations)), 1)
assert centers.ndim == 3
# Three joints, two clusters per joint, two coordinates (i.e. x, y) per
# cluster
assert centers.shape == (3, 2, 2)
for idx, pair in enumerate(fake_pairs):
first_idx, second_idx = pair
first_mean = first_means[second_idx] - first_means[first_idx]
second_mean = second_means[second_idx] - second_means[first_idx]
found_means = centers[idx]
first_dists = np.linalg.norm(found_means - first_mean, axis=1)
second_dists = np.linalg.norm(found_means - second_mean, axis=1)
# Make sure that each of our specified means are within Euclidean
# distance 1 of at least one found cluster
first_within = first_dists < 1
assert first_within.any()
second_within = second_dists < 1
assert second_within.any()
license: apache-2.0 | hash: -8,136,137,840,983,608,000 | line_mean: 33.414286 | line_max: 76 | alpha_frac: 0.616853 | autogenerated: false

repo_name: petterreinholdtsen/frikanalen | path: fkbeta/fk/models.py | copies: 1 | size: 20257
# Copyright (c) 2012-2013 Benjamin Bruheim <grolgh@gmail.com>
# This file is covered by the LGPLv3 or later, read COPYING for details.
import datetime
import os
import uuid
import pytz
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.signals import post_save
from django.utils import timezone
from django.utils.timezone import utc
from django.utils.translation import ugettext as _
from model_utils import Choices
from model_utils.models import TimeStampedModel
"""
Models for the Frikanalen database.
A lot of the models are business-specific for Frikanalen. There's also a
quite a few fields that are related to our legacy systems, but these are
likely to be removed when we're confident that data is properly
transferred.
An empty database should populate at least FileFormat and Categories with
some content before it can be properly used.
Fields that are commented out are suggestions for future fields. If they
turn out to be silly they should obviously be removed.
"""
class Organization(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=255)
description = models.TextField(blank=True, max_length=255)
members = models.ManyToManyField(User) # User ownership of an organization
fkmember = models.BooleanField(default=False)
orgnr = models.CharField(blank=True, max_length=255)
homepage = models.CharField('Link back to the organisation home page.',
blank=True, null=True, max_length=255)
# No such concept yet. Every member can add members.
# owner = models.ForeignKey(User)
# Videos to feature on their frontpage, incl other members
# featured_videos = models.ManyToManyField("Video")
# twitter_username = models.CharField(null=True,max_length=255)
# twitter_tags = models.CharField(null=True,max_length=255)
# To be copied into every video they create
# homepage = models.CharField(blank=True, max_length=255)
# categories = models.ManyToManyField(Category)
class Meta:
db_table = 'Organization'
ordering = ('name', '-id')
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('vod-org-video-list', kwargs={'orgid': self.id})
class FileFormat(models.Model):
id = models.AutoField(primary_key=True)
description = models.TextField(
unique=True, max_length=255, null=True, blank=True)
fsname = models.CharField(max_length=20)
vod_publish = models.BooleanField('Present video format to video on demand?',
default=False)
mime_type = models.CharField(max_length=256,
null=True, blank=True)
# httpprefix = models.CharField(max_length=200)
# metadata framerate, resolution, etc?
class Meta:
db_table = 'ItemType'
verbose_name = 'video file format'
verbose_name_plural = 'video file formats'
ordering = ('fsname', '-id')
def __str__(self):
return self.fsname
class VideoFile(models.Model):
id = models.AutoField(primary_key=True)
# uploader = models.ForeignKey(User) # Not migrated
video = models.ForeignKey("Video")
format = models.ForeignKey("FileFormat")
filename = models.CharField(max_length=256)
old_filename = models.CharField(max_length=256, default='', blank=True)
# source = video = models.ForeignKey("VideoFile")
integrated_lufs = models.FloatField(
'Integrated LUFS of file defined in ITU R.128',
blank=True, null=True)
truepeak_lufs = models.FloatField(
'True peak LUFS of file defined in ITU R.128',
blank=True, null=True)
created_time = models.DateTimeField(
auto_now_add=True, null=True,
help_text='Time the video file was created')
# metadata frames, width, height, framerate? mlt profile name?
# edl for in/out?
class Meta:
verbose_name = 'video file'
verbose_name_plural = 'video files'
ordering = ('-video_id', '-id',)
def __str__(self):
return "%s version of %s" % (self.format.fsname, self.video.name)
def location(self, relative=False):
filename = os.path.basename(self.filename)
path = '/'.join((str(self.video.id), self.format.fsname, filename))
if relative:
return path
else:
return '/'.join((settings.FK_MEDIA_ROOT, path))
class Category(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=255)
desc = models.CharField(max_length=255, blank=True)
class Meta:
db_table = 'Category'
verbose_name = 'video category'
verbose_name_plural = 'video categories'
ordering = ('name', '-id')
def __str__(self):
return self.name
class VideoManager(models.Manager):
def public(self):
return (super(VideoManager, self)
.get_queryset()
.filter(publish_on_web=True, proper_import=True))
class Video(models.Model):
id = models.AutoField(primary_key=True)
# Retire, use description instead
header = models.TextField(blank=True, null=True, max_length=2048)
name = models.CharField(max_length=255)
description = models.CharField(blank=True, null=True, max_length=2048)
# Code for editors' internal use
# production_code = models.CharField(null=True,max_length=255)
categories = models.ManyToManyField(Category)
editor = models.ForeignKey(User)
has_tono_records = models.BooleanField(default=False)
is_filler = models.BooleanField('Play automatically?',
help_text = 'You still have the editorial responsibility. Only affect videos from members.',
default=False) # Find a better name?
publish_on_web = models.BooleanField(default=True)
# disabled = models.BooleanField() # Not migrated
# uploader = models.ForeignKey(User)
# Planned duration in milliseconds, probably not going to be used
# planned_duration = models.IntegerField()
# Time when it is to be published on web
# published_time = models.DateTimeField()
proper_import = models.BooleanField(default=False)
played_count_web = models.IntegerField(
default=0, help_text='Number of times it has been played')
created_time = models.DateTimeField(
auto_now_add=True, null=True,
help_text='Time the program record was created')
updated_time = models.DateTimeField(
auto_now=True, null=True,
help_text='Time the program record has been updated')
uploaded_time = models.DateTimeField(
blank=True, null=True,
help_text='Time the original video for the program was uploaded')
framerate = models.IntegerField(
default=25000,
help_text='Framerate of master video in thousands / second')
organization = models.ForeignKey(
Organization, null=True, help_text='Organization for video')
ref_url = models.CharField(
blank=True, max_length=1024, help_text='URL for reference')
duration = models.DurationField(blank=True, default=datetime.timedelta(0))
upload_token = models.CharField(
blank=True, default='', max_length=32,
help_text='Code for upload')
objects = VideoManager()
class Meta:
db_table = 'Video'
get_latest_by = 'uploaded_time'
ordering = ('-id',)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if not self.id:
self.upload_token = uuid.uuid4().hex
return super(Video, self).save(*args, **kwargs)
def is_public(self):
return self.publish_on_web and self.proper_import
def tags(self):
tags = []
if self.has_tono_records:
tags.append("tono")
if self.publish_on_web:
tags.append("www")
if self.is_filler:
tags.append("filler")
return ', '.join(tags)
def videofiles(self):
videofiles = VideoFile.objects.filter(video=self)
return videofiles
def category_list(self):
categories = self.categories.filter(video=self)
return categories
def schedule(self):
events = Scheduleitem.objects.filter(video=self)
return events
def first_broadcast(self):
events = Scheduleitem.objects.filter(video=self)
if events:
return events[0]
return None
def last_broadcast(self):
events = Scheduleitem.objects.filter(video=self)
if events:
return events[max(0, len(events)-1)]
return None
def videofile_url(self, fsname):
videofile = self.videofile_set.get(format__fsname=fsname)
return videofile.location(relative=True)
def small_thumbnail_url(self):
format = FileFormat.objects.get(fsname="small_thumb")
try:
videofile = VideoFile.objects.get(video=self, format=format)
except ObjectDoesNotExist:
return "/static/default_small_thumbnail.png"
return settings.FK_MEDIA_URLPREFIX+videofile.location(relative=True)
def medium_thumbnail_url(self):
format = FileFormat.objects.get(fsname="medium_thumb")
try:
videofile = VideoFile.objects.get(video=self, format=format)
except ObjectDoesNotExist:
return "/static/default_medium_thumbnail.png"
return settings.FK_MEDIA_URLPREFIX+videofile.location(relative=True)
def large_thumbnail_url(self):
format = FileFormat.objects.get(fsname="large_thumb")
try:
videofile = VideoFile.objects.get(video=self, format=format)
except ObjectDoesNotExist:
return "/static/default_large_thumbnail.png"
return settings.FK_MEDIA_URLPREFIX+videofile.location(relative=True)
def ogv_url(self):
try:
return settings.FK_MEDIA_URLPREFIX + self.videofile_url("theora")
except ObjectDoesNotExist:
return
def vod_files(self):
"""Return a list of video files fit for the video on demand
presentation, with associated MIME type.
[
{
'url: 'https://../.../file.ogv',
'mime_type': 'video/ogg',
},
]
"""
vodfiles = []
for videofile in self.videofiles().filter(format__vod_publish=True):
url = settings.FK_MEDIA_URLPREFIX + videofile.location(relative=True)
vodfiles.append({'url': url, 'mime_type': videofile.format.mime_type})
return vodfiles
def get_absolute_url(self):
return reverse('vod-video-detail', kwargs={'video_id': self.id})
class ScheduleitemManager(models.Manager):
def by_day(self, date=None, days=1, surrounding=False):
if not date:
date = timezone.now().date()
elif hasattr(date, 'date'):
date = date.date()
# Take current date, but make an object at 00:00.
# Then make that an aware datetime so our comparisons
# are correct.
day_start = datetime.datetime.combine(date, datetime.time(0))
startdt = timezone.make_aware(day_start, timezone.get_current_timezone())
enddt = startdt + datetime.timedelta(days=days)
if surrounding:
startdt, enddt = self.expand_to_surrounding(startdt, enddt)
return self.get_queryset().filter(starttime__gte=startdt,
starttime__lte=enddt)
def expand_to_surrounding(self, startdt, enddt):
# Try to find the event before the given date
try:
startdt = (Scheduleitem.objects
.filter(starttime__lte=startdt)
.order_by("-starttime")[0].starttime)
except IndexError:
pass
# Try to find the event after the end date
try:
enddt = (Scheduleitem.objects
.filter(starttime__gte=enddt)
.order_by("starttime")[0].starttime)
except IndexError:
pass
return startdt, enddt
class Scheduleitem(models.Model):
SCHEDULE_REASONS = (
(1, 'Legacy'),
(2, 'Administrative'),
(3, 'User'),
(4, 'Automatic'),
)
id = models.AutoField(primary_key=True)
default_name = models.CharField(max_length=255, blank=True)
video = models.ForeignKey(Video, null=True, blank=True)
schedulereason = models.IntegerField(blank=True, choices=SCHEDULE_REASONS)
starttime = models.DateTimeField()
duration = models.DurationField()
objects = ScheduleitemManager()
"""
def save(self, *args, **kwargs):
self.endtime = self.starttime + timeutils.duration
super(Scheduleitem, self).save(*args, **kwargs)
"""
class Meta:
db_table = 'ScheduleItem'
verbose_name = 'TX schedule entry'
verbose_name_plural = 'TX schedule entries'
ordering = ('-id',)
def __str__(self):
t = self.starttime
s = t.strftime("%Y-%m-%d %H:%M:%S")
# format microsecond to hundreths
s += ".%02i" % (t.microsecond / 10000)
if self.video:
return str(s) + ": " + str(self.video)
else:
return str(s) + ": " + self.default_name
def endtime(self):
if not self.duration:
return self.starttime
return self.starttime + self.duration
class UserProfile(models.Model):
user = models.OneToOneField(User)
phone = models.CharField(
blank=True, max_length=255, default='', null=True)
mailing_address = models.CharField(
blank=True, max_length=512, default='', null=True)
post_code = models.CharField(
blank=True, max_length=255, default='', null=True)
city = models.CharField(
blank=True, max_length=255, default='', null=True)
country = models.CharField(
blank=True, max_length=255, default='', null=True)
legacy_username = models.CharField(
blank=True, max_length=255, default='')
def __str__(self):
return "%s (profile)" % self.user
def create_user_profile(sender, instance, created, **kwargs):
if created:
profile, created = UserProfile.objects.get_or_create(user=instance)
# Create a hook so the profile model is created when a User is.
post_save.connect(create_user_profile, sender=User)
class SchedulePurpose(models.Model):
"""
A block of video files having a similar purpose.
Either an organization and its videos (takes preference) or manually
connected videos.
"""
STRATEGY = Choices('latest', 'random', 'least_scheduled')
TYPE = Choices('videos', 'organization')
name = models.CharField(max_length=100)
type = models.CharField(max_length=32, choices=TYPE)
strategy = models.CharField(max_length=32, choices=STRATEGY)
# You probably need one of these depending on type and strategy
organization = models.ForeignKey(Organization, blank=True, null=True)
direct_videos = models.ManyToManyField(Video, blank=True)
class Meta:
ordering = ('-id',)
def videos_str(self):
return ", ".join([str(x) for x in self.videos_queryset()])
videos_str.short_description = "videos"
videos_str.admin_order_field = "videos"
def videos_queryset(self, max_duration=None):
"""
Get the queryset for the available videos
"""
if self.type == self.TYPE.organization:
qs = self.organization.video_set.all()
elif self.type == self.TYPE.videos:
qs = self.direct_videos.all()
else:
raise Exception("Unhandled type %s" % self.type)
if max_duration:
qs = qs.filter(duration__lte=max_duration)
# Workaround playout not handling broken files correctly
qs = qs.filter(proper_import=True)
return qs
def single_video(self, max_duration=None):
"""
Get a single video based on the settings of this purpose
"""
qs = self.videos_queryset(max_duration)
if self.strategy == self.STRATEGY.latest:
try:
return qs.latest()
except Video.DoesNotExist:
return None
elif self.strategy == self.STRATEGY.random:
# This might be slow, but hopefully few records
return qs.order_by('?').first()
elif self.strategy == self.STRATEGY.least_scheduled:
# Get the video which has been scheduled the least
return (qs.annotate(num_sched=models.Count('scheduleitem'))
.order_by('num_sched').first())
else:
raise Exception("Unhandled strategy %s" % self.strategy)
def __str__(self):
return self.name
class WeeklySlot(models.Model):
DAY_OF_THE_WEEK = (
(0, _('Monday')),
(1, _('Tuesday')),
(2, _('Wednesday')),
(3, _('Thursday')),
(4, _('Friday')),
(5, _('Saturday')),
(6, _('Sunday')),
)
purpose = models.ForeignKey(SchedulePurpose, null=True, blank=True)
day = models.IntegerField(
choices=DAY_OF_THE_WEEK,
)
start_time = models.TimeField()
duration = models.DurationField()
class Meta:
ordering = ('day', 'start_time', 'pk')
@property
def end_time(self):
if not self.duration:
return self.start_time
return self.start_time + self.duration
def next_date(self, from_date=None):
if not from_date:
from_date = datetime.date.today()
days_ahead = self.day - from_date.weekday()
if days_ahead <= 0:
# target date already happened this week
days_ahead += 7
return from_date + datetime.timedelta(days_ahead)
def next_datetime(self, from_date=None):
next_date = self.next_date(from_date)
naive_dt = datetime.datetime.combine(next_date, self.start_time)
tz = pytz.timezone(settings.TIME_ZONE)
return tz.localize(naive_dt)
def __str__(self):
return ("{day} {s.start_time} ({s.purpose})"
"".format(day=self.get_day_display(), s=self))
class AsRun(TimeStampedModel):
"""
AsRun model is a historic log over what was sent through playout.
`video` - Points to the Video which was played if there is one.
Can be empty if something other than a video was played.
The field is mutually exclusive with `program_name`.
`program_name` - A free form text input saying what was played.
If `video` is set, this field should not be set.
Examples of where you'd use this field is e.g.
when broadcasting live.
Defaults to the empty string.
`playout` - The playout this entry corresponds with. This will
almost always be 'main' which it defaults to.
`played_at` - Time when the playout started. Defaults to now.
`in_ms` - The inpoint where the video/stream was started at.
In milliseconds. Normally 0 which it defaults to.
`out_ms` - The outpoint where the video/stream stopped.
This would often be the duration of the video, or
how long we live streamed a particular URL.
Can be null (None) if this is 'currently happening'.
"""
video = models.ForeignKey(Video, blank=True, null=True)
program_name = models.CharField(max_length=160, blank=True, default='')
playout = models.CharField(max_length=255, blank=True, default='main')
played_at = models.DateTimeField(blank=True, default=timezone.now)
in_ms = models.IntegerField(blank=True, default=0)
out_ms = models.IntegerField(blank=True, null=True)
def __str__(self):
if self.video:
return '{s.playout} video: {s.video}'.format(s=self)
return '{s.playout}: {s.program_name}'.format(s=self)
class Meta:
ordering = ('-played_at', '-id',)
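# Illustrative use, not part of the original file: a live broadcast with no
# video record could be logged roughly as
#   AsRun.objects.create(program_name='Live studio broadcast', playout='main')
# whereas a played file would instead set `video` and leave `program_name` empty.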
license: lgpl-3.0 | hash: 90,916,170,762,672,200 | line_mean: 34.601054 | line_max: 129 | alpha_frac: 0.630646 | autogenerated: false

repo_name: skosukhin/spack | path: var/spack/repos/builtin/packages/r-ggpubr/package.py | copies: 1 | size: 1832
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RGgpubr(RPackage):
"""ggpubr: 'ggplot2' Based Publication Ready Plots"""
homepage = "http://www.sthda.com/english/rpkgs/ggpubr"
url = "https://cran.r-project.org/src/contrib/ggpubr_0.1.2.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/ggpubr"
version('0.1.2', '42a5749ae44121597ef511a7424429d1')
depends_on('r@3.1.0:')
depends_on('r-ggplot2', type=('build', 'run'))
depends_on('r-ggrepel', type=('build', 'run'))
depends_on('r-ggsci', type=('build', 'run'))
depends_on('r-plyr', type=('build', 'run'))
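    # Hedged usage note: once this package file sits in a Spack repository,
    # the library can typically be installed and made available with commands
    # along these lines (exact behaviour depends on the local Spack setup):
    #
    #   spack install r-ggpubr@0.1.2
    #   spack load r-ggpubr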
|
lgpl-2.1
| 4,993,525,382,914,049,000 | 43.682927 | 78 | 0.659389 | false |
psinha/paparazzi
|
sw/ground_segment/python/move_waypoint_example.py
|
1
|
1617
|
#!/usr/bin/env python
from __future__ import print_function
import sys
from os import path, getenv
from time import sleep
# if PAPARAZZI_SRC not set, then assume the tree containing this
# file is a reasonable substitute
PPRZ_SRC = getenv("PAPARAZZI_SRC", path.normpath(path.join(path.dirname(path.abspath(__file__)), '../../../')))
sys.path.append(PPRZ_SRC + "/sw/ext/pprzlink/lib/v1.0/python")
from ivy_msg_interface import IvyMessagesInterface
from pprzlink.message import PprzMessage
class WaypointMover(object):
def __init__(self, verbose=False):
self.verbose = verbose
self._interface = IvyMessagesInterface(self.message_recv)
def message_recv(self, ac_id, msg):
if self.verbose:
print("Got msg %s" % msg.name)
def shutdown(self):
print("Shutting down ivy interface...")
self._interface.shutdown()
def __del__(self):
self.shutdown()
def move_waypoint(self, ac_id, wp_id, lat, lon, alt):
msg = PprzMessage("ground", "MOVE_WAYPOINT")
msg['ac_id'] = ac_id
msg['wp_id'] = wp_id
msg['lat'] = lat
msg['long'] = lon
msg['alt'] = alt
print("Sending message: %s" % msg)
self._interface.send(msg)
if __name__ == '__main__':
try:
wm = WaypointMover()
        # sleep briefly in order to make sure Ivy is up, so the message is sent before shutting down again
sleep(0.1)
wm.move_waypoint(ac_id=202, wp_id=3, lat=43.563, lon=1.481, alt=172.0)
sleep(0.1)
except KeyboardInterrupt:
print("Stopping on request")
wm.shutdown()
|
gpl-2.0
| 2,017,451,250,166,968,600 | 28.944444 | 111 | 0.621521 | false |
iut-ibk/DynaMind-ToolBox
|
DynaMind-GDALModules/scripts/Drainage/dm_importswmm.py
|
1
|
17024
|
# -*- coding: utf-8 -*-
"""
@file
@author Christian Urich <christian.urich@gmail.com>
@version 1.0
@section LICENSE
This file is part of DynaMind
Copyright (C) 2015 Christian Urich
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
import sys
from pydynamind import *
class DM_ImportSWMM(Module):
display_name = "Import SWMM File"
group_name = "Performance Assessment"
def getHelpUrl(self):
return "/DynaMind-GDALModules/dm_importswmm.html"
def __init__(self):
Module.__init__(self)
self.setIsGDALModule(True)
self.createParameter("filename", FILENAME, "Name of SWMM File")
self.filename = ""
self.createParameter("name_outlet", STRING, "Identifier Outlet")
self.name_outlet = ""
# self.conduits.addLinkAttribute("XSECTION", "XSECTION", WRITE)
# self.junctions = View("JUNCTION", NODE, WRITE)
# self.junctions.addAttribute("Z", DOUBLE, WRITE)
# self.junctions.addAttribute("D", DOUBLE, WRITE)
# self.junctions.addAttribute("invert_elevation", DOUBLE, WRITE)
# self.junctions.addAttribute("built_year", DOUBLE, WRITE)
#
# self.outfalls = View("OUTFALL", NODE, WRITE)
# self.outfalls.addAttribute("Z", DOUBLE, WRITE)
# Not imported
# self.inlets = View("INLET", NODE, WRITE)
# self.wwtps = View("WWTP", NODE, WRITE)
#
# self.storages = View("STORAGE", NODE, WRITE)
# self.storages.addAttribute("Z", DOUBLE, WRITE)
# self.storages.addAttribute("max_depth", DOUBLE, WRITE)
# self.storages.addAttribute("type", STRING, WRITE)
# self.storages.addAttribute("storage_x", DOUBLE, WRITE)
# self.storages.addAttribute("storage_y", DOUBLE, WRITE)
#
# self.weirs = View("WEIR", EDGE, WRITE)
# self.weirs.addAttribute("type", STRING, WRITE)
# self.weirs.addAttribute("crest_height", DOUBLE, WRITE)
# self.weirs.addAttribute("discharge_coefficient", DOUBLE, WRITE)
# self.weirs.addAttribute("end_coefficient", DOUBLE, WRITE)
#
# self.pumps = View("PUMPS", EDGE, WRITE)
# self.pumps.addAttribute("type", STRING, WRITE)
# self.pumps.addAttribute("pump_x", DOUBLE, WRITE)
# self.pumps.addAttribute("pump_y", DOUBLE, WRITE)
# views.append(self.conduits)
# views.append(self.nodes)
# views.append(self.outfalls)
# views.append(self.junctions)
# views.append(self.inlets)
# views.append(self.wwtps)
# views.append(self.storages)
# views.append(self.weirs)
# views.append(self.xsections)
# views.append(self.pumps)
# self.registerViewContainers(views)
# self.createParameter("NameWWTP", STRING, "Identifier WWTP")
# self.NameWWTP = "MD020"
# self.createParameter("defaultBuiltYear", INT, "Default_Built_Year")
# self.defaultBuiltYear = 1900
#
# self.curves = {}
# self.curves_types = {}
def readCurves(self):
try:
f = open(self.filename)
startReading = False
for line in f:
line = line.strip()
                if line == '':
                    continue
                if line[0] == ';':
                    continue
                if startReading and line[0] == '[':
                    startReading = False
                    break
                if startReading:
# print line
content = line.split()
if content[0] not in self.curves:
self.curves[content[0]] = []
values = self.curves[content[0]]
if (len(content) == 4):
values.append((float(content[2]), float(content[3])))
if (len(content) == 3):
values.append((float(content[1]), float(content[2])))
self.curves[content[0]] = values
if (len(content) == 4):
if content[1] != "":
self.curves_types[content[0]] = str(content[1])
if line == "[CURVES]":
startReading = True
f.close()
        except Exception as e:
print e
print sys.exc_info()
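    # For reference, readCurves() expects a SWMM-style [CURVES] section roughly
    # like the sketch below (names and numbers are hypothetical, not taken from
    # a real input file). Four-column rows also record the curve type; rows
    # with three columns only append (x, y) points to the named curve.
    #
    #   [CURVES]
    #   ;;Name   Type    X-Value  Y-Value
    #   P1       Pump1   0        10
    #   P1               5        20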
def init(self):
self.conduits = ViewContainer("conduit", EDGE, WRITE)
self.conduits.addAttribute("start_id", Attribute.INT, WRITE)
self.conduits.addAttribute("end_id", Attribute.INT, WRITE)
self.pumps = ViewContainer("pump", EDGE, WRITE)
self.pumps.addAttribute("start_id", Attribute.INT, WRITE)
self.pumps.addAttribute("end_id", Attribute.INT, WRITE)
self.weirs = ViewContainer("weir", EDGE, WRITE)
self.weirs.addAttribute("start_id", Attribute.INT, WRITE)
self.weirs.addAttribute("end_id", Attribute.INT, WRITE)
# self.conduits.addAttribute("inlet_offset", Attribute.DOUBLE, WRITE)
# self.conduits.addAttribute("outlet_offset", Attribute.DOUBLE, WRITE)
# self.conduits.addAttribute("diameter", Attribute.DOUBLE, WRITE)
# self.dummy = ViewContainer("dummy", SUBSYSTEM, MODIFY)
# self.xsections = ViewContainer("xsection",COMPONENT,WRITE)
# self.xsections.addAttribute("type", STRING, WRITE)
# self.xsections.addAttribute("shape", STRING, WRITE)
self.nodes_container = ViewContainer("node", NODE, WRITE)
views = [self.nodes_container, self.conduits, self.pumps, self.weirs]
if self.name_outlet != "":
self.outlet = ViewContainer("outlet", NODE, WRITE)
self.outlet.addAttribute("node_id", Attribute.INT, WRITE)
views.append(self.outlet)
self.registerViewContainers(views)
def run(self):
# try:
# sewer = self.getData("Sewer")
results = {}
f = open(self.filename)
currentContainer = ""
for line in f:
# print line
line = line.strip()
            if line == '':
                continue
            if line[0] == ';':
                continue
            if line[0] == '[':
                results[line] = {}
                currentContainer = line
            if line == '':
                continue
# First Section is always the Name
content = line.split()
container = []
counter = 0
if len(content) < 2:
continue
for c in content:
counter = counter + 1
                if counter == 1:
continue
container.append(c)
ress = results[currentContainer]
ress[content[0]] = container
results[currentContainer] = ress
f.close()
# print "done reading"
# self.readCurves()
# "Create Nodes"
nodes = {}
node_ids = set()
# Add Coordinates
node_id = 0 # We assume that the node id increases incrementally
ress = results["[COORDINATES]"]
for c in ress:
node_id += 1
coords = ress[c]
node = self.nodes_container.create_feature()
# Create geometry
n_pt = ogr.Geometry(ogr.wkbPoint)
x1 = float(coords[0])
y1 = float(coords[1])
n_pt.SetPoint_2D(0, x1, y1)
# Set geometry in feature
node.SetGeometry(n_pt)
nodes[c] = (node_id, x1, y1)
node_ids.add(c)
if self.name_outlet == c:
outfall = self.outlet.create_feature()
outfall.SetGeometry(n_pt)
outfall.SetField("node_id", node_id)
self.nodes_container.finalise()
if self.name_outlet != "":
self.outlet.finalise()
# #Add Nodes
# junctions = results["[JUNCTIONS]"]
# for c in junctions:
# attributes = junctions[c]
# juntion = nodes[c]
# sewer.addComponentToView(juntion, self.junctions)
#
# juntion.addAttribute("SWMM_ID", str(c))
# juntion.addAttribute("invert_elevation", (float(attributes[0])))
# juntion.addAttribute("D", (float(attributes[1])))
# juntion.addAttribute("Z", (float(attributes[0])) + (float(attributes[1])))
# juntion.addAttribute("built_year", self.defaultBuiltYear)
# if (c == self.NameWWTP):
# print "wwtp found"
# sewer.addComponentToView(juntion, self.wwtps)
#
# #Write Outfalls
# outfalls = results["[OUTFALLS]"]
# for o in outfalls:
# vals = outfalls[o]
# attributes = outfalls[o]
# outfall = nodes[o]
# sewer.addComponentToView(outfall, self.outfalls)
# outfall.addAttribute("Z", float(vals[0]))
# if (o == self.NameWWTP):
# print "wwtp found"
# sewer.addComponentToView(outfall, self.wwtps)
# outfall.addAttribute("WWTP", 1.0)
# #Write Storage Units
# if "[STORAGE]" in results:
# storages = results["[STORAGE]"]
# for s in storages:
# vals = storages[s]
# storage = nodes[s]
# sewer.addComponentToView(storage, self.storages)
# storage.addAttribute("Z", float(vals[0]))
# storage.addAttribute("max_depth", float(vals[1]))
# storage.addAttribute("type", vals[3])
# if vals[3] == "TABULAR":
# curve = self.curves[vals[4]]
# storage_x = doublevector()
# storage_y = doublevector()
# for c in curve:
# storage_x.append(c[0])
# storage_y.append(c[1])
# storage.getAttribute("storage_x").setDoubleVector(storage_x)
# storage.getAttribute("storage_y").setDoubleVector(storage_y)
#
#
#
# if "[XSECTIONS]" in results:
# xsections = results["[XSECTIONS]"]
#
ress = results["[CONDUITS]"]
counter = 0
for c in ress:
counter += 1
vals = ress[c]
end_id = nodes[vals[0]]
start_id = nodes[vals[1]]
# if end_id not in node_ids:
# continue
# if start_id not in node_ids:
# continue
conduit = self.conduits.create_feature()
line = ogr.Geometry(ogr.wkbLineString)
# print start_id
# print nodes[start_id][1], nodes[start_id][2]
line.SetPoint_2D(0, nodes[vals[0]][1], nodes[vals[0]][2])
line.SetPoint_2D(1, nodes[vals[1]][1], nodes[vals[1]][2])
conduit.SetGeometry(line)
# Create XSection
conduit.SetField("start_id", nodes[vals[0]][0])
conduit.SetField("end_id", nodes[vals[1]][0])
# conduit.SetField("inlet_offset", float(vals[4]))
# conduit.SetField("outlet_offset", float(vals[5]))
# e.addAttribute("built_year", self.defaultBuiltYear)
# if c in xsections:
# e.addAttribute("Diameter", float(xsections[c][1]))
# xsection = self.createXSection(sewer, xsections[c])
# e.getAttribute("XSECTION").addLink(xsection, "XSECTION")
self.conduits.finalise()
if "[WEIRS]" in results:
c_weirs = results["[WEIRS]"]
for c in c_weirs:
vals = c_weirs[c]
end_id = nodes[vals[0]]
start_id = nodes[vals[1]]
# if end_id not in node_ids:
# continue
# if start_id not in node_ids:
# continue
weir = self.weirs.create_feature()
line = ogr.Geometry(ogr.wkbLineString)
# print start_id
# print nodes[start_id][1], nodes[start_id][2]
line.SetPoint_2D(0, nodes[vals[0]][1], nodes[vals[0]][2])
line.SetPoint_2D(1, nodes[vals[1]][1], nodes[vals[1]][2])
weir.SetGeometry(line)
# Create XSection
weir.SetField("start_id", nodes[vals[0]][0])
weir.SetField("end_id", nodes[vals[1]][0])
self.weirs.finalise()
# vals = c_weirs[c]
# start = nodes[vals[0]]
# end = nodes[vals[1]]
# e = sewer.addEdge(start, end, self.weirs)
#
# e.addAttribute("type",vals[2] )
# e.addAttribute("crest_height",float(vals[3]))
# e.addAttribute("discharge_coefficient",float(vals[4]))
# e.addAttribute("end_coefficient",float(vals[7]))
# #Create XSection
# e.addAttribute("Diameter", float(xsections[c][1]))
#
# xsection = self.createXSection(sewer, xsections[c])
# e.getAttribute("XSECTION").addLink(xsection, "XSECTION")
if "[PUMPS]" in results:
c_pumps = results["[PUMPS]"]
for c in c_pumps:
vals = c_pumps[c]
end_id = nodes[vals[0]]
start_id = nodes[vals[1]]
# if end_id not in node_ids:
# continue
# if start_id not in node_ids:
# continue
pump = self.pumps.create_feature()
line = ogr.Geometry(ogr.wkbLineString)
# print start_id
# print nodes[start_id][1], nodes[start_id][2]
line.SetPoint_2D(0, nodes[vals[0]][1], nodes[vals[0]][2])
line.SetPoint_2D(1, nodes[vals[1]][1], nodes[vals[1]][2])
pump.SetGeometry(line)
# Create XSection
pump.SetField("start_id", nodes[vals[0]][0])
pump.SetField("end_id", nodes[vals[1]][0])
self.pumps.finalise()
# vals = c_pumps[c]
# start = nodes[vals[0]]
# end = nodes[vals[1]]
# e = sewer.addEdge(start, end, self.pumps)
#
# e.addAttribute("type", self.curves_types[vals[2]] )
#
# curve = self.curves[vals[2]]
# pump_x = doublevector()
# pump_y = doublevector()
# for c in curve:
# pump_x.append(c[0])
# pump_y.append(c[1])
#
# e.getAttribute("pump_x").setDoubleVector(pump_x)
# e.getAttribute("pump_y").setDoubleVector(pump_y)
#
# except Exception, e:
# print e
# print sys.exc_info()
# self.nodes_container.finalise()
# def createXSection(self, sewer, attributes):
# c_xsection = Component()
# xsection = sewer.addComponent(c_xsection, self.xsections)
# xsection.addAttribute("type", str(attributes[0]))
# diameters = doublevector()
# diameters.push_back(float(attributes[1]))
# #print self.curves
# if str(attributes[0]) != "CUSTOM":
# diameters.push_back(float(attributes[2]))
# diameters.push_back(float(attributes[3]))
# diameters.push_back(float(attributes[4]))
# else:
# shape_x = doublevector()
# shape_y = doublevector()
# #print attributes
# cv = self.curves[attributes[2]]
#
# #xsection.getAttribute("shape_type").setString(vd)
# for c in cv:
# shape_x.append(c[0])
# shape_y.append(c[1])
# xsection.getAttribute("shape_x").setDoubleVector(shape_x)
# xsection.getAttribute("shape_y").setDoubleVector(shape_y)
# xsection.getAttribute("shape_type").setString(self.curves_types[attributes[2]])
#
# xsection.getAttribute("diameters").setDoubleVector(diameters)
#
# return xsection
|
gpl-2.0
| 8,308,265,447,512,826,000 | 37 | 97 | 0.523672 | false |
kosystem/git-tree
|
gitTree/gitTree.py
|
1
|
1927
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import subprocess
C_GREEN = '\033[92m'
C_BLUE = '\033[94m'
C_END = '\033[00m'
def grouping(fileList):
root = {}
for path in fileList:
current = root
for p in path.rstrip('\n').split('/'):
current.setdefault(p, {})
current = current[p]
return root
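# A small illustration of grouping() (hypothetical paths, not real output of
# `git ls-files`):
#
#   grouping(['a/b.txt\n', 'a/c.txt\n', 'd.txt\n'])
#   # -> {'a': {'b.txt': {}, 'c.txt': {}}, 'd.txt': {}}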
def displayItems(items, path, prefix, color):
for index, item in enumerate(sorted(items.keys())):
if index == len(items)-1:
print prefix + '└── ' + appendColor(path, item, color)
nextPrefix = prefix + ' '
else:
print prefix + '├── ' + appendColor(path, item, color)
nextPrefix = prefix + '│ '
if len(items[item]) > 0:
nextpath = os.path.join(path, item)
displayItems(items[item], nextpath, nextPrefix, color)
def appendColor(path, item, color=False):
filepath = os.path.join(path, item)
colorCode = ''
endCode = C_END if color else ''
indicator = ''
if color:
if os.path.isdir(filepath):
colorCode = C_BLUE
elif os.access(filepath, os.X_OK):
colorCode = C_GREEN
else:
colorCode = C_END
if os.path.isdir(filepath):
indicator = '/'
return colorCode + item + endCode + indicator
def main():
cmd = 'git ls-files'
p = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
p.wait()
stdout_data = p.stdout.readlines()
stderr_data = p.stderr.read()
if len(stderr_data) > 0:
print stderr_data,
else:
color = True
currentDir = os.path.split(os.getcwd())
print appendColor(currentDir[0], currentDir[1], color)
group = grouping(stdout_data)
displayItems(group, '.', '', color)
if __name__ == '__main__':
main()
|
mit
| 7,526,913,242,082,758,000 | 24.144737 | 66 | 0.552067 | false |
jhamman/xray
|
xarray/tests/test_combine.py
|
1
|
15860
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from copy import deepcopy
import numpy as np
import pandas as pd
from xarray import Dataset, DataArray, auto_combine, concat, Variable
from xarray.core.pycompat import iteritems, OrderedDict
from . import TestCase, InaccessibleArray, requires_dask
from .test_dataset import create_test_data
class TestConcatDataset(TestCase):
def test_concat(self):
# TODO: simplify and split this test case
# drop the third dimension to keep things relatively understandable
data = create_test_data()
for k in list(data):
if 'dim3' in data[k].dims:
del data[k]
split_data = [data.isel(dim1=slice(3)),
data.isel(dim1=slice(3, None))]
self.assertDatasetIdentical(data, concat(split_data, 'dim1'))
def rectify_dim_order(dataset):
# return a new dataset with all variable dimensions transposed into
# the order in which they are found in `data`
return Dataset(dict((k, v.transpose(*data[k].dims))
for k, v in iteritems(dataset.data_vars)),
dataset.coords, attrs=dataset.attrs)
for dim in ['dim1', 'dim2']:
datasets = [g for _, g in data.groupby(dim, squeeze=False)]
self.assertDatasetIdentical(data, concat(datasets, dim))
dim = 'dim2'
self.assertDatasetIdentical(
data, concat(datasets, data[dim]))
self.assertDatasetIdentical(
data, concat(datasets, data[dim], coords='minimal'))
datasets = [g for _, g in data.groupby(dim, squeeze=True)]
concat_over = [k for k, v in iteritems(data.coords)
if dim in v.dims and k != dim]
actual = concat(datasets, data[dim], coords=concat_over)
self.assertDatasetIdentical(data, rectify_dim_order(actual))
actual = concat(datasets, data[dim], coords='different')
self.assertDatasetIdentical(data, rectify_dim_order(actual))
# make sure the coords argument behaves as expected
data.coords['extra'] = ('dim4', np.arange(3))
for dim in ['dim1', 'dim2']:
datasets = [g for _, g in data.groupby(dim, squeeze=True)]
actual = concat(datasets, data[dim], coords='all')
expected = np.array([data['extra'].values
for _ in range(data.dims[dim])])
self.assertArrayEqual(actual['extra'].values, expected)
actual = concat(datasets, data[dim], coords='different')
self.assertDataArrayEqual(data['extra'], actual['extra'])
actual = concat(datasets, data[dim], coords='minimal')
self.assertDataArrayEqual(data['extra'], actual['extra'])
# verify that the dim argument takes precedence over
# concatenating dataset variables of the same name
dim = (2 * data['dim1']).rename('dim1')
datasets = [g for _, g in data.groupby('dim1', squeeze=False)]
expected = data.copy()
expected['dim1'] = dim
self.assertDatasetIdentical(expected, concat(datasets, dim))
def test_concat_data_vars(self):
data = Dataset({'foo': ('x', np.random.randn(10))})
objs = [data.isel(x=slice(5)), data.isel(x=slice(5, None))]
for data_vars in ['minimal', 'different', 'all', [], ['foo']]:
actual = concat(objs, dim='x', data_vars=data_vars)
self.assertDatasetIdentical(data, actual)
def test_concat_coords(self):
data = Dataset({'foo': ('x', np.random.randn(10))})
expected = data.assign_coords(c=('x', [0] * 5 + [1] * 5))
objs = [data.isel(x=slice(5)).assign_coords(c=0),
data.isel(x=slice(5, None)).assign_coords(c=1)]
for coords in ['different', 'all', ['c']]:
actual = concat(objs, dim='x', coords=coords)
self.assertDatasetIdentical(expected, actual)
for coords in ['minimal', []]:
with self.assertRaisesRegexp(ValueError, 'not equal across'):
concat(objs, dim='x', coords=coords)
def test_concat_constant_index(self):
# GH425
ds1 = Dataset({'foo': 1.5}, {'y': 1})
ds2 = Dataset({'foo': 2.5}, {'y': 1})
expected = Dataset({'foo': ('y', [1.5, 2.5]), 'y': [1, 1]})
for mode in ['different', 'all', ['foo']]:
actual = concat([ds1, ds2], 'y', data_vars=mode)
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'not equal across datasets'):
concat([ds1, ds2], 'y', data_vars='minimal')
def test_concat_size0(self):
data = create_test_data()
split_data = [data.isel(dim1=slice(0, 0)), data]
actual = concat(split_data, 'dim1')
self.assertDatasetIdentical(data, actual)
actual = concat(split_data[::-1], 'dim1')
self.assertDatasetIdentical(data, actual)
def test_concat_autoalign(self):
ds1 = Dataset({'foo': DataArray([1, 2], coords=[('x', [1, 2])])})
ds2 = Dataset({'foo': DataArray([1, 2], coords=[('x', [1, 3])])})
actual = concat([ds1, ds2], 'y')
expected = Dataset({'foo': DataArray([[1, 2, np.nan], [1, np.nan, 2]],
dims=['y', 'x'],
coords={'x': [1, 2, 3]})})
self.assertDatasetIdentical(expected, actual)
def test_concat_errors(self):
data = create_test_data()
split_data = [data.isel(dim1=slice(3)),
data.isel(dim1=slice(3, None))]
with self.assertRaisesRegexp(ValueError, 'must supply at least one'):
concat([], 'dim1')
with self.assertRaisesRegexp(ValueError, 'are not coordinates'):
concat([data, data], 'new_dim', coords=['not_found'])
with self.assertRaisesRegexp(ValueError, 'global attributes not'):
data0, data1 = deepcopy(split_data)
data1.attrs['foo'] = 'bar'
concat([data0, data1], 'dim1', compat='identical')
self.assertDatasetIdentical(
data, concat([data0, data1], 'dim1', compat='equals'))
with self.assertRaisesRegexp(ValueError, 'encountered unexpected'):
data0, data1 = deepcopy(split_data)
data1['foo'] = ('bar', np.random.randn(10))
concat([data0, data1], 'dim1')
with self.assertRaisesRegexp(ValueError, 'compat.* invalid'):
concat(split_data, 'dim1', compat='foobar')
with self.assertRaisesRegexp(ValueError, 'unexpected value for'):
concat([data, data], 'new_dim', coords='foobar')
with self.assertRaisesRegexp(
ValueError, 'coordinate in some datasets but not others'):
concat([Dataset({'x': 0}), Dataset({'x': [1]})], dim='z')
with self.assertRaisesRegexp(
ValueError, 'coordinate in some datasets but not others'):
concat([Dataset({'x': 0}), Dataset({}, {'x': 1})], dim='z')
with self.assertRaisesRegexp(ValueError, 'no longer a valid'):
concat([data, data], 'new_dim', mode='different')
with self.assertRaisesRegexp(ValueError, 'no longer a valid'):
concat([data, data], 'new_dim', concat_over='different')
def test_concat_promote_shape(self):
# mixed dims within variables
objs = [Dataset({}, {'x': 0}), Dataset({'x': [1]})]
actual = concat(objs, 'x')
expected = Dataset({'x': [0, 1]})
self.assertDatasetIdentical(actual, expected)
objs = [Dataset({'x': [0]}), Dataset({}, {'x': 1})]
actual = concat(objs, 'x')
self.assertDatasetIdentical(actual, expected)
# mixed dims between variables
objs = [Dataset({'x': [2], 'y': 3}), Dataset({'x': [4], 'y': 5})]
actual = concat(objs, 'x')
expected = Dataset({'x': [2, 4], 'y': ('x', [3, 5])})
self.assertDatasetIdentical(actual, expected)
# mixed dims in coord variable
objs = [Dataset({'x': [0]}, {'y': -1}),
Dataset({'x': [1]}, {'y': ('x', [-2])})]
actual = concat(objs, 'x')
expected = Dataset({'x': [0, 1]}, {'y': ('x', [-1, -2])})
self.assertDatasetIdentical(actual, expected)
# scalars with mixed lengths along concat dim -- values should repeat
objs = [Dataset({'x': [0]}, {'y': -1}),
Dataset({'x': [1, 2]}, {'y': -2})]
actual = concat(objs, 'x')
expected = Dataset({'x': [0, 1, 2]}, {'y': ('x', [-1, -2, -2])})
self.assertDatasetIdentical(actual, expected)
# broadcast 1d x 1d -> 2d
objs = [Dataset({'z': ('x', [-1])}, {'x': [0], 'y': [0]}),
Dataset({'z': ('y', [1])}, {'x': [1], 'y': [0]})]
actual = concat(objs, 'x')
expected = Dataset({'z': (('x', 'y'), [[-1], [1]])},
{'x': [0, 1], 'y': [0]})
self.assertDatasetIdentical(actual, expected)
def test_concat_do_not_promote(self):
# GH438
objs = [Dataset({'y': ('t', [1])}, {'x': 1, 't': [0]}),
Dataset({'y': ('t', [2])}, {'x': 1, 't': [0]})]
expected = Dataset({'y': ('t', [1, 2])}, {'x': 1, 't': [0, 0]})
actual = concat(objs, 't')
self.assertDatasetIdentical(expected, actual)
objs = [Dataset({'y': ('t', [1])}, {'x': 1, 't': [0]}),
Dataset({'y': ('t', [2])}, {'x': 2, 't': [0]})]
with self.assertRaises(ValueError):
concat(objs, 't', coords='minimal')
def test_concat_dim_is_variable(self):
objs = [Dataset({'x': 0}), Dataset({'x': 1})]
coord = Variable('y', [3, 4])
expected = Dataset({'x': ('y', [0, 1]), 'y': [3, 4]})
actual = concat(objs, coord)
self.assertDatasetIdentical(actual, expected)
def test_concat_multiindex(self):
x = pd.MultiIndex.from_product([[1, 2, 3], ['a', 'b']])
expected = Dataset({'x': x})
actual = concat([expected.isel(x=slice(2)),
expected.isel(x=slice(2, None))], 'x')
assert expected.equals(actual)
assert isinstance(actual.x.to_index(), pd.MultiIndex)
class TestConcatDataArray(TestCase):
def test_concat(self):
ds = Dataset({'foo': (['x', 'y'], np.random.random((2, 3))),
'bar': (['x', 'y'], np.random.random((2, 3)))},
{'x': [0, 1]})
foo = ds['foo']
bar = ds['bar']
# from dataset array:
expected = DataArray(np.array([foo.values, bar.values]),
dims=['w', 'x', 'y'], coords={'x': [0, 1]})
actual = concat([foo, bar], 'w')
self.assertDataArrayEqual(expected, actual)
# from iteration:
grouped = [g for _, g in foo.groupby('x')]
stacked = concat(grouped, ds['x'])
self.assertDataArrayIdentical(foo, stacked)
# with an index as the 'dim' argument
stacked = concat(grouped, ds.indexes['x'])
self.assertDataArrayIdentical(foo, stacked)
actual = concat([foo[0], foo[1]], pd.Index([0, 1])).reset_coords(drop=True)
expected = foo[:2].rename({'x': 'concat_dim'})
self.assertDataArrayIdentical(expected, actual)
actual = concat([foo[0], foo[1]], [0, 1]).reset_coords(drop=True)
expected = foo[:2].rename({'x': 'concat_dim'})
self.assertDataArrayIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'not identical'):
concat([foo, bar], dim='w', compat='identical')
with self.assertRaisesRegexp(ValueError, 'not a valid argument'):
concat([foo, bar], dim='w', data_vars='minimal')
@requires_dask
def test_concat_lazy(self):
import dask.array as da
arrays = [DataArray(
da.from_array(InaccessibleArray(np.zeros((3, 3))), 3),
dims=['x', 'y']) for _ in range(2)]
# should not raise
combined = concat(arrays, dim='z')
self.assertEqual(combined.shape, (2, 3, 3))
self.assertEqual(combined.dims, ('z', 'x', 'y'))
class TestAutoCombine(TestCase):
@requires_dask # only for toolz
def test_auto_combine(self):
objs = [Dataset({'x': [0]}), Dataset({'x': [1]})]
actual = auto_combine(objs)
expected = Dataset({'x': [0, 1]})
self.assertDatasetIdentical(expected, actual)
actual = auto_combine([actual])
self.assertDatasetIdentical(expected, actual)
objs = [Dataset({'x': [0, 1]}), Dataset({'x': [2]})]
actual = auto_combine(objs)
expected = Dataset({'x': [0, 1, 2]})
self.assertDatasetIdentical(expected, actual)
# ensure auto_combine handles non-sorted variables
objs = [Dataset(OrderedDict([('x', ('a', [0])), ('y', ('a', [0]))])),
Dataset(OrderedDict([('y', ('a', [1])), ('x', ('a', [1]))]))]
actual = auto_combine(objs)
expected = Dataset({'x': ('a', [0, 1]), 'y': ('a', [0, 1])})
self.assertDatasetIdentical(expected, actual)
objs = [Dataset({'x': [0], 'y': [0]}), Dataset({'y': [1], 'x': [1]})]
with self.assertRaisesRegexp(ValueError, 'too many .* dimensions'):
auto_combine(objs)
objs = [Dataset({'x': 0}), Dataset({'x': 1})]
with self.assertRaisesRegexp(ValueError, 'cannot infer dimension'):
auto_combine(objs)
objs = [Dataset({'x': [0], 'y': [0]}), Dataset({'x': [0]})]
with self.assertRaises(KeyError):
auto_combine(objs)
@requires_dask # only for toolz
def test_auto_combine_previously_failed(self):
        # In the above scenario, one file is missing, containing
        # one year's data for one variable.
datasets = [Dataset({'a': ('x', [0]), 'x': [0]}),
Dataset({'b': ('x', [0]), 'x': [0]}),
Dataset({'a': ('x', [1]), 'x': [1]})]
expected = Dataset({'a': ('x', [0, 1]), 'b': ('x', [0, np.nan])},
{'x': [0, 1]})
actual = auto_combine(datasets)
self.assertDatasetIdentical(expected, actual)
# Your data includes "time" and "station" dimensions, and each year's
# data has a different set of stations.
datasets = [Dataset({'a': ('x', [2, 3]), 'x': [1, 2]}),
Dataset({'a': ('x', [1, 2]), 'x': [0, 1]})]
expected = Dataset({'a': (('t', 'x'),
[[np.nan, 2, 3], [1, 2, np.nan]])},
{'x': [0, 1, 2]})
actual = auto_combine(datasets, concat_dim='t')
self.assertDatasetIdentical(expected, actual)
@requires_dask # only for toolz
def test_auto_combine_still_fails(self):
# concat can't handle new variables (yet):
# https://github.com/pydata/xarray/issues/508
datasets = [Dataset({'x': 0}, {'y': 0}),
Dataset({'x': 1}, {'y': 1, 'z': 1})]
with self.assertRaises(ValueError):
auto_combine(datasets, 'y')
@requires_dask # only for toolz
def test_auto_combine_no_concat(self):
objs = [Dataset({'x': 0}), Dataset({'y': 1})]
actual = auto_combine(objs)
expected = Dataset({'x': 0, 'y': 1})
self.assertDatasetIdentical(expected, actual)
objs = [Dataset({'x': 0, 'y': 1}), Dataset({'y': np.nan, 'z': 2})]
actual = auto_combine(objs)
expected = Dataset({'x': 0, 'y': 1, 'z': 2})
self.assertDatasetIdentical(expected, actual)
data = Dataset({'x': 0})
actual = auto_combine([data, data, data], concat_dim=None)
self.assertDatasetIdentical(data, actual)
|
apache-2.0
| 3,788,485,069,393,481,700 | 42.452055 | 83 | 0.541173 | false |
apache/libcloud
|
libcloud/compute/drivers/linode.py
|
2
|
60267
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""libcloud driver for the Linode(R) API
This driver implements all libcloud functionality for the Linode API.
Since the API is a bit more fine-grained, create_node abstracts a significant
amount of work (and may take a while to run).
Linode home page http://www.linode.com/
Linode API documentation http://www.linode.com/api/
Alternate bindings for reference http://github.com/tjfontaine/linode-python
Linode(R) is a registered trademark of Linode, LLC.
"""
import os
import re
try:
import simplejson as json
except ImportError:
import json
import itertools
import binascii
from datetime import datetime
from copy import copy
from libcloud.utils.py3 import PY3, httplib
from libcloud.utils.networking import is_private_subnet
from libcloud.common.linode import (API_ROOT, LinodeException,
LinodeConnection, LinodeConnectionV4,
LinodeDisk, LinodeIPAddress,
LinodeExceptionV4,
LINODE_PLAN_IDS, LINODE_DISK_FILESYSTEMS,
LINODE_DISK_FILESYSTEMS_V4,
DEFAULT_API_VERSION)
from libcloud.compute.types import Provider, NodeState, StorageVolumeState
from libcloud.compute.base import NodeDriver, NodeSize, Node, NodeLocation
from libcloud.compute.base import NodeAuthPassword, NodeAuthSSHKey
from libcloud.compute.base import NodeImage, StorageVolume
class LinodeNodeDriver(NodeDriver):
name = 'Linode'
website = 'http://www.linode.com/'
type = Provider.LINODE
def __new__(cls, key, secret=None, secure=True, host=None, port=None,
api_version=DEFAULT_API_VERSION, region=None, **kwargs):
if cls is LinodeNodeDriver:
if api_version == '3.0':
cls = LinodeNodeDriverV3
elif api_version == '4.0':
cls = LinodeNodeDriverV4
else:
raise NotImplementedError(
'No Linode driver found for API version: %s' %
(api_version))
return super(LinodeNodeDriver, cls).__new__(cls)
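# A minimal, hedged usage sketch for obtaining a driver instance (the API key
# below is a placeholder, not a real credential):
#
#   from libcloud.compute.providers import get_driver
#   from libcloud.compute.types import Provider
#   cls = get_driver(Provider.LINODE)
#   driver = cls('my-api-key', api_version='3.0')
#   nodes = driver.list_nodes()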
class LinodeNodeDriverV3(LinodeNodeDriver):
"""libcloud driver for the Linode API
Rough mapping of which is which:
- list_nodes linode.list
- reboot_node linode.reboot
- destroy_node linode.delete
- create_node linode.create, linode.update,
linode.disk.createfromdistribution,
linode.disk.create, linode.config.create,
linode.ip.addprivate, linode.boot
- list_sizes avail.linodeplans
- list_images avail.distributions
- list_locations avail.datacenters
- list_volumes linode.disk.list
- destroy_volume linode.disk.delete
For more information on the Linode API, be sure to read the reference:
http://www.linode.com/api/
"""
connectionCls = LinodeConnection
_linode_plan_ids = LINODE_PLAN_IDS
_linode_disk_filesystems = LINODE_DISK_FILESYSTEMS
features = {'create_node': ['ssh_key', 'password']}
def __init__(self, key, secret=None, secure=True, host=None, port=None,
api_version=None, region=None, **kwargs):
"""Instantiate the driver with the given API key
:param key: the API key to use (required)
:type key: ``str``
:rtype: ``None``
"""
self.datacenter = None
NodeDriver.__init__(self, key)
# Converts Linode's state from DB to a NodeState constant.
LINODE_STATES = {
(-2): NodeState.UNKNOWN, # Boot Failed
(-1): NodeState.PENDING, # Being Created
0: NodeState.PENDING, # Brand New
1: NodeState.RUNNING, # Running
2: NodeState.STOPPED, # Powered Off
3: NodeState.REBOOTING, # Shutting Down
4: NodeState.UNKNOWN # Reserved
}
def list_nodes(self):
"""
List all Linodes that the API key can access
This call will return all Linodes that the API key in use has access
to.
If a node is in this list, rebooting will work; however, creation and
destruction are a separate grant.
:return: List of node objects that the API key can access
:rtype: ``list`` of :class:`Node`
"""
params = {"api_action": "linode.list"}
data = self.connection.request(API_ROOT, params=params).objects[0]
return self._to_nodes(data)
def start_node(self, node):
"""
Boot the given Linode
"""
params = {"api_action": "linode.boot", "LinodeID": node.id}
self.connection.request(API_ROOT, params=params)
return True
def stop_node(self, node):
"""
Shutdown the given Linode
"""
params = {"api_action": "linode.shutdown", "LinodeID": node.id}
self.connection.request(API_ROOT, params=params)
return True
def reboot_node(self, node):
"""
Reboot the given Linode
Will issue a shutdown job followed by a boot job, using the last booted
configuration. In most cases, this will be the only configuration.
:param node: the Linode to reboot
:type node: :class:`Node`
:rtype: ``bool``
"""
params = {"api_action": "linode.reboot", "LinodeID": node.id}
self.connection.request(API_ROOT, params=params)
return True
def destroy_node(self, node):
"""Destroy the given Linode
Will remove the Linode from the account and issue a prorated credit. A
grant for removing Linodes from the account is required, otherwise this
method will fail.
In most cases, all disk images must be removed from a Linode before the
Linode can be removed; however, this call explicitly skips those
safeguards. There is no going back from this method.
:param node: the Linode to destroy
:type node: :class:`Node`
:rtype: ``bool``
"""
params = {"api_action": "linode.delete", "LinodeID": node.id,
"skipChecks": True}
self.connection.request(API_ROOT, params=params)
return True
def create_node(self, name, image, size, auth, location=None, ex_swap=None,
ex_rsize=None, ex_kernel=None, ex_payment=None,
ex_comment=None, ex_private=False, lconfig=None,
lroot=None, lswap=None):
"""Create a new Linode, deploy a Linux distribution, and boot
This call abstracts much of the functionality of provisioning a Linode
and getting it booted. A global grant to add Linodes to the account is
required, as this call will result in a billing charge.
Note that there is a safety valve of 5 Linodes per hour, in order to
prevent a runaway script from ruining your day.
:keyword name: the name to assign the Linode (mandatory)
:type name: ``str``
:keyword image: which distribution to deploy on the Linode (mandatory)
:type image: :class:`NodeImage`
:keyword size: the plan size to create (mandatory)
:type size: :class:`NodeSize`
:keyword auth: an SSH key or root password (mandatory)
:type auth: :class:`NodeAuthSSHKey` or :class:`NodeAuthPassword`
:keyword location: which datacenter to create the Linode in
:type location: :class:`NodeLocation`
:keyword ex_swap: size of the swap partition in MB (128)
:type ex_swap: ``int``
:keyword ex_rsize: size of the root partition in MB (plan size - swap).
:type ex_rsize: ``int``
:keyword ex_kernel: a kernel ID from avail.kernels (Latest 2.6 Stable).
:type ex_kernel: ``str``
:keyword ex_payment: one of 1, 12, or 24; subscription length (1)
:type ex_payment: ``int``
:keyword ex_comment: a small comment for the configuration (libcloud)
:type ex_comment: ``str``
:keyword ex_private: whether or not to request a private IP (False)
:type ex_private: ``bool``
:keyword lconfig: what to call the configuration (generated)
:type lconfig: ``str``
:keyword lroot: what to call the root image (generated)
:type lroot: ``str``
:keyword lswap: what to call the swap space (generated)
:type lswap: ``str``
:return: Node representing the newly-created Linode
:rtype: :class:`Node`
"""
auth = self._get_and_check_auth(auth)
# Pick a location (resolves LIBCLOUD-41 in JIRA)
if location:
chosen = location.id
elif self.datacenter:
chosen = self.datacenter
else:
raise LinodeException(0xFB, "Need to select a datacenter first")
# Step 0: Parameter validation before we purchase
# We're especially careful here so we don't fail after purchase, rather
# than getting halfway through the process and having the API fail.
# Plan ID
plans = self.list_sizes()
if size.id not in [p.id for p in plans]:
raise LinodeException(0xFB, "Invalid plan ID -- avail.plans")
# Payment schedule
payment = "1" if not ex_payment else str(ex_payment)
if payment not in ["1", "12", "24"]:
raise LinodeException(0xFB, "Invalid subscription (1, 12, 24)")
ssh = None
root = None
# SSH key and/or root password
if isinstance(auth, NodeAuthSSHKey):
ssh = auth.pubkey # pylint: disable=no-member
elif isinstance(auth, NodeAuthPassword):
root = auth.password
if not ssh and not root:
raise LinodeException(0xFB, "Need SSH key or root password")
if root is not None and len(root) < 6:
raise LinodeException(0xFB, "Root password is too short")
# Swap size
try:
swap = 128 if not ex_swap else int(ex_swap)
except Exception:
raise LinodeException(0xFB, "Need an integer swap size")
# Root partition size
imagesize = (size.disk - swap) if not ex_rsize else\
int(ex_rsize)
if (imagesize + swap) > size.disk:
raise LinodeException(0xFB, "Total disk images are too big")
# Distribution ID
distros = self.list_images()
if image.id not in [d.id for d in distros]:
raise LinodeException(0xFB,
"Invalid distro -- avail.distributions")
# Kernel
if ex_kernel:
kernel = ex_kernel
else:
if image.extra['64bit']:
# For a list of available kernel ids, see
# https://www.linode.com/kernels/
kernel = 138
else:
kernel = 137
params = {"api_action": "avail.kernels"}
kernels = self.connection.request(API_ROOT, params=params).objects[0]
if kernel not in [z["KERNELID"] for z in kernels]:
raise LinodeException(0xFB, "Invalid kernel -- avail.kernels")
# Comments
comments = "Created by Apache libcloud <https://www.libcloud.org>" if\
not ex_comment else ex_comment
# Step 1: linode.create
params = {
"api_action": "linode.create",
"DatacenterID": chosen,
"PlanID": size.id,
"PaymentTerm": payment
}
data = self.connection.request(API_ROOT, params=params).objects[0]
linode = {"id": data["LinodeID"]}
# Step 1b. linode.update to rename the Linode
params = {
"api_action": "linode.update",
"LinodeID": linode["id"],
"Label": name
}
self.connection.request(API_ROOT, params=params)
# Step 1c. linode.ip.addprivate if it was requested
if ex_private:
params = {
"api_action": "linode.ip.addprivate",
"LinodeID": linode["id"]
}
self.connection.request(API_ROOT, params=params)
# Step 1d. Labels
# use the linode id as the name can be up to 63 chars and the labels
# are limited to 48 chars
label = {
"lconfig": "[%s] Configuration Profile" % linode["id"],
"lroot": "[%s] %s Disk Image" % (linode["id"], image.name),
"lswap": "[%s] Swap Space" % linode["id"]
}
if lconfig:
label['lconfig'] = lconfig
if lroot:
label['lroot'] = lroot
if lswap:
label['lswap'] = lswap
# Step 2: linode.disk.createfromdistribution
if not root:
root = binascii.b2a_base64(os.urandom(8)).decode('ascii').strip()
params = {
"api_action": "linode.disk.createfromdistribution",
"LinodeID": linode["id"],
"DistributionID": image.id,
"Label": label["lroot"],
"Size": imagesize,
"rootPass": root,
}
if ssh:
params["rootSSHKey"] = ssh
data = self.connection.request(API_ROOT, params=params).objects[0]
linode["rootimage"] = data["DiskID"]
# Step 3: linode.disk.create for swap
params = {
"api_action": "linode.disk.create",
"LinodeID": linode["id"],
"Label": label["lswap"],
"Type": "swap",
"Size": swap
}
data = self.connection.request(API_ROOT, params=params).objects[0]
linode["swapimage"] = data["DiskID"]
# Step 4: linode.config.create for main profile
disks = "%s,%s,,,,,,," % (linode["rootimage"], linode["swapimage"])
params = {
"api_action": "linode.config.create",
"LinodeID": linode["id"],
"KernelID": kernel,
"Label": label["lconfig"],
"Comments": comments,
"DiskList": disks
}
if ex_private:
params['helper_network'] = True
params['helper_distro'] = True
data = self.connection.request(API_ROOT, params=params).objects[0]
linode["config"] = data["ConfigID"]
# Step 5: linode.boot
params = {
"api_action": "linode.boot",
"LinodeID": linode["id"],
"ConfigID": linode["config"]
}
self.connection.request(API_ROOT, params=params)
# Make a node out of it and hand it back
params = {"api_action": "linode.list", "LinodeID": linode["id"]}
data = self.connection.request(API_ROOT, params=params).objects[0]
nodes = self._to_nodes(data)
if len(nodes) == 1:
node = nodes[0]
if getattr(auth, "generated", False):
node.extra['password'] = auth.password
return node
return None
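    # A hedged sketch of a typical create_node() call against this v3 driver
    # (all identifiers below are placeholders chosen for illustration):
    #
    #   size = driver.list_sizes()[0]
    #   image = driver.list_images()[0]
    #   location = driver.list_locations()[0]
    #   node = driver.create_node(name='www1', image=image, size=size,
    #                             location=location,
    #                             auth=NodeAuthPassword('s3cr3t-password'))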
def ex_resize_node(self, node, size):
"""Resizes a Linode from one plan to another
Immediately shuts the Linode down, charges/credits the account,
        and issues a migration to another host server.
Requires a size (numeric), which is the desired PlanID available from
avail.LinodePlans()
        After the resize is complete, the node needs to be booted.
"""
params = {"api_action": "linode.resize", "LinodeID": node.id,
"PlanID": size}
self.connection.request(API_ROOT, params=params)
return True
def ex_start_node(self, node):
# NOTE: This method is here for backward compatibility reasons after
# this method was promoted to be part of the standard compute API in
# Libcloud v2.7.0
return self.start_node(node=node)
def ex_stop_node(self, node):
# NOTE: This method is here for backward compatibility reasons after
# this method was promoted to be part of the standard compute API in
# Libcloud v2.7.0
return self.stop_node(node=node)
def ex_rename_node(self, node, name):
"""Renames a node"""
params = {
"api_action": "linode.update",
"LinodeID": node.id,
"Label": name
}
self.connection.request(API_ROOT, params=params)
return True
def list_sizes(self, location=None):
"""
List available Linode plans
Gets the sizes that can be used for creating a Linode. Since available
Linode plans vary per-location, this method can also be passed a
location to filter the availability.
:keyword location: the facility to retrieve plans in
:type location: :class:`NodeLocation`
:rtype: ``list`` of :class:`NodeSize`
"""
params = {"api_action": "avail.linodeplans"}
data = self.connection.request(API_ROOT, params=params).objects[0]
sizes = []
for obj in data:
n = NodeSize(id=obj["PLANID"], name=obj["LABEL"], ram=obj["RAM"],
disk=(obj["DISK"] * 1024), bandwidth=obj["XFER"],
price=obj["PRICE"], driver=self.connection.driver)
sizes.append(n)
return sizes
def list_images(self):
"""
List available Linux distributions
Retrieve all Linux distributions that can be deployed to a Linode.
:rtype: ``list`` of :class:`NodeImage`
"""
params = {"api_action": "avail.distributions"}
data = self.connection.request(API_ROOT, params=params).objects[0]
distros = []
for obj in data:
i = NodeImage(id=obj["DISTRIBUTIONID"],
name=obj["LABEL"],
driver=self.connection.driver,
extra={'pvops': obj['REQUIRESPVOPSKERNEL'],
'64bit': obj['IS64BIT']})
distros.append(i)
return distros
def list_locations(self):
"""
List available facilities for deployment
Retrieve all facilities that a Linode can be deployed in.
:rtype: ``list`` of :class:`NodeLocation`
"""
params = {"api_action": "avail.datacenters"}
data = self.connection.request(API_ROOT, params=params).objects[0]
nl = []
for dc in data:
country = None
if "USA" in dc["LOCATION"]:
country = "US"
elif "UK" in dc["LOCATION"]:
country = "GB"
elif "JP" in dc["LOCATION"]:
country = "JP"
else:
country = "??"
nl.append(NodeLocation(dc["DATACENTERID"],
dc["LOCATION"],
country,
self))
return nl
def linode_set_datacenter(self, dc):
"""
Set the default datacenter for Linode creation
Since Linodes must be created in a facility, this function sets the
default that :class:`create_node` will use. If a location keyword is
not passed to :class:`create_node`, this method must have already been
used.
:keyword dc: the datacenter to create Linodes in unless specified
:type dc: :class:`NodeLocation`
:rtype: ``bool``
"""
did = dc.id
params = {"api_action": "avail.datacenters"}
data = self.connection.request(API_ROOT, params=params).objects[0]
for datacenter in data:
            if did == datacenter["DATACENTERID"]:
self.datacenter = did
return
dcs = ", ".join([d["DATACENTERID"] for d in data])
self.datacenter = None
raise LinodeException(0xFD, "Invalid datacenter (use one of %s)" % dcs)
def destroy_volume(self, volume):
"""
        Destroys a disk volume for the Linode. The Linode id is to be provided
        as extra["LINODEID"] within :class:`StorageVolume`. It can be retrieved
by :meth:`libcloud.compute.drivers.linode.LinodeNodeDriver\
.ex_list_volumes`.
:param volume: Volume to be destroyed
:type volume: :class:`StorageVolume`
:rtype: ``bool``
"""
if not isinstance(volume, StorageVolume):
raise LinodeException(0xFD, "Invalid volume instance")
if volume.extra["LINODEID"] is None:
raise LinodeException(0xFD, "Missing LinodeID")
params = {
"api_action": "linode.disk.delete",
"LinodeID": volume.extra["LINODEID"],
"DiskID": volume.id,
}
self.connection.request(API_ROOT, params=params)
return True
def ex_create_volume(self, size, name, node, fs_type):
"""
Create disk for the Linode.
:keyword size: Size of volume in megabytes (required)
:type size: ``int``
:keyword name: Name of the volume to be created
:type name: ``str``
:keyword node: Node to attach volume to.
:type node: :class:`Node`
:keyword fs_type: The formatted type of this disk. Valid types are:
ext3, ext4, swap, raw
:type fs_type: ``str``
:return: StorageVolume representing the newly-created volume
:rtype: :class:`StorageVolume`
"""
# check node
if not isinstance(node, Node):
raise LinodeException(0xFD, "Invalid node instance")
# check space available
total_space = node.extra['TOTALHD']
existing_volumes = self.ex_list_volumes(node)
used_space = 0
for volume in existing_volumes:
used_space = used_space + volume.size
available_space = total_space - used_space
if available_space < size:
raise LinodeException(0xFD, "Volume size too big. Available space\
%d" % available_space)
# check filesystem type
if fs_type not in self._linode_disk_filesystems:
raise LinodeException(0xFD, "Not valid filesystem type")
params = {
"api_action": "linode.disk.create",
"LinodeID": node.id,
"Label": name,
"Type": fs_type,
"Size": size
}
data = self.connection.request(API_ROOT, params=params).objects[0]
volume = data["DiskID"]
# Make a volume out of it and hand it back
params = {
"api_action": "linode.disk.list",
"LinodeID": node.id,
"DiskID": volume
}
data = self.connection.request(API_ROOT, params=params).objects[0]
return self._to_volumes(data)[0]
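    # A hedged sketch of adding an extra ext4 disk to an existing node with
    # this v3 driver (size and label are placeholders):
    #
    #   volume = driver.ex_create_volume(size=1024, name='data-disk',
    #                                    node=node, fs_type='ext4')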
def ex_list_volumes(self, node, disk_id=None):
"""
        List existing disk volumes for the given Linode.
:keyword node: Node to list disk volumes for. (required)
:type node: :class:`Node`
:keyword disk_id: Id for specific disk volume. (optional)
:type disk_id: ``int``
:rtype: ``list`` of :class:`StorageVolume`
"""
if not isinstance(node, Node):
raise LinodeException(0xFD, "Invalid node instance")
params = {
"api_action": "linode.disk.list",
"LinodeID": node.id
}
# Add param if disk_id was specified
if disk_id is not None:
params["DiskID"] = disk_id
data = self.connection.request(API_ROOT, params=params).objects[0]
return self._to_volumes(data)
def _to_volumes(self, objs):
"""
        Convert returned JSON volumes into StorageVolume instances
:keyword objs: ``list`` of JSON dictionaries representing the
StorageVolumes
:type objs: ``list``
:return: ``list`` of :class:`StorageVolume`s
"""
volumes = {}
for o in objs:
vid = o["DISKID"]
volumes[vid] = vol = StorageVolume(id=vid, name=o["LABEL"],
size=int(o["SIZE"]),
driver=self.connection.driver)
vol.extra = copy(o)
return list(volumes.values())
def _to_nodes(self, objs):
"""Convert returned JSON Linodes into Node instances
:keyword objs: ``list`` of JSON dictionaries representing the Linodes
:type objs: ``list``
:return: ``list`` of :class:`Node`s"""
# Get the IP addresses for the Linodes
nodes = {}
batch = []
for o in objs:
lid = o["LINODEID"]
nodes[lid] = n = Node(id=lid, name=o["LABEL"], public_ips=[],
private_ips=[],
state=self.LINODE_STATES[o["STATUS"]],
driver=self.connection.driver)
n.extra = copy(o)
n.extra["PLANID"] = self._linode_plan_ids.get(o.get("TOTALRAM"))
batch.append({"api_action": "linode.ip.list", "LinodeID": lid})
# Avoid batch limitation
ip_answers = []
args = [iter(batch)] * 25
if PY3:
izip_longest = itertools.zip_longest # pylint: disable=no-member
else:
izip_longest = getattr(itertools, 'izip_longest', _izip_longest)
for twenty_five in izip_longest(*args):
twenty_five = [q for q in twenty_five if q]
params = {"api_action": "batch",
"api_requestArray": json.dumps(twenty_five)}
req = self.connection.request(API_ROOT, params=params)
if not req.success() or len(req.objects) == 0:
return None
ip_answers.extend(req.objects)
# Add the returned IPs to the nodes and return them
for ip_list in ip_answers:
for ip in ip_list:
lid = ip["LINODEID"]
which = nodes[lid].public_ips if ip["ISPUBLIC"] == 1 else\
nodes[lid].private_ips
which.append(ip["IPADDRESS"])
return list(nodes.values())
class LinodeNodeDriverV4(LinodeNodeDriver):
connectionCls = LinodeConnectionV4
_linode_disk_filesystems = LINODE_DISK_FILESYSTEMS_V4
LINODE_STATES = {
'running': NodeState.RUNNING,
'stopped': NodeState.STOPPED,
'provisioning': NodeState.STARTING,
'offline': NodeState.STOPPED,
'booting': NodeState.STARTING,
'rebooting': NodeState.REBOOTING,
'shutting_down': NodeState.STOPPING,
'deleting': NodeState.PENDING,
'migrating': NodeState.MIGRATING,
'rebuilding': NodeState.UPDATING,
'cloning': NodeState.MIGRATING,
'restoring': NodeState.PENDING,
'resizing': NodeState.RECONFIGURING
}
LINODE_DISK_STATES = {
'ready': StorageVolumeState.AVAILABLE,
'not ready': StorageVolumeState.CREATING,
'deleting': StorageVolumeState.DELETING
}
LINODE_VOLUME_STATES = {
'creating': StorageVolumeState.CREATING,
'active': StorageVolumeState.AVAILABLE,
'resizing': StorageVolumeState.UPDATING,
'contact_support': StorageVolumeState.UNKNOWN
}
def list_nodes(self):
"""
Returns a list of Linodes the API key in use has access
to view.
:return: List of node objects
:rtype: ``list`` of :class:`Node`
"""
data = self._paginated_request('/v4/linode/instances', 'data')
return [self._to_node(obj) for obj in data]
def list_sizes(self):
"""
Returns a list of Linode Types
        :rtype: ``list`` of :class:`NodeSize`
"""
data = self._paginated_request('/v4/linode/types', 'data')
return [self._to_size(obj) for obj in data]
def list_images(self):
"""
Returns a list of images
:rtype: ``list`` of :class:`NodeImage`
"""
data = self._paginated_request('/v4/images', 'data')
return [self._to_image(obj) for obj in data]
def list_locations(self):
"""
Lists the Regions available for Linode services
:rtype: ``list`` of :class:`NodeLocation`
"""
data = self._paginated_request('/v4/regions', 'data')
return [self._to_location(obj) for obj in data]
def start_node(self, node):
"""Boots a node the API Key has permission to modify
:param node: the node to start
:type node: :class:`Node`
:rtype: ``bool``
"""
if not isinstance(node, Node):
raise LinodeExceptionV4("Invalid node instance")
response = self.connection.request('/v4/linode/instances/%s/boot'
% node.id,
method='POST')
return response.status == httplib.OK
def ex_start_node(self, node):
# NOTE: This method is here for backward compatibility reasons after
# this method was promoted to be part of the standard compute API in
# Libcloud v2.7.0
return self.start_node(node=node)
def stop_node(self, node):
"""Shuts down a a node the API Key has permission to modify.
:param node: the Linode to destroy
:type node: :class:`Node`
:rtype: ``bool``
"""
if not isinstance(node, Node):
raise LinodeExceptionV4("Invalid node instance")
response = self.connection.request('/v4/linode/instances/%s/shutdown'
% node.id,
method='POST')
return response.status == httplib.OK
def ex_stop_node(self, node):
# NOTE: This method is here for backward compatibility reasons after
# this method was promoted to be part of the standard compute API in
# Libcloud v2.7.0
return self.stop_node(node=node)
def destroy_node(self, node):
"""Deletes a node the API Key has permission to `read_write`
:param node: the Linode to destroy
:type node: :class:`Node`
:rtype: ``bool``
"""
if not isinstance(node, Node):
raise LinodeExceptionV4("Invalid node instance")
response = self.connection.request('/v4/linode/instances/%s'
% node.id,
method='DELETE')
return response.status == httplib.OK
def reboot_node(self, node):
"""Reboots a node the API Key has permission to modify.
        :param node: the Linode to reboot
:type node: :class:`Node`
:rtype: ``bool``
"""
if not isinstance(node, Node):
raise LinodeExceptionV4("Invalid node instance")
response = self.connection.request('/v4/linode/instances/%s/reboot'
% node.id,
method='POST')
return response.status == httplib.OK
def create_node(self, location, size, image=None,
name=None, root_pass=None, ex_authorized_keys=None,
ex_authorized_users=None, ex_tags=None,
ex_backups_enabled=False, ex_private_ip=False):
"""Creates a Linode Instance.
In order for this request to complete successfully,
the user must have the `add_linodes` grant as this call
will incur a charge.
:param location: which region to create the node in
:type location: :class:`NodeLocation`
:param size: the plan size to create
:type size: :class:`NodeSize`
:keyword image: which distribution to deploy on the node
:type image: :class:`NodeImage`
:keyword name: the name to assign to node.\
Must start with an alpha character.\
May only consist of alphanumeric characters,\
dashes (-), underscores (_) or periods (.).\
Cannot have two dashes (--), underscores (__) or periods (..) in a row.
:type name: ``str``
:keyword root_pass: the root password (required if image is provided)
:type root_pass: ``str``
:keyword ex_authorized_keys: a list of public SSH keys
:type ex_authorized_keys: ``list`` of ``str``
:keyword ex_authorized_users: a list of usernames.\
If the usernames have associated SSH keys,\
the keys will be appended to the root users `authorized_keys`
:type ex_authorized_users: ``list`` of ``str``
:keyword ex_tags: list of tags for the node
:type ex_tags: ``list`` of ``str``
:keyword ex_backups_enabled: whether to be enrolled \
in the Linode Backup service (False)
:type ex_backups_enabled: ``bool``
:keyword ex_private_ip: whether or not to request a private IP
:type ex_private_ip: ``bool``
:return: Node representing the newly-created node
:rtype: :class:`Node`
"""
if not isinstance(location, NodeLocation):
raise LinodeExceptionV4("Invalid location instance")
if not isinstance(size, NodeSize):
raise LinodeExceptionV4("Invalid size instance")
attr = {'region': location.id,
'type': size.id,
'private_ip': ex_private_ip,
'backups_enabled': ex_backups_enabled,
}
if image is not None:
if root_pass is None:
raise LinodeExceptionV4("root password required "
"when providing an image")
attr['image'] = image.id
attr['root_pass'] = root_pass
if name is not None:
valid_name = r'^[a-zA-Z]((?!--|__|\.\.)[a-zA-Z0-9-_.])+$'
if not re.match(valid_name, name):
raise LinodeExceptionV4("Invalid name")
attr['label'] = name
if ex_authorized_keys is not None:
attr['authorized_keys'] = list(ex_authorized_keys)
if ex_authorized_users is not None:
attr['authorized_users'] = list(ex_authorized_users)
if ex_tags is not None:
attr['tags'] = list(ex_tags)
response = self.connection.request('/v4/linode/instances',
data=json.dumps(attr),
method='POST').object
return self._to_node(response)
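    # Illustrative usage sketch (not part of the original driver; the driver,
    # location, size and image objects below are assumed to come from
    # list_locations()/list_sizes()/list_images()):
    #
    #   node = driver.create_node(location=location, size=size, image=image,
    #                             name='example-node', root_pass='s3cr3t-pass',
    #                             ex_tags=['web'])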
def ex_get_node(self, node_id):
"""
Return a Node object based on a node ID.
        :param node_id: Node's ID
        :type node_id: ``str``
        :return: The requested node
        :rtype: :class:`Node`
"""
response = self.connection.request('/v4/linode/instances/%s'
% node_id).object
return self._to_node(response)
def ex_list_disks(self, node):
"""
List disks associated with the node.
        :param node: Node to list disks for (required)
:type node: :class:`Node`
:rtype: ``list`` of :class:`LinodeDisk`
"""
if not isinstance(node, Node):
raise LinodeExceptionV4("Invalid node instance")
data = self._paginated_request('/v4/linode/instances/%s/disks'
% node.id, 'data')
return [self._to_disk(obj) for obj in data]
def ex_create_disk(self, size, name, node, fs_type,
image=None, ex_root_pass=None, ex_authorized_keys=None,
ex_authorized_users=None, ex_read_only=False):
"""
Adds a new disk to node
:param size: Size of disk in megabytes (required)
:type size: ``int``
:param name: Name of the disk to be created (required)
:type name: ``str``
:param node: Node to attach disk to (required)
:type node: :class:`Node`
        :param fs_type: The filesystem type for this disk. Valid types are:
ext3, ext4, swap, raw, initrd
:type fs_type: ``str``
:keyword image: Image to deploy the volume from
:type image: :class:`NodeImage`
        :keyword ex_root_pass: root password, required \
if an image is provided
:type ex_root_pass: ``str``
:keyword ex_authorized_keys: a list of SSH keys
:type ex_authorized_keys: ``list`` of ``str``
:keyword ex_authorized_users: a list of usernames \
that will have their SSH keys,\
if any, automatically appended \
to the root user's ~/.ssh/authorized_keys file.
:type ex_authorized_users: ``list`` of ``str``
:keyword ex_read_only: if true, this disk is read-only
:type ex_read_only: ``bool``
:return: LinodeDisk representing the newly-created disk
:rtype: :class:`LinodeDisk`
"""
attr = {'label': str(name),
'size': int(size),
'filesystem': fs_type,
'read_only': ex_read_only}
if not isinstance(node, Node):
raise LinodeExceptionV4("Invalid node instance")
if fs_type not in self._linode_disk_filesystems:
raise LinodeExceptionV4("Not valid filesystem type")
if image is not None:
if not isinstance(image, NodeImage):
raise LinodeExceptionV4("Invalid image instance")
# when an image is set, root pass must be set as well
if ex_root_pass is None:
raise LinodeExceptionV4("root_pass is required when "
"deploying an image")
attr['image'] = image.id
attr['root_pass'] = ex_root_pass
if ex_authorized_keys is not None:
attr['authorized_keys'] = list(ex_authorized_keys)
if ex_authorized_users is not None:
attr['authorized_users'] = list(ex_authorized_users)
response = self.connection.request('/v4/linode/instances/%s/disks'
% node.id,
data=json.dumps(attr),
method='POST').object
return self._to_disk(response)
def ex_destroy_disk(self, node, disk):
"""
Destroys disk for the given node.
:param node: The Node the disk is attached to. (required)
:type node: :class:`Node`
:param disk: LinodeDisk to be destroyed (required)
:type disk: :class:`LinodeDisk`
:rtype: ``bool``
"""
if not isinstance(node, Node):
raise LinodeExceptionV4("Invalid node instance")
if not isinstance(disk, LinodeDisk):
raise LinodeExceptionV4("Invalid disk instance")
if node.state != self.LINODE_STATES['stopped']:
raise LinodeExceptionV4("Node needs to be stopped"
" before disk is destroyed")
response = self.connection.request('/v4/linode/instances/%s/disks/%s'
% (node.id, disk.id),
method='DELETE')
return response.status == httplib.OK
def list_volumes(self):
"""Get all volumes of the account
        :rtype: `list` of :class:`StorageVolume`
"""
data = self._paginated_request('/v4/volumes', 'data')
return [self._to_volume(obj) for obj in data]
def create_volume(self, name, size, location=None, node=None, tags=None):
"""Creates a volume and optionally attaches it to a node.
:param name: The name to be given to volume (required).\
Must start with an alpha character. \
May only consist of alphanumeric characters,\
dashes (-), underscores (_)\
Cannot have two dashes (--), underscores (__) in a row.
:type name: `str`
:param size: Size in gigabytes (required)
:type size: `int`
        :keyword location: Location to create the volume in.\
        Required if node is not given.
:type location: :class:`NodeLocation`
        :keyword node: Node to attach the volume to
        :type node: :class:`Node`
:keyword tags: tags to apply to volume
:type tags: `list` of `str`
        :rtype: :class:`StorageVolume`
"""
valid_name = '^[a-zA-Z]((?!--|__)[a-zA-Z0-9-_])+$'
if not re.match(valid_name, name):
raise LinodeExceptionV4("Invalid name")
attr = {
'label': name,
'size': int(size),
}
if node is not None:
if not isinstance(node, Node):
raise LinodeExceptionV4("Invalid node instance")
attr['linode_id'] = int(node.id)
else:
# location is only required if a node is not given
if location:
if not isinstance(location, NodeLocation):
raise LinodeExceptionV4("Invalid location instance")
attr['region'] = location.id
else:
raise LinodeExceptionV4("Region must be provided "
"when node is not")
if tags is not None:
attr['tags'] = list(tags)
response = self.connection.request('/v4/volumes',
data=json.dumps(attr),
method='POST').object
return self._to_volume(response)
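    # Illustrative usage sketch (hypothetical objects; not part of the
    # original driver). Pass a node to attach the volume at creation time,
    # or a location to create a detached volume:
    #
    #   vol = driver.create_volume(name='data-1', size=20, node=node)
    #   vol = driver.create_volume(name='data-2', size=20, location=location)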
def attach_volume(self, node, volume, persist_across_boots=True):
"""Attaches a volume to a node.
Volume and node must be located in the same region
:param node: Node to attach the volume to(required)
:type node: :class:`Node`
:param volume: Volume to be attached (required)
:type volume: :class:`StorageVolume`
        :keyword persist_across_boots: Whether the volume should be \
        attached to the node across boots
        :type persist_across_boots: `bool`
        :rtype: :class:`StorageVolume`
"""
if not isinstance(volume, StorageVolume):
raise LinodeExceptionV4("Invalid volume instance")
if not isinstance(node, Node):
raise LinodeExceptionV4("Invalid node instance")
if volume.extra['linode_id'] is not None:
raise LinodeExceptionV4("Volume is already attached to a node")
if node.extra['location'] != volume.extra['location']:
raise LinodeExceptionV4("Volume and node "
"must be on the same region")
attr = {
'linode_id': int(node.id),
'persist_across_boots': persist_across_boots
}
response = self.connection.request('/v4/volumes/%s/attach'
% volume.id,
data=json.dumps(attr),
method='POST').object
return self._to_volume(response)
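    # Illustrative attach/detach flow (hypothetical objects; not part of the
    # original driver). attach_volume() requires a detached volume in the
    # same region as the node:
    #
    #   vol = driver.attach_volume(node, vol)
    #   driver.detach_volume(vol)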
def detach_volume(self, volume):
"""Detaches a volume from a node.
:param volume: Volume to be detached (required)
:type volume: :class:`StorageVolume`
:rtype: ``bool``
"""
if not isinstance(volume, StorageVolume):
raise LinodeExceptionV4("Invalid volume instance")
if volume.extra['linode_id'] is None:
raise LinodeExceptionV4("Volume is already detached")
response = self.connection.request('/v4/volumes/%s/detach'
% volume.id,
method='POST')
return response.status == httplib.OK
def destroy_volume(self, volume):
"""Destroys the volume given.
:param volume: Volume to be deleted (required)
:type volume: :class:`StorageVolume`
:rtype: ``bool``
"""
if not isinstance(volume, StorageVolume):
raise LinodeExceptionV4("Invalid volume instance")
if volume.extra['linode_id'] is not None:
raise LinodeExceptionV4("Volume must be detached"
" before it can be deleted.")
response = self.connection.request('/v4/volumes/%s'
% volume.id,
method='DELETE')
return response.status == httplib.OK
def ex_resize_volume(self, volume, size):
"""Resizes the volume given.
:param volume: Volume to be resized
:type volume: :class:`StorageVolume`
:param size: new volume size in gigabytes, must be\
greater than current size
:type size: `int`
:rtype: ``bool``
"""
if not isinstance(volume, StorageVolume):
raise LinodeExceptionV4("Invalid volume instance")
if volume.size >= size:
raise LinodeExceptionV4("Volumes can only be resized up")
attr = {
'size': size
}
response = self.connection.request('/v4/volumes/%s/resize'
% volume.id,
data=json.dumps(attr),
method='POST')
return response.status == httplib.OK
def ex_clone_volume(self, volume, name):
"""Clones the volume given
:param volume: Volume to be cloned
:type volume: :class:`StorageVolume`
:param name: new cloned volume name
:type name: `str`
:rtype: :class:`StorageVolume`
"""
if not isinstance(volume, StorageVolume):
raise LinodeExceptionV4("Invalid volume instance")
attr = {
'label': name
}
response = self.connection.request('/v4/volumes/%s/clone'
% volume.id,
data=json.dumps(attr),
method='POST').object
return self._to_volume(response)
def ex_get_volume(self, volume_id):
"""
Return a Volume object based on a volume ID.
:param volume_id: Volume's id
:type volume_id: ``str``
:return: A StorageVolume object for the volume
:rtype: :class:`StorageVolume`
"""
response = self.connection.request('/v4/volumes/%s'
% volume_id).object
return self._to_volume(response)
def create_image(self, disk, name=None, description=None):
"""Creates a private image from a LinodeDisk.
Images are limited to three per account.
:param disk: LinodeDisk to create the image from (required)
:type disk: :class:`LinodeDisk`
:keyword name: A name for the image.\
Defaults to the name of the disk \
it is being created from if not provided
:type name: `str`
:keyword description: A description of the image
:type description: `str`
:return: The newly created NodeImage
:rtype: :class:`NodeImage`
"""
if not isinstance(disk, LinodeDisk):
raise LinodeExceptionV4("Invalid disk instance")
attr = {
'disk_id': int(disk.id),
'label': name,
'description': description
}
response = self.connection.request('/v4/images',
data=json.dumps(attr),
method='POST').object
return self._to_image(response)
def delete_image(self, image):
"""Deletes a private image
:param image: NodeImage to delete (required)
:type image: :class:`NodeImage`
:rtype: ``bool``
"""
if not isinstance(image, NodeImage):
raise LinodeExceptionV4("Invalid image instance")
response = self.connection.request('/v4/images/%s'
% image.id,
method='DELETE')
return response.status == httplib.OK
def ex_list_addresses(self):
"""List IP addresses
:return: LinodeIPAddress list
:rtype: `list` of :class:`LinodeIPAddress`
"""
data = self._paginated_request('/v4/networking/ips', 'data')
return [self._to_address(obj) for obj in data]
def ex_list_node_addresses(self, node):
"""List all IPv4 addresses attached to node
:param node: Node to list IP addresses
:type node: :class:`Node`
:return: LinodeIPAddress list
:rtype: `list` of :class:`LinodeIPAddress`
"""
if not isinstance(node, Node):
raise LinodeExceptionV4("Invalid node instance")
response = self.connection.request('/v4/linode/instances/%s/ips'
% node.id).object
return self._to_addresses(response)
def ex_allocate_private_address(self, node, address_type='ipv4'):
"""Allocates a private IPv4 address to node.Only ipv4 is currently supported
:param node: Node to attach the IP address
:type node: :class:`Node`
:keyword address_type: Type of IP address
:type address_type: `str`
:return: The newly created LinodeIPAddress
:rtype: :class:`LinodeIPAddress`
"""
if not isinstance(node, Node):
raise LinodeExceptionV4("Invalid node instance")
# Only ipv4 is currently supported
if address_type != 'ipv4':
raise LinodeExceptionV4("Address type not supported")
# Only one private IP address can be allocated
if len(node.private_ips) >= 1:
raise LinodeExceptionV4("Nodes can have up to one private IP")
attr = {
'public': False,
'type': address_type
}
response = self.connection.request('/v4/linode/instances/%s/ips'
% node.id,
data=json.dumps(attr),
method='POST').object
return self._to_address(response)
def ex_share_address(self, node, addresses):
"""Shares an IP with another node.This can be used to allow one Linode
to begin serving requests should another become unresponsive.
:param node: Node to share the IP addresses with
:type node: :class:`Node`
:keyword addresses: List of IP addresses to share
        :type addresses: `list` of :class:`LinodeIPAddress`
:rtype: ``bool``
"""
if not isinstance(node, Node):
raise LinodeExceptionV4("Invalid node instance")
if not all(isinstance(address, LinodeIPAddress)
for address in addresses):
raise LinodeExceptionV4("Invalid address instance")
attr = {
'ips': [address.inet for address in addresses],
'linode_id': int(node.id)
}
response = self.connection.request('/v4/networking/ipv4/share',
data=json.dumps(attr),
method='POST')
return response.status == httplib.OK
def ex_resize_node(self, node, size, allow_auto_disk_resize=False):
"""
        Resizes a node the API Key has read_write permission on
        to a different Type.
The following requirements must be met:
- The node must not have a pending migration
- The account cannot have an outstanding balance
- The node must not have more disk allocation than the new size allows
:param node: the Linode to resize
:type node: :class:`Node`
:param size: the size of the new node
:type size: :class:`NodeSize`
:keyword allow_auto_disk_resize: Automatically resize disks \
when resizing a node.
:type allow_auto_disk_resize: ``bool``
:rtype: ``bool``
"""
if not isinstance(node, Node):
raise LinodeExceptionV4("Invalid node instance")
if not isinstance(size, NodeSize):
raise LinodeExceptionV4("Invalid node size")
attr = {'type': size.id,
'allow_auto_disk_resize': allow_auto_disk_resize}
response = self.connection.request(
'/v4/linode/instances/%s/resize' % node.id,
data=json.dumps(attr),
method='POST')
return response.status == httplib.OK
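    # Illustrative usage sketch (hypothetical objects; not part of the
    # original driver): pick a larger NodeSize and let Linode grow the
    # disks to match:
    #
    #   bigger = [s for s in driver.list_sizes() if s.ram >= 8192][0]
    #   driver.ex_resize_node(node, bigger, allow_auto_disk_resize=True)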
def ex_rename_node(self, node, name):
"""Renames a node
        :param node: the Linode to rename
:type node: :class:`Node`
:param name: the node's new name
:type name: ``str``
:return: Changed Node
:rtype: :class:`Node`
"""
if not isinstance(node, Node):
raise LinodeExceptionV4("Invalid node instance")
attr = {'label': name}
response = self.connection.request(
'/v4/linode/instances/%s' % node.id,
data=json.dumps(attr),
method='PUT').object
return self._to_node(response)
def _to_node(self, data):
extra = {
'tags': data['tags'],
'location': data['region'],
'ipv6': data['ipv6'],
'hypervisor': data['hypervisor'],
'specs': data['specs'],
'alerts': data['alerts'],
'backups': data['backups'],
'watchdog_enabled': data['watchdog_enabled']
}
public_ips = [ip for ip in data['ipv4'] if not is_private_subnet(ip)]
private_ips = [ip for ip in data['ipv4'] if is_private_subnet(ip)]
return Node(
id=data['id'],
name=data['label'],
state=self.LINODE_STATES[data['status']],
public_ips=public_ips,
private_ips=private_ips,
driver=self,
size=data['type'],
image=data['image'],
created_at=self._to_datetime(data['created']),
extra=extra)
def _to_datetime(self, strtime):
return datetime.strptime(strtime, "%Y-%m-%dT%H:%M:%S")
def _to_size(self, data):
extra = {
'class': data['class'],
'monthly_price': data['price']['monthly'],
'addons': data['addons'],
'successor': data['successor'],
'transfer': data['transfer'],
'vcpus': data['vcpus'],
'gpus': data['gpus']
}
return NodeSize(
id=data['id'],
name=data['label'],
ram=data['memory'],
disk=data['disk'],
bandwidth=data['network_out'],
price=data['price']['hourly'],
driver=self,
extra=extra
)
def _to_image(self, data):
extra = {
'type': data['type'],
'description': data['description'],
'created': self._to_datetime(data['created']),
'created_by': data['created_by'],
'is_public': data['is_public'],
'size': data['size'],
'eol': data['eol'],
'vendor': data['vendor'],
}
return NodeImage(
id=data['id'],
name=data['label'],
driver=self,
extra=extra
)
def _to_location(self, data):
extra = {
'status': data['status'],
'capabilities': data['capabilities'],
'resolvers': data['resolvers']
}
return NodeLocation(
id=data['id'],
name=data['id'],
country=data['country'].upper(),
driver=self,
extra=extra)
def _to_volume(self, data):
extra = {
'created': self._to_datetime(data['created']),
'tags': data['tags'],
'location': data['region'],
'linode_id': data['linode_id'],
'linode_label': data['linode_label'],
'state': self.LINODE_VOLUME_STATES[data['status']],
'filesystem_path': data['filesystem_path']
}
return StorageVolume(
id=str(data['id']),
name=data['label'],
size=data['size'],
driver=self,
extra=extra)
def _to_disk(self, data):
return LinodeDisk(
id=data['id'],
state=self.LINODE_DISK_STATES[data['status']],
name=data['label'],
filesystem=data['filesystem'],
size=data['size'],
driver=self,
)
def _to_address(self, data):
extra = {
'gateway': data['gateway'],
'subnet_mask': data['subnet_mask'],
'prefix': data['prefix'],
'rdns': data['rdns'],
'node_id': data['linode_id'],
'region': data['region'],
}
return LinodeIPAddress(
inet=data['address'],
public=data['public'],
version=data['type'],
driver=self,
extra=extra
)
def _to_addresses(self, data):
addresses = data['ipv4']['public'] + data['ipv4']['private']
return [self._to_address(address) for address in addresses]
def _paginated_request(self, url, obj, params=None):
"""
Perform multiple calls in order to have a full list of elements when
the API responses are paginated.
:param url: API endpoint
:type url: ``str``
:param obj: Result object key
:type obj: ``str``
:param params: Request parameters
:type params: ``dict``
:return: ``list`` of API response objects
:rtype: ``list``
"""
objects = []
params = params if params is not None else {}
ret = self.connection.request(url, params=params).object
data = list(ret.get(obj, []))
current_page = int(ret.get('page', 1))
num_of_pages = int(ret.get('pages', 1))
objects.extend(data)
for page in range(current_page + 1, num_of_pages + 1):
# add param to request next page
params['page'] = page
ret = self.connection.request(url, params=params).object
data = list(ret.get(obj, []))
objects.extend(data)
return objects
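    # Illustrative pagination sketch (hypothetical response shape; not part
    # of the original driver). A paginated endpoint returns something like
    # {'data': [...], 'page': 1, 'pages': 3}; _paginated_request() then
    # re-requests the same URL with params={'page': 2}, {'page': 3}, ...
    # and concatenates every 'data' list into a single Python list.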
def _izip_longest(*args, **kwds):
"""Taken from Python docs
http://docs.python.org/library/itertools.html#itertools.izip
"""
fillvalue = kwds.get('fillvalue')
def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):
yield counter() # yields the fillvalue, or raises IndexError
fillers = itertools.repeat(fillvalue)
iters = [itertools.chain(it, sentinel(), fillers) for it in args]
try:
for tup in itertools.izip(*iters): # pylint: disable=no-member
yield tup
except IndexError:
pass
|
apache-2.0
| -4,174,464,189,524,054,500 | 34.264482 | 84 | 0.551181 | false |
CloudBrewery/duplicity-swiftkeys
|
duplicity/patchdir.py
|
1
|
21607
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2002 Ben Escoto <ben@emerose.org>
# Copyright 2007 Kenneth Loafman <kenneth@loafman.com>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from future_builtins import filter, map
import re #@UnusedImport
import types
import os
import tempfile
from duplicity import tarfile #@UnusedImport
from duplicity import librsync #@UnusedImport
from duplicity import log #@UnusedImport
from duplicity import diffdir
from duplicity import selection
from duplicity import tempdir
from duplicity import util #@UnusedImport
from duplicity.path import * #@UnusedWildImport
from duplicity.lazy import * #@UnusedWildImport
"""Functions for patching of directories"""
class PatchDirException( Exception ):
pass
def Patch( base_path, difftar_fileobj ):
"""Patch given base_path and file object containing delta"""
diff_tarfile = tarfile.TarFile( "arbitrary", "r", difftar_fileobj )
patch_diff_tarfile( base_path, diff_tarfile )
assert not difftar_fileobj.close()
def Patch_from_iter( base_path, fileobj_iter, restrict_index=() ):
"""Patch given base_path and iterator of delta file objects"""
diff_tarfile = TarFile_FromFileobjs( fileobj_iter )
patch_diff_tarfile( base_path, diff_tarfile, restrict_index )
def patch_diff_tarfile( base_path, diff_tarfile, restrict_index=() ):
"""Patch given Path object using delta tarfile (as in tarfile.TarFile)
If restrict_index is set, ignore any deltas in diff_tarfile that
don't start with restrict_index.
"""
if base_path.exists():
path_iter = selection.Select( base_path ).set_iter()
else:
path_iter = empty_iter() # probably untarring full backup
diff_path_iter = difftar2path_iter( diff_tarfile )
if restrict_index:
diff_path_iter = filter_path_iter( diff_path_iter, restrict_index )
collated = diffdir.collate2iters( path_iter, diff_path_iter )
ITR = IterTreeReducer( PathPatcher, [base_path] )
for basis_path, diff_ropath in collated:
if basis_path:
log.Info(_("Patching %s") % (util.ufn(basis_path.get_relative_path())),
log.InfoCode.patch_file_patching,
util.escape( basis_path.get_relative_path() ) )
ITR( basis_path.index, basis_path, diff_ropath )
else:
log.Info(_("Patching %s") % (util.ufn(diff_ropath.get_relative_path())),
log.InfoCode.patch_file_patching,
util.escape( diff_ropath.get_relative_path() ) )
ITR( diff_ropath.index, basis_path, diff_ropath )
ITR.Finish()
base_path.setdata()
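# Illustrative usage sketch (hypothetical paths; not part of the original
# module): apply a single difftar volume on top of an existing restore
# target directory.
#
#   base = Path("/tmp/restore-target")
#   fp = open("duplicity-inc.difftar", "rb")
#   Patch(base, fp)  # Patch() closes fp when it is done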
def empty_iter():
if 0:
        yield 1 # this never happens, but fools Python into generator treatment
def filter_path_iter( path_iter, index ):
"""Rewrite path elements of path_iter so they start with index
    Discard any that don't start with index, and remove the index
prefix from the rest.
"""
assert isinstance( index, tuple ) and index, index
l = len( index )
for path in path_iter:
if path.index[:l] == index:
path.index = path.index[l:]
yield path
def difftar2path_iter( diff_tarfile ):
"""Turn file-like difftarobj into iterator of ROPaths"""
tar_iter = iter( diff_tarfile )
multivol_fileobj = None
# The next tar_info is stored in this one element list so
# Multivol_Filelike below can update it. Any StopIterations will
# be passed upwards.
tarinfo_list = [tar_iter.next()]
while 1:
        # This section is relevant when a multivol diff is last in tar
if not tarinfo_list[0]:
raise StopIteration
if multivol_fileobj and not multivol_fileobj.at_end:
multivol_fileobj.close() # aborting in middle of multivol
continue
index, difftype, multivol = get_index_from_tarinfo( tarinfo_list[0] )
ropath = ROPath( index )
ropath.init_from_tarinfo( tarinfo_list[0] )
ropath.difftype = difftype
if difftype == "deleted":
ropath.type = None
elif ropath.isreg():
if multivol:
multivol_fileobj = Multivol_Filelike( diff_tarfile, tar_iter,
tarinfo_list, index )
ropath.setfileobj( multivol_fileobj )
yield ropath
continue # Multivol_Filelike will reset tarinfo_list
else:
ropath.setfileobj( diff_tarfile.extractfile( tarinfo_list[0] ) )
yield ropath
tarinfo_list[0] = tar_iter.next()
def get_index_from_tarinfo( tarinfo ):
"""Return (index, difftype, multivol) pair from tarinfo object"""
for prefix in ["snapshot/", "diff/", "deleted/",
"multivol_diff/", "multivol_snapshot/"]:
tiname = util.get_tarinfo_name( tarinfo )
if tiname.startswith( prefix ):
name = tiname[len( prefix ):] # strip prefix
if prefix.startswith( "multivol" ):
if prefix == "multivol_diff/":
difftype = "diff"
else:
difftype = "snapshot"
multivol = 1
name, num_subs = \
re.subn( "(?s)^multivol_(diff|snapshot)/?(.*)/[0-9]+$",
"\\2", tiname )
if num_subs != 1:
raise PatchDirException(u"Unrecognized diff entry %s" %
util.ufn(tiname))
else:
difftype = prefix[:-1] # strip trailing /
name = tiname[len( prefix ):]
if name.endswith( "/" ):
name = name[:-1] # strip trailing /'s
multivol = 0
break
else:
raise PatchDirException(u"Unrecognized diff entry %s" %
util.ufn(tiname))
if name == "." or name == "":
index = ()
else:
index = tuple( name.split( "/" ) )
if '..' in index:
raise PatchDirException(u"Tar entry %s contains '..'. Security "
"violation" % util.ufn(tiname))
return ( index, difftype, multivol )
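# Illustrative examples of the mapping performed above (hypothetical entry
# names; not part of the original module):
#
#   "snapshot/home/user/file"        -> (("home", "user", "file"), "snapshot", 0)
#   "deleted/home/user/file"         -> (("home", "user", "file"), "deleted", 0)
#   "multivol_diff/home/user/file/3" -> (("home", "user", "file"), "diff", 1)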
class Multivol_Filelike:
"""Emulate a file like object from multivols
Maintains a buffer about the size of a volume. When it is read()
to the end, pull in more volumes as desired.
"""
def __init__( self, tf, tar_iter, tarinfo_list, index ):
"""Initializer. tf is TarFile obj, tarinfo is first tarinfo"""
self.tf, self.tar_iter = tf, tar_iter
self.tarinfo_list = tarinfo_list # must store as list for write access
self.index = index
self.buffer = ""
self.at_end = 0
def read( self, length= -1 ):
"""Read length bytes from file"""
if length < 0:
while self.addtobuffer():
pass
real_len = len( self.buffer )
else:
while len( self.buffer ) < length:
if not self.addtobuffer():
break
real_len = min( len( self.buffer ), length )
result = self.buffer[:real_len]
self.buffer = self.buffer[real_len:]
return result
def addtobuffer( self ):
"""Add next chunk to buffer"""
if self.at_end:
return None
index, difftype, multivol = get_index_from_tarinfo( #@UnusedVariable
self.tarinfo_list[0] )
if not multivol or index != self.index:
# we've moved on
# the following communicates next tarinfo to difftar2path_iter
self.at_end = 1
return None
fp = self.tf.extractfile( self.tarinfo_list[0] )
self.buffer += fp.read()
fp.close()
try:
self.tarinfo_list[0] = self.tar_iter.next()
except StopIteration:
self.tarinfo_list[0] = None
self.at_end = 1
return None
return 1
def close( self ):
"""If not at end, read remaining data"""
if not self.at_end:
while 1:
self.buffer = ""
if not self.addtobuffer():
break
self.at_end = 1
class PathPatcher( ITRBranch ):
"""Used by DirPatch, process the given basis and diff"""
def __init__( self, base_path ):
"""Set base_path, Path of root of tree"""
self.base_path = base_path
self.dir_diff_ropath = None
def start_process( self, index, basis_path, diff_ropath ):
"""Start processing when diff_ropath is a directory"""
if not ( diff_ropath and diff_ropath.isdir() ):
assert index == (), util.uindex(index) # should only happen for first elem
self.fast_process( index, basis_path, diff_ropath )
return
if not basis_path:
basis_path = self.base_path.new_index( index )
assert not basis_path.exists()
basis_path.mkdir() # Need place for later files to go into
elif not basis_path.isdir():
basis_path.delete()
basis_path.mkdir()
self.dir_basis_path = basis_path
self.dir_diff_ropath = diff_ropath
def end_process( self ):
"""Copy directory permissions when leaving tree"""
if self.dir_diff_ropath:
self.dir_diff_ropath.copy_attribs( self.dir_basis_path )
def can_fast_process( self, index, basis_path, diff_ropath ):
"""No need to recurse if diff_ropath isn't a directory"""
return not ( diff_ropath and diff_ropath.isdir() )
def fast_process( self, index, basis_path, diff_ropath ):
"""For use when neither is a directory"""
if not diff_ropath:
return # no change
elif not basis_path:
if diff_ropath.difftype == "deleted":
pass # already deleted
else:
# just copy snapshot over
diff_ropath.copy( self.base_path.new_index( index ) )
elif diff_ropath.difftype == "deleted":
if basis_path.isdir():
basis_path.deltree()
else:
basis_path.delete()
elif not basis_path.isreg():
if basis_path.isdir():
basis_path.deltree()
else:
basis_path.delete()
diff_ropath.copy( basis_path )
else:
assert diff_ropath.difftype == "diff", diff_ropath.difftype
basis_path.patch_with_attribs( diff_ropath )
class TarFile_FromFileobjs:
"""Like a tarfile.TarFile iterator, but read from multiple fileobjs"""
def __init__( self, fileobj_iter ):
"""Make new tarinfo iterator
fileobj_iter should be an iterator of file objects opened for
reading. They will be closed at end of reading.
"""
self.fileobj_iter = fileobj_iter
self.tarfile, self.tar_iter = None, None
self.current_fp = None
def __iter__( self ):
return self
def set_tarfile( self ):
"""Set tarfile from next file object, or raise StopIteration"""
if self.current_fp:
assert not self.current_fp.close()
self.current_fp = self.fileobj_iter.next()
self.tarfile = util.make_tarfile("r", self.current_fp)
self.tar_iter = iter( self.tarfile )
def next( self ):
if not self.tarfile:
self.set_tarfile()
try:
return self.tar_iter.next()
except StopIteration:
assert not self.tarfile.close()
self.set_tarfile()
return self.tar_iter.next()
def extractfile( self, tarinfo ):
"""Return data associated with given tarinfo"""
return self.tarfile.extractfile( tarinfo )
def collate_iters( iter_list ):
"""Collate iterators by index
Input is a list of n iterators each of which must iterate elements
with an index attribute. The elements must come out in increasing
order, and the index should be a tuple itself.
The output is an iterator which yields tuples where all elements
in the tuple have the same index, and the tuple has n elements in
it. If any iterator lacks an element with that index, the tuple
will have None in that spot.
"""
# overflow[i] means that iter_list[i] has been exhausted
# elems[i] is None means that it is time to replenish it.
iter_num = len( iter_list )
if iter_num == 2:
return diffdir.collate2iters( iter_list[0], iter_list[1] )
overflow = [None] * iter_num
elems = overflow[:]
def setrorps( overflow, elems ):
"""Set the overflow and rorps list"""
for i in range( iter_num ):
if not overflow[i] and elems[i] is None:
try:
elems[i] = iter_list[i].next()
except StopIteration:
overflow[i] = 1
elems[i] = None
def getleastindex( elems ):
"""Return the first index in elems, assuming elems isn't empty"""
return min( map( lambda elem: elem.index, filter( lambda x: x, elems ) ) )
def yield_tuples( iter_num, overflow, elems ):
while 1:
setrorps( overflow, elems )
if None not in overflow:
break
index = getleastindex( elems )
yieldval = []
for i in range( iter_num ):
if elems[i] and elems[i].index == index:
yieldval.append( elems[i] )
elems[i] = None
else:
yieldval.append( None )
yield tuple( yieldval )
return yield_tuples( iter_num, overflow, elems )
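# Illustrative collation example (hypothetical indexed elements; not part of
# the original module). Given two iterators yielding elements with indexes
# ('a',), ('b',) and ('b',), ('c',) respectively, the collated output is:
#
#   (elem_a, None), (elem_b1, elem_b2), (None, elem_c)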
class IndexedTuple:
"""Like a tuple, but has .index (used previously by collate_iters)"""
def __init__( self, index, sequence ):
self.index = index
self.data = tuple( sequence )
def __len__( self ):
return len( self.data )
def __getitem__( self, key ):
"""This only works for numerical keys (easier this way)"""
return self.data[key]
def __lt__( self, other ):
return self.__cmp__( other ) == -1
def __le__( self, other ):
return self.__cmp__( other ) != 1
def __ne__( self, other ):
return not self.__eq__( other )
def __gt__( self, other ):
return self.__cmp__( other ) == 1
def __ge__( self, other ):
return self.__cmp__( other ) != -1
def __cmp__( self, other ):
assert isinstance( other, IndexedTuple )
if self.index < other.index:
return - 1
elif self.index == other.index:
return 0
else:
return 1
def __eq__( self, other ):
if isinstance( other, IndexedTuple ):
return self.index == other.index and self.data == other.data
elif type( other ) is types.TupleType:
return self.data == other
else:
return None
def __str__( self ):
return "(%s).%s" % ( ", ".join( map( str, self.data ) ), self.index )
def normalize_ps( patch_sequence ):
"""Given an sequence of ROPath deltas, remove blank and unnecessary
The sequence is assumed to be in patch order (later patches apply
to earlier ones). A patch is unnecessary if a later one doesn't
require it (for instance, any patches before a "delete" are
unnecessary).
"""
result_list = []
i = len( patch_sequence ) - 1
while i >= 0:
delta = patch_sequence[i]
if delta is not None:
# skip blank entries
result_list.insert( 0, delta )
if delta.difftype != "diff":
break
i -= 1
return result_list
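# Illustrative example (hypothetical deltas; not part of the original
# module): for a patch sequence [snapshot, diff, None, delete, diff], the
# backwards scan keeps [delete, diff] and stops, since nothing applied
# before a non-"diff" entry such as "delete" can affect the final result.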
def patch_seq2ropath( patch_seq ):
"""Apply the patches in patch_seq, return single ropath"""
first = patch_seq[0]
assert first.difftype != "diff", "First patch in sequence " \
"%s was a diff" % patch_seq
if not first.isreg():
# No need to bother with data if not regular file
assert len(patch_seq) == 1, "Patch sequence isn't regular, but " \
"has %d entries" % len(patch_seq)
return first.get_ropath()
current_file = first.open( "rb" )
for delta_ropath in patch_seq[1:]:
assert delta_ropath.difftype == "diff", delta_ropath.difftype
if not isinstance( current_file, file ):
"""
librsync insists on a real file object, which we create manually
by using the duplicity.tempdir to tell us where.
"""
tempfp = tempfile.TemporaryFile( dir=tempdir.default().dir() )
util.copyfileobj( current_file, tempfp )
assert not current_file.close()
tempfp.seek( 0 )
current_file = tempfp
current_file = librsync.PatchedFile( current_file,
delta_ropath.open( "rb" ) )
result = patch_seq[-1].get_ropath()
result.setfileobj( current_file )
return result
def integrate_patch_iters( iter_list ):
"""Combine a list of iterators of ropath patches
The iter_list should be sorted in patch order, and the elements in
    each iterator need to be ordered by index. The output will be an
iterator of the final ROPaths in index order.
"""
collated = collate_iters( iter_list )
for patch_seq in collated:
normalized = normalize_ps(patch_seq)
try:
final_ropath = patch_seq2ropath(normalized)
if final_ropath.exists():
# otherwise final patch was delete
yield final_ropath
except Exception as e:
filename = normalized[-1].get_ropath().get_relative_path()
log.Warn(_("Error '%s' patching %s") %
(util.uexc(e), util.ufn(filename)),
log.WarningCode.cannot_process,
util.escape(filename))
def tarfiles2rop_iter( tarfile_list, restrict_index=() ):
"""Integrate tarfiles of diffs into single ROPath iter
Then filter out all the diffs in that index which don't start with
the restrict_index.
"""
diff_iters = [difftar2path_iter(x) for x in tarfile_list]
if restrict_index:
# Apply filter before integration
diff_iters = [filter_path_iter(x, restrict_index) for x in diff_iters]
return integrate_patch_iters( diff_iters )
def Write_ROPaths( base_path, rop_iter ):
"""Write out ropaths in rop_iter starting at base_path
Returns 1 if something was actually written, 0 otherwise.
"""
ITR = IterTreeReducer( ROPath_IterWriter, [base_path] )
return_val = 0
for ropath in rop_iter:
return_val = 1
ITR( ropath.index, ropath )
ITR.Finish()
base_path.setdata()
return return_val
class ROPath_IterWriter( ITRBranch ):
"""Used in Write_ROPaths above
We need to use an ITR because we have to update the
permissions/times of directories after we write the files in them.
"""
def __init__( self, base_path ):
"""Set base_path, Path of root of tree"""
self.base_path = base_path
self.dir_diff_ropath = None
self.dir_new_path = None
def start_process( self, index, ropath ):
"""Write ropath. Only handles the directory case"""
if not ropath.isdir():
# Base may not be a directory, but rest should
assert ropath.index == (), ropath.index
new_path = self.base_path.new_index( index )
if ropath.exists():
if new_path.exists():
new_path.deltree()
ropath.copy( new_path )
self.dir_new_path = self.base_path.new_index( index )
if self.dir_new_path.exists() and not globals.force:
# base may exist, but nothing else
assert index == (), index
else:
self.dir_new_path.mkdir()
self.dir_diff_ropath = ropath
def end_process( self ):
"""Update information of a directory when leaving it"""
if self.dir_diff_ropath:
self.dir_diff_ropath.copy_attribs( self.dir_new_path )
def can_fast_process( self, index, ropath ):
"""Can fast process (no recursion) if ropath isn't a directory"""
log.Info( _( "Writing %s of type %s" ) %
(util.ufn(ropath.get_relative_path()), ropath.type),
log.InfoCode.patch_file_writing,
"%s %s" % ( util.escape( ropath.get_relative_path() ), ropath.type ) )
return not ropath.isdir()
def fast_process( self, index, ropath ):
"""Write non-directory ropath to destination"""
if ropath.exists():
ropath.copy( self.base_path.new_index( index ) )
|
gpl-2.0
| -456,289,875,767,890,050 | 35.560068 | 87 | 0.582589 | false |
chungjjang80/FRETBursts
|
fretbursts/tests/test_burstlib.py
|
1
|
40546
|
#
# FRETBursts - A single-molecule FRET burst analysis toolkit.
#
# Copyright (C) 2014 Antonino Ingargiola <tritemio@gmail.com>
#
"""
Module containing automated unit tests for FRETBursts.
Running the tests requires `py.test`.
"""
from __future__ import division
from builtins import range, zip
from collections import namedtuple
import pytest
import numpy as np
try:
import matplotlib
except ImportError:
has_matplotlib = False # OK to run tests without matplotlib
else:
has_matplotlib = True
matplotlib.use('Agg') # but if matplotlib is installed, use Agg
try:
import numba
except ImportError:
has_numba = False
else:
has_numba = True
import fretbursts.background as bg
import fretbursts.burstlib as bl
import fretbursts.burstlib_ext as bext
from fretbursts import loader
from fretbursts import select_bursts
from fretbursts.ph_sel import Ph_sel
from fretbursts.phtools import phrates
if has_matplotlib:
import fretbursts.burst_plot as bplt
# data subdir in the notebook folder
DATASETS_DIR = u'notebooks/data/'
def _alex_process(d):
loader.alex_apply_period(d)
d.calc_bg(bg.exp_fit, time_s=30, tail_min_us=300)
d.burst_search(L=10, m=10, F=7)
def load_dataset_1ch(process=True):
fn = "0023uLRpitc_NTP_20dT_0.5GndCl.hdf5"
fname = DATASETS_DIR + fn
d = loader.photon_hdf5(fname)
if process:
_alex_process(d)
return d
def load_dataset_8ch():
fn = "12d_New_30p_320mW_steer_3.hdf5"
fname = DATASETS_DIR + fn
d = loader.photon_hdf5(fname)
d.calc_bg(bg.exp_fit, time_s=30, tail_min_us=300)
d.burst_search(L=10, m=10, F=7)
return d
def load_fake_pax():
fn = "0023uLRpitc_NTP_20dT_0.5GndCl.hdf5"
fname = DATASETS_DIR + fn
d = loader.photon_hdf5(fname)
d.add(ALEX=False, meas_type='PAX')
loader.alex_apply_period(d)
d.calc_bg(bg.exp_fit, time_s=30, tail_min_us='auto')
d.burst_search(L=10, m=10, F=6)
return d
@pytest.fixture(scope="module", params=[
load_dataset_1ch,
load_dataset_8ch,
])
def data(request):
load_func = request.param
d = load_func()
return d
@pytest.fixture(scope="module")
def data_8ch(request):
d = load_dataset_8ch()
return d
@pytest.fixture(scope="module")
def data_1ch(request):
d = load_dataset_1ch()
return d
##
# List comparison functions
#
def list_equal(list1, list2):
"""Test numerical equality of all the elements in the two lists.
"""
return np.all([val1 == val2 for val1, val2 in zip(list1, list2)])
def list_array_equal(list1, list2):
"""Test numerical equality between two lists of arrays.
"""
return np.all([np.all(arr1 == arr2) for arr1, arr2 in zip(list1, list2)])
def list_array_allclose(list1, list2):
"""Test float closeness (np.allclose) between two lists of arrays.
"""
return np.all([np.allclose(arr1, arr2) for arr1, arr2 in zip(list1, list2)])
##
# Test functions
#
def test_bg_compatlayer_for_obsolete_attrs():
d = load_dataset_1ch(process=False)
attrs = ('bg_dd', 'bg_ad', 'bg_da', 'bg_aa',
'rate_m', 'rate_dd', 'rate_ad', 'rate_da', 'rate_aa')
for attr in attrs:
with pytest.raises(RuntimeError):
getattr(d, attr)
_alex_process(d)
for attr in attrs:
assert isinstance(getattr(d, attr), list)
def test_ph_times_compact(data_1ch):
"""Test calculation of ph_times_compact."""
def isinteger(x):
return np.equal(np.mod(x, 1), 0)
ich = 0
d = data_1ch
ph_d = d.get_ph_times(ph_sel=Ph_sel(Dex='DAem'))
ph_a = d.get_ph_times(ph_sel=Ph_sel(Aex='DAem'))
ph_dc = d.get_ph_times(ph_sel=Ph_sel(Dex='DAem'), compact=True)
ph_ac = d.get_ph_times(ph_sel=Ph_sel(Aex='DAem'), compact=True)
    # Test that the difference of ph and ph_compact is a multiple of
# the complementary excitation period duration
Dex_void = bl._excitation_width(d._D_ON_multich[ich], d.alex_period)
Aex_void = bl._excitation_width(d._A_ON_multich[ich], d.alex_period)
assert isinteger((ph_d - ph_dc) / Dex_void).all()
assert isinteger((ph_a - ph_ac) / Aex_void).all()
# Test that alternation histogram does not have "gaps" for ph_compact
bins = np.linspace(0, d.alex_period, num=101)
hist_dc, _ = np.histogram(ph_dc % d.alex_period, bins=bins)
hist_ac, _ = np.histogram(ph_ac % d.alex_period, bins=bins)
assert (hist_dc > 0).all()
assert (hist_ac > 0).all()
def test_time_min_max():
"""Test time_min and time_max for ALEX data."""
d = load_dataset_1ch(process=False)
ich = 0
assert d.time_max == d.ph_times_t[ich].max() * d.clk_p
assert d.time_min == d.ph_times_t[ich].min() * d.clk_p
del d._time_max, d._time_min
_alex_process(d)
assert d.time_max == d.ph_times_m[ich][-1] * d.clk_p
assert d.time_min == d.ph_times_m[ich][0] * d.clk_p
d.delete('ph_times_m')
del d._time_max, d._time_min
assert d.time_max == d.mburst[0].stop[-1] * d.clk_p
assert d.time_min == d.mburst[0].start[0] * d.clk_p
def test_time_min_max_multispot(data_8ch):
"""Test time_min and time_max for multi-spot data."""
d = data_8ch
assert d.time_max == max(t[-1] for t in d.ph_times_m) * d.clk_p
assert d.time_min == min(t[0] for t in d.ph_times_m) * d.clk_p
def test_aex_dex_ratio(data_1ch):
"""Test methods computing relative D and A alternation periods durations.
"""
d = data_1ch
Dx, Ax = d.D_ON, d.A_ON
a1 = d._aex_fraction()
a2 = (Ax[1] - Ax[0]) / (Ax[1] - Ax[0] + Dx[1] - Dx[0])
assert a1 == a2
r1 = d._aex_dex_ratio()
r2 = (Ax[1] - Ax[0]) / (Dx[1] - Dx[0])
assert r1 == r2
assert (a1 / (1 - a1)) == r1
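# Worked example of the identity asserted above (hypothetical numbers, not
# part of the original tests): with D_ON = (0, 300) and A_ON = (300, 400),
# a = 100 / (100 + 300) = 0.25 and r = 100 / 300, so
# a / (1 - a) = 0.25 / 0.75 = 1/3 = r.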
def test_burst_size_pax():
d = load_fake_pax()
aex_dex_ratio, alpha_d = d._aex_dex_ratio(), 1 - d._aex_fraction()
nd, na = d.nd[0], d.na[0]
nda = d.nda[0]
naa = d.naa[0] - d.nar[0] * aex_dex_ratio
# Test burst size during Dex
b1 = d.burst_sizes_pax_ich(add_aex=False)
b2 = d.burst_sizes_ich(add_naa=False)
b3 = nd + na
assert (b1 == b2).all()
assert (b1 == b3).all()
# Test naa
naa2 = d.get_naa_corrected()
naa3 = d._get_naa_ich()
assert (naa == naa2).all()
assert (naa == naa3).all()
# Test add_naa
b1 = d.burst_sizes_ich(add_naa=True)
b2 = nd + na + naa
assert (b1 == b2).all()
# Test add_aex with no duty-cycle correction
b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=False)
b2 = nd + na + nda + d.naa[0]
b3 = nd + na + nda + naa + na * aex_dex_ratio
assert np.allclose(b1, b2)
assert np.allclose(b1, b3)
# Test add_aex with duty-cycle correction
b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True)
b2 = nd + na + nda + na * aex_dex_ratio + naa / alpha_d
assert np.allclose(b1, b2)
# Test add_aex with duty-cycle correction, donor_ref
b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True, donor_ref=True)
b2 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True, donor_ref=False)
assert np.allclose(b1, b2)
# Test add_aex with duty-cycle correction, gamma, beta
gamma = 0.7
beta = 0.85
b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True,
gamma=gamma, beta=beta, donor_ref=True)
b2 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True,
gamma=gamma, beta=beta, donor_ref=False)
assert np.allclose(b1 * gamma, b2)
b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True,
gamma=gamma, beta=beta, donor_ref=False)
b2 = (gamma * (nd + nda) + na * (1 + aex_dex_ratio) +
naa / (alpha_d * beta))
assert np.allclose(b1, b2)
d.leakage = 0.1
nd, na = d.nd[0], d.na[0]
nda = d.nda[0]
naa = d.naa[0] - d.nar[0] * aex_dex_ratio
# Test add_aex with duty-cycle correction, gamma, beta
gamma = 0.7
beta = 0.85
b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True,
gamma=gamma, beta=beta, donor_ref=True)
b2 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True,
gamma=gamma, beta=beta, donor_ref=False)
assert np.allclose(b1 * gamma, b2)
b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True,
gamma=gamma, beta=beta, donor_ref=False)
b2 = (gamma * (nd + nda) + na * (1 + aex_dex_ratio) +
naa / (alpha_d * beta))
assert np.allclose(b1, b2)
def test_bg_calc(data):
"""Smoke test bg_calc() and test deletion of bg fields.
"""
data.calc_bg(bg.exp_fit, time_s=30, tail_min_us=300)
assert 'bg_auto_th_us0' not in data
assert 'bg_auto_F_bg' not in data
assert 'bg_th_us_user' in data
data.calc_bg(bg.exp_fit, time_s=30, tail_min_us='auto', F_bg=1.7)
assert 'bg_auto_th_us0' in data
assert 'bg_auto_F_bg' in data
assert 'bg_th_us_user' not in data
data.calc_bg(bg.exp_fit, time_s=30, tail_min_us='auto', F_bg=1.7,
fit_allph=False)
streams = [s for s in data.ph_streams if s != Ph_sel('all')]
bg_t = [np.sum(data.bg[s][ich] for s in streams) for ich in range(data.nch)]
assert list_array_equal(data.bg[Ph_sel('all')], bg_t)
def test_ph_streams(data):
sel = [Ph_sel('all'), Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem')]
if data.alternated:
sel.extend([Ph_sel(Aex='Aem'), Ph_sel(Aex='Dem')])
for s in sel:
assert s in data.ph_streams
def test_bg_from(data):
"""Test the method .bg_from() for all the ph_sel combinations.
"""
d = data
for sel in d.ph_streams:
bg = d.bg_from(ph_sel=sel)
assert list_array_equal(bg, d.bg[sel])
if not (data.alternated):
assert list_array_equal(d.bg_from(Ph_sel('all')),
d.bg_from(Ph_sel(Dex='DAem')))
return
bg_dd = d.bg_from(ph_sel=Ph_sel(Dex='Dem'))
bg_ad = d.bg_from(ph_sel=Ph_sel(Dex='Aem'))
bg = d.bg_from(ph_sel=Ph_sel(Dex='DAem'))
assert list_array_equal(bg, [b1 + b2 for b1, b2 in zip(bg_dd, bg_ad)])
bg_aa = d.bg_from(ph_sel=Ph_sel(Aex='Aem'))
bg_da = d.bg_from(ph_sel=Ph_sel(Aex='Dem'))
bg = d.bg_from(ph_sel=Ph_sel(Aex='DAem'))
assert list_array_equal(bg, [b1 + b2 for b1, b2 in zip(bg_aa, bg_da)])
bg = d.bg_from(ph_sel=Ph_sel(Dex='Dem', Aex='Dem'))
assert list_array_equal(bg, [b1 + b2 for b1, b2 in zip(bg_dd, bg_da)])
bg = d.bg_from(ph_sel=Ph_sel(Dex='Aem', Aex='Aem'))
assert list_array_equal(bg, [b1 + b2 for b1, b2 in zip(bg_ad, bg_aa)])
bg = d.bg_from(ph_sel=Ph_sel(Dex='DAem'))
assert list_array_equal(bg, [b1 + b2 for b1, b2 in zip(bg_dd, bg_ad)])
bg = d.bg_from(ph_sel=Ph_sel(Dex='DAem', Aex='Aem'))
bg2 = [b1 + b2 + b3 for b1, b2, b3 in zip(bg_dd, bg_ad, bg_aa)]
assert list_array_equal(bg, bg2)
def test_iter_ph_times(data):
"""Test method .iter_ph_times() for all the ph_sel combinations.
"""
# TODO add all the ph_sel combinations like in test_bg_from()
d = data
assert list_array_equal(d.ph_times_m, d.iter_ph_times())
for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Dex='Dem'))):
if d.alternated:
assert (ph == d.ph_times_m[ich][d.D_em[ich] * d.D_ex[ich]]).all()
else:
assert (ph == d.ph_times_m[ich][~d.A_em[ich]]).all()
for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Dex='Aem'))):
if d.alternated:
assert (ph == d.ph_times_m[ich][d.A_em[ich] * d.D_ex[ich]]).all()
else:
assert (ph == d.ph_times_m[ich][d.A_em[ich]]).all()
if d.alternated:
for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Aex='Dem'))):
assert (ph == d.ph_times_m[ich][d.D_em[ich] * d.A_ex[ich]]).all()
for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Aex='Aem'))):
assert (ph == d.ph_times_m[ich][d.A_em[ich] * d.A_ex[ich]]).all()
for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Dex='DAem'))):
assert (ph == d.ph_times_m[ich][d.D_ex[ich]]).all()
for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Aex='DAem'))):
assert (ph == d.ph_times_m[ich][d.A_ex[ich]]).all()
for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Dex='Dem', Aex='Dem'))):
assert (ph == d.ph_times_m[ich][d.D_em[ich]]).all()
for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Dex='Aem', Aex='Aem'))):
assert (ph == d.ph_times_m[ich][d.A_em[ich]]).all()
for ich, ph in enumerate(d.iter_ph_times(
Ph_sel(Dex='DAem', Aex='Aem'))):
mask = d.D_ex[ich] + d.A_em[ich] * d.A_ex[ich]
assert (ph == d.ph_times_m[ich][mask]).all()
else:
assert list_array_equal(d.iter_ph_times(),
d.iter_ph_times(Ph_sel(Dex='DAem')))
def test_get_ph_times_period(data):
for ich in range(data.nch):
data.get_ph_times_period(0, ich=ich)
data.get_ph_times_period(0, ich=ich, ph_sel=Ph_sel(Dex='Dem'))
def test_iter_ph_times_period(data):
d = data
for ich in range(data.nch):
for period, ph_period in enumerate(d.iter_ph_times_period(ich=ich)):
istart, iend = d.Lim[ich][period]
assert (ph_period == d.ph_times_m[ich][istart : iend + 1]).all()
ph_sel = Ph_sel(Dex='Dem')
mask = d.get_ph_mask(ich=ich, ph_sel=ph_sel)
for period, ph_period in enumerate(
d.iter_ph_times_period(ich=ich, ph_sel=ph_sel)):
istart, iend = d.Lim[ich][period]
ph_period_test = d.ph_times_m[ich][istart : iend + 1]
ph_period_test = ph_period_test[mask[istart : iend + 1]]
assert (ph_period == ph_period_test).all()
def test_burst_search_py_cy(data):
"""Test python and cython burst search with background-dependent threshold.
"""
data.burst_search(pure_python=True)
mburst1 = [b.copy() for b in data.mburst]
num_bursts1 = data.num_bursts
data.burst_search(pure_python=False)
assert np.all(num_bursts1 == data.num_bursts)
assert mburst1 == data.mburst
data.burst_search(L=30, pure_python=True)
mburst1 = [b.copy() for b in data.mburst]
num_bursts1 = data.num_bursts
data.burst_search(L=30, pure_python=False)
assert np.all(num_bursts1 == data.num_bursts)
assert mburst1 == data.mburst
def test_burst_search_constant_rates(data):
"""Test python and cython burst search with constant threshold."""
data.burst_search(min_rate_cps=50e3, pure_python=True)
assert (data.num_bursts > 0).all()
mburst1 = [b.copy() for b in data.mburst]
num_bursts1 = data.num_bursts
data.burst_search(min_rate_cps=50e3, pure_python=False)
assert (data.num_bursts > 0).all()
assert np.all(num_bursts1 == data.num_bursts)
assert mburst1 == data.mburst
def test_burst_search_L(data):
"""Test burst search with different L arguments."""
data.burst_search(L=10)
for bursts in data.mburst:
assert (bursts.counts >= 10).all()
num_bursts1 = data.num_bursts
data.burst_search(L=30)
for bursts in data.mburst:
assert (bursts.counts >= 30).all()
assert np.all(num_bursts1 > data.num_bursts)
def test_burst_search_with_no_bursts(data):
"""Smoke test burst search when some periods have no bursts."""
# F=600 results in periods with no bursts for the us-ALEX measurement
# and in no bursts at all for the multi-spot measurements
data.burst_search(m=10, F=600)
data.fuse_bursts(ms=1)
if has_matplotlib:
def test_stale_fitter_after_burst_search(data):
"""Test that E/S_fitter attributes are deleted on burst search."""
data.burst_search(L=10, m=10, F=7, ph_sel=Ph_sel(Dex='Dem'))
bplt.dplot(data, bplt.hist_fret) # create E_fitter attribute
if data.alternated:
bplt.dplot(data, bplt.hist_S) # create S_fitter attribute
data.burst_search(L=10, m=10, F=7, ph_sel=Ph_sel(Dex='Aem'))
assert not hasattr(data, 'E_fitter')
if data.alternated:
assert not hasattr(data, 'S_fitter')
bplt.dplot(data, bplt.hist_fret) # create E_fitter attribute
if data.alternated:
bplt.dplot(data, bplt.hist_S) # create S_fitter attribute
data.calc_fret()
assert not hasattr(data, 'E_fitter')
if data.alternated:
assert not hasattr(data, 'S_fitter')
def test_burst_search(data):
"""Smoke test and bg_bs check."""
streams = [Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem')]
if data.alternated:
streams.extend([Ph_sel(Dex='Aem', Aex='Aem'), Ph_sel(Dex='DAem')])
for sel in streams:
data.burst_search(L=10, m=10, F=7, ph_sel=sel)
assert list_equal(data.bg_bs, data.bg_from(sel))
if data.alternated:
data.burst_search(m=10, F=7, ph_sel=Ph_sel(Dex='DAem'), compact=True)
data.burst_search(L=10, m=10, F=7)
def test_burst_search_and_gate(data_1ch):
"""Test consistency of burst search and gate."""
d = data_1ch
assert d.alternated
# Smoke tests
bext.burst_search_and_gate(d, F=(6, 8))
bext.burst_search_and_gate(d, m=(12, 8))
bext.burst_search_and_gate(d, min_rate_cps=(60e3, 40e3))
if d.nch > 1:
mr1 = 35e3 + np.arange(d.nch) * 1e3
mr2 = 30e3 + np.arange(d.nch) * 1e3
bext.burst_search_and_gate(d, min_rate_cps=(mr1, mr2))
# Consistency test
d_dex = d.copy()
d_dex.burst_search(ph_sel=Ph_sel(Dex='DAem'))
d_aex = d.copy()
d_aex.burst_search(ph_sel=Ph_sel(Aex='Aem'))
d_and = bext.burst_search_and_gate(d)
for bursts_dex, bursts_aex, bursts_and, ph in zip(
d_dex.mburst, d_aex.mburst, d_and.mburst, d.iter_ph_times()):
ph_b_mask_dex = bl.ph_in_bursts_mask(ph.size, bursts_dex)
ph_b_mask_aex = bl.ph_in_bursts_mask(ph.size, bursts_aex)
ph_b_mask_and = bl.ph_in_bursts_mask(ph.size, bursts_and)
assert (ph_b_mask_and == ph_b_mask_dex * ph_b_mask_aex).all()
def test_mch_count_ph_num_py_c(data):
na_py = bl.bslib.mch_count_ph_in_bursts_py(data.mburst, data.A_em)
na_c = bl.bslib.mch_count_ph_in_bursts_c(data.mburst, data.A_em)
assert list_array_equal(na_py, na_c)
assert na_py[0].dtype == np.float64
def test_burst_sizes(data):
"""Test for .burst_sizes_ich() and burst_sizes()"""
# Smoke test
plain_sizes = data.burst_sizes()
assert len(plain_sizes) == data.nch
# Test gamma and donor_ref arguments
bs1 = data.burst_sizes_ich(gamma=0.5, donor_ref=True)
bs2 = data.burst_sizes_ich(gamma=0.5, donor_ref=False)
assert np.allclose(bs1, bs2 / 0.5)
# Test add_naa
if data.alternated:
bs_no_naa = data.burst_sizes_ich(add_naa=False)
bs_naa = data.burst_sizes_ich(add_naa=True)
assert np.allclose(bs_no_naa + data.naa[0], bs_naa)
# Test beta and donor_ref arguments with gamma=1
naa1 = data.get_naa_corrected(beta=0.8, donor_ref=True)
naa2 = data.get_naa_corrected(beta=0.8, donor_ref=False)
assert np.allclose(naa1, naa2)
# Test beta and donor_ref arguments with gamma=0.5
naa1 = data.get_naa_corrected(gamma=0.5, beta=0.8, donor_ref=True)
naa2 = data.get_naa_corrected(gamma=0.5, beta=0.8, donor_ref=False)
assert np.allclose(naa1 * 0.5, naa2)
def test_leakage(data):
"""
Test setting leakage before and after burst search
"""
# burst search, then set leakage
data.burst_search()
data.leakage = 0.04
na1 = list(data.na)
# set leakage, then burst search
data.burst_search()
na2 = list(data.na)
assert list_array_equal(na1, na2)
def test_gamma(data):
"""
Test setting gamma before and after burst search
"""
# burst search, then set gamma
data.burst_search()
E0 = list(data.E)
data.gamma = 0.5
E1 = list(data.E)
assert not list_array_equal(E0, E1)
# burst search after setting gamma
data.burst_search()
E2 = list(data.E)
assert list_array_equal(E1, E2)
def test_dir_ex(data_1ch):
"""
Test setting dir_ex before and after burst search
"""
data = data_1ch
# burst search, then set dir_ex
data.burst_search()
na0 = list(data.na)
data.dir_ex = 0.05
na1 = list(data.na)
assert not list_array_equal(na0, na1)
# burst search after setting dir_ex
data.burst_search()
na2 = list(data.na)
assert list_array_equal(na1, na2)
def test_beta(data_1ch):
"""
Test setting beta before and after burst search
"""
data = data_1ch
# burst search, then set beta
data.burst_search()
S0 = list(data.S)
data.beta = 0.7
S1 = list(data.S)
assert not list_array_equal(S0, S1)
# burst search after setting beta
data.burst_search()
S2 = list(data.S)
assert list_array_equal(S1, S2)
def test_bursts_interface(data):
d = data
for b in d.mburst:
assert (b.start == b.data[:, b._i_start]).all()
assert (b.stop == b.data[:, b._i_stop]).all()
assert (b.istart == b.data[:, b._i_istart]).all()
assert (b.istop == b.data[:, b._i_istop]).all()
rate = 1.*b.counts/b.width
assert (b.ph_rate == rate).all()
separation = b.start[1:] - b.stop[:-1]
assert (b.separation == separation).all()
assert (b.stop > b.start).all()
def test_burst_stop_istop(data):
"""Test coherence between b_end() and b_iend()"""
d = data
for ph, bursts in zip(d.ph_times_m, d.mburst):
assert (ph[bursts.istop] == bursts.stop).all()
def test_monotonic_burst_start(data):
"""Test for monotonic burst start times."""
d = data
for i in range(d.nch):
assert (np.diff(d.mburst[i].start) > 0).all()
def test_monotonic_burst_stop(data):
"""Test for monotonic burst stop times."""
d = data
for bursts in d.mburst:
assert (np.diff(bursts.stop) > 0).all()
def test_burst_istart_iend_size(data):
"""Test consistency between burst istart, istop and counts (i.e. size)"""
d = data
for bursts in d.mburst:
counts = bursts.istop - bursts.istart + 1
assert (counts == bursts.counts).all()
def test_burst_recompute_times(data):
"""Test Bursts.recompute_times method."""
d = data
for times, bursts in zip(d.ph_times_m, d.mburst):
newbursts = bursts.recompute_times(times)
assert newbursts == bursts
def test_burst_recompute_index(data):
"""Test Bursts.recompute_index_* methods."""
d = data
ph_sel = Ph_sel(Dex='Dem')
d.burst_search(ph_sel=ph_sel, index_allph=True)
d_sel = d.copy()
d_sel.burst_search(ph_sel=ph_sel, index_allph=False)
for times_sel, mask_sel, bursts_sel, times_allph, bursts_allph in zip(
d.iter_ph_times(ph_sel=ph_sel),
d.iter_ph_masks(ph_sel=ph_sel),
d_sel.mburst,
d.iter_ph_times(),
d.mburst):
assert (times_sel[bursts_sel.istart] == bursts_sel.start).all()
assert (times_sel[bursts_sel.istop] == bursts_sel.stop).all()
assert (times_allph[bursts_allph.istart] == bursts_allph.start).all()
assert (times_allph[bursts_allph.istop] == bursts_allph.stop).all()
# Test individual methods
bursts_allph2 = bursts_sel.recompute_index_expand(mask_sel)
assert bursts_allph2 == bursts_allph
assert (times_allph[bursts_allph2.istart] == bursts_allph2.start).all()
assert (times_allph[bursts_allph2.istop] == bursts_allph2.stop).all()
bursts_sel2 = bursts_allph.recompute_index_reduce(times_sel)
assert (times_sel[bursts_sel2.istart] == bursts_sel2.start).all()
assert (times_sel[bursts_sel2.istop] == bursts_sel2.stop).all()
assert bursts_sel2 == bursts_sel
# Test round-trip
bursts_allph3 = bursts_sel2.recompute_index_expand(mask_sel)
assert bursts_allph3 == bursts_allph2
assert (times_allph[bursts_allph3.istart] == bursts_allph3.start).all()
assert (times_allph[bursts_allph3.istop] == bursts_allph3.stop).all()
## This test is only used to develop alternative implementations of
## Bursts.recompute_index_reduce() and is normally disabled as it is very slow.
#def test_burst_recompute_index_reduce(data):
# """Test different versions of Bursts.recompute_index_reduce methods.
#
# This test is very slow so it's normally disabled.
# """
# d = data
# ph_sel = Ph_sel(Dex='Aem')
# d.burst_search(ph_sel=ph_sel)
# d_sel = d.copy()
# d_sel.burst_search(ph_sel=ph_sel, index_allph=False)
# for times_sel, bursts_sel, times_allph, bursts_allph in zip(
# d.iter_ph_times(ph_sel=ph_sel),
# d_sel.mburst,
# d.iter_ph_times(),
# d.mburst):
# assert (times_allph[bursts_allph.istart] == bursts_allph.start).all()
# assert (times_allph[bursts_allph.istop] == bursts_allph.stop).all()
#
# bursts_sel1 = bursts_allph.recompute_index_reduce(times_sel)
# bursts_sel2 = bursts_allph.recompute_index_reduce2(times_sel)
# assert bursts_sel1 == bursts_sel2
# assert bursts_sel == bursts_sel1
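# The m-tuple estimator checked below computes, for every window of m
# consecutive photons, a rate of (m - 1 - default_c) / delay, where delay is
# the time spanned by the window, and places it at the window midpoint
# 0.5 * (t[i + m - 1] + t[i]); the assertions pin exactly this relation.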
def test_phrates_mtuple(data):
d = data
m = 10
max_num_ph = 20001
for ph in d.iter_ph_times():
phc = ph[:max_num_ph]
rates = phrates.mtuple_rates(phc, m)
delays = phrates.mtuple_delays(phc, m)
t_rates = 0.5 * (phc[m-1:] + phc[:-m+1])
assert phrates.mtuple_rates_max(phc, m) == rates.max()
assert phrates.mtuple_delays_min(phc, m) == delays.min()
assert phrates.default_c == 1
assert (rates == (m - 1 - phrates.default_c) / delays).all()
assert (phrates.mtuple_rates_t(phc, m) == t_rates).all()
if has_numba:
def test_phrates_kde(data):
d = data
tau = 5000 # 5000 * 12.5ns = 6.25 us
for ph in d.iter_ph_times():
# Test consistency of kde_laplace_nph and (kde_laplace, kde_rect)
rates = phrates.kde_laplace(ph, tau)
nrect = phrates.kde_rect(ph, tau*10)
ratesl, nph = phrates.nb.kde_laplace_nph(ph, tau)
assert (rates == ratesl).all()
assert (nph == nrect).all()
# Test consistency of kde_laplace and _kde_laplace_self_numba
ratesl2, nph2 = phrates.nb.kde_laplace_self_numba(ph, tau)
assert (nph2 == nrect).all()
assert (ratesl2 == rates).all()
# Smoke test laplace, gaussian, rect with time_axis
ratesl = phrates.kde_laplace(ph, tau, time_axis=ph+1)
assert ((ratesl >= 0) * (ratesl < 5e6)).all()
ratesg = phrates.kde_gaussian(ph, tau, time_axis=ph+1)
assert ((ratesg >= 0) * (ratesg < 5e6)).all()
ratesr = phrates.kde_rect(ph, tau, time_axis=ph+1)
assert ((ratesr >= 0) * (ratesr < 5e6)).all()
def test_phrates_kde_cy(data):
d = data
tau = 5000 # 5000 * 12.5ns = 6.25 us
for ph in d.iter_ph_times():
            # Consistency check between the numba and cython KDE implementations
ratesg = phrates.nb.kde_gaussian_numba(ph, tau)
ratesl = phrates.nb.kde_laplace_numba(ph, tau)
ratesr = phrates.nb.kde_rect_numba(ph, tau)
ratesgc = phrates.cy.kde_gaussian_cy(ph, tau)
rateslc = phrates.cy.kde_laplace_cy(ph, tau)
ratesrc = phrates.cy.kde_rect_cy(ph, tau)
assert (ratesg == ratesgc).all()
assert (ratesl == rateslc).all()
assert (ratesr == ratesrc).all()
def test_burst_ph_data_functions(data):
"""Tests the functions that iterate or operate on per-burst "ph-data".
"""
d = data
for bursts, ph, mask in zip(d.mburst, d.iter_ph_times(),
d.iter_ph_masks(Ph_sel(Dex='Dem'))):
bstart = bursts.start
bend = bursts.stop
for i, (start, stop) in enumerate(bl.iter_bursts_start_stop(bursts)):
assert ph[start] == bstart[i]
assert ph[stop-1] == bend[i]
for i, burst_ph in enumerate(bl.iter_bursts_ph(ph, bursts)):
assert burst_ph[0] == bstart[i]
assert burst_ph[-1] == bend[i]
for i, burst_ph in enumerate(bl.iter_bursts_ph(ph, bursts, mask=mask)):
if burst_ph.size > 0:
assert burst_ph[0] >= bstart[i]
assert burst_ph[-1] <= bend[i]
stats = bl.burst_ph_stats(ph, bursts, mask=mask)
assert (stats[~np.isnan(stats)] >= bstart[~np.isnan(stats)]).all()
assert (stats[~np.isnan(stats)] <= bend[~np.isnan(stats)]).all()
bistart = bursts.istart
biend = bursts.istop
bursts_mask = bl.ph_in_bursts_mask(ph.size, bursts)
for i, (start, stop) in enumerate(bl.iter_bursts_start_stop(bursts)):
assert bursts_mask[start:stop].all()
if start > 0:
if i > 0 and biend[i-1] < bistart[i] - 1:
assert not bursts_mask[start - 1]
if stop < ph.size:
if i < bistart.size-1 and bistart[i+1] > biend[i] + 1:
assert not bursts_mask[stop]
def test_ph_in_bursts_ich(data):
"""Tests the ph_in_bursts_ich method.
"""
d = data
for ich in range(d.nch):
ph_in_bursts = d.ph_in_bursts_ich(ich)
ph_in_bursts_dd = d.ph_in_bursts_ich(ich, ph_sel=Ph_sel(Dex='Dem'))
assert ph_in_bursts_dd.size < ph_in_bursts.size
def test_burst_fuse(data):
"""Test 2 independent implementations of fuse_bursts for consistency.
"""
d = data
for bursts in d.mburst:
new_mbursti = bl.fuse_bursts_iter(bursts, ms=1)
new_mburstd = bl.fuse_bursts_direct(bursts, ms=1)
assert new_mbursti == new_mburstd
def test_burst_fuse_0ms(data):
"""Test that after fusing with ms=0 the sum of bursts sizes is that same
as the number of ph in bursts (via burst selection).
"""
d = data
if d.nch == 8:
d.burst_search(L=10, m=10, F=7, computefret=False)
d.mburst[1] = bl.bslib.Bursts.empty() # Make one channel with no bursts
d._calc_burst_period()
d.calc_fret(count_ph=True)
df = d.fuse_bursts(ms=0)
for ich, bursts in enumerate(df.mburst):
mask = bl.ph_in_bursts_mask(df.ph_data_sizes[ich], bursts)
assert mask.sum() == bursts.counts.sum()
df.calc_fret(count_ph=True)
assert len(df.mburst) == len(d.mburst)
assert len(df.mburst) == d.nch
def test_burst_fuse_separation(data):
"""Test that after fusing bursts the minimum separation is equal
to the threshold used during fusing.
"""
d = data
fuse_ms = 2
df = d.fuse_bursts(ms=fuse_ms)
for bursts in df.mburst:
separation = bursts.separation * df.clk_p
if bursts.num_bursts > 0:
assert separation.min() >= fuse_ms * 1e-3
def test_calc_sbr(data):
"""Smoke test Data.calc_sbr()"""
data.calc_sbr()
def test_calc_max_rate(data):
"""Smoke test for Data.calc_max_rate()"""
data.calc_max_rate(m=10)
if data.alternated:
data.calc_max_rate(m=10, ph_sel=Ph_sel(Dex='DAem'), compact=True)
def test_burst_data(data):
"""Test for bext.burst_data()"""
bext.burst_data(data, include_bg=True, include_ph_index=True)
bext.burst_data(data, include_bg=False, include_ph_index=True)
bext.burst_data(data, include_bg=True, include_ph_index=False)
bext.burst_data(data, include_bg=False, include_ph_index=False)
def test_print_burst_stats(data):
"""Smoke test for burstlib.print_burst_stats()"""
bl.print_burst_stats(data)
def test_expand(data):
"""Test method `expand()` for `Data()`."""
d = data
for ich, bursts in enumerate(d.mburst):
if bursts.num_bursts == 0:
continue # if no bursts skip this ch
nd, na, bg_d, bg_a, width = d.expand(ich, width=True)
width2 = bursts.width * d.clk_p
period = d.bp[ich]
bg_d2 = d.bg_from(Ph_sel(Dex='Dem'))[ich][period] * width2
bg_a2 = d.bg_from(Ph_sel(Dex='Aem'))[ich][period] * width2
assert (width == width2).all()
assert (nd == d.nd[ich]).all() and (na == d.na[ich]).all()
assert (bg_d == bg_d2).all() and (bg_a == bg_a2).all()
def test_burst_data_ich(data):
"""Test method `Data.burst_data_ich()`."""
d = data
for ich, bursts in enumerate(d.mburst):
if bursts.num_bursts == 0:
continue # if no bursts skip this ch
burst_dict = d.burst_data_ich(ich=ich)
assert (burst_dict['size_raw'] == bursts.counts).all()
assert (burst_dict['t_start'] == bursts.start * d.clk_p).all()
assert (burst_dict['t_stop'] == bursts.stop * d.clk_p).all()
assert (burst_dict['i_start'] == bursts.istart).all()
assert (burst_dict['i_stop'] == bursts.istop).all()
assert (burst_dict['bg_period'] == d.bp[ich]).all()
nd, na, bg_d, bg_a, width = d.expand(ich, width=True)
width_ms = width * 1e3
assert (width_ms == burst_dict['width_ms']).all()
assert (nd == burst_dict['nd']).all()
assert (na == burst_dict['na']).all()
assert (bg_d == burst_dict['bg_dd']).all()
assert (bg_a == burst_dict['bg_ad']).all()
if d.alternated:
period = d.bp[ich]
bg_da = d.bg_from(Ph_sel(Aex='Dem'))[ich][period] * width
bg_aa = d.bg_from(Ph_sel(Aex='Aem'))[ich][period] * width
assert (bg_da == burst_dict['bg_da']).all()
assert (bg_aa == burst_dict['bg_aa']).all()
def test_burst_corrections(data):
"""Test background and bleed-through corrections."""
d = data
d.calc_ph_num(alex_all=True)
d.corrections()
leakage = d.get_leakage_array()
for ich, bursts in enumerate(d.mburst):
if bursts.num_bursts == 0: continue # if no bursts skip this ch
nd, na, bg_d, bg_a, width = d.expand(ich, width=True)
burst_size_raw = bursts.counts
lk = leakage[ich]
if d.alternated:
nda, naa = d.nda[ich], d.naa[ich]
period = d.bp[ich]
bg_da = d.bg_from(Ph_sel(Aex='Dem'))[ich][period]*width
bg_aa = d.bg_from(Ph_sel(Aex='Aem'))[ich][period]*width
burst_size_raw2 = (nd + na + bg_d + bg_a + lk*nd + nda + naa +
bg_da + bg_aa)
assert np.allclose(burst_size_raw, burst_size_raw2)
else:
burst_size_raw2 = nd + na + bg_d + bg_a + lk*nd
assert np.allclose(burst_size_raw, burst_size_raw2)
def test_burst_search_consistency(data):
"""Test consistency of burst data array
"""
d = data
for mb, ph in zip(d.mburst, d.iter_ph_times()):
tot_size = mb.counts
istart, istop = mb.istart, mb.istop
assert np.all(tot_size == istop - istart + 1)
start, stop, width = mb.start, mb.stop, mb.width
assert np.all(width == stop - start)
df = d.fuse_bursts(ms=0)
for mb, ph in zip(df.mburst, df.iter_ph_times()):
tot_size = mb.counts
istart, istop = mb.istart, mb.istop
assert np.all(tot_size == istop - istart + 1)
start, stop, width = mb.start, mb.stop, mb.width
assert np.all(width == stop - start)
df = d.fuse_bursts(ms=1)
for mb, ph in zip(df.mburst, df.iter_ph_times()):
tot_size = mb.counts
istart, istop = mb.istart, mb.istop
assert np.all(tot_size <= istop - istart + 1)
start, stop, width = mb.start, mb.stop, mb.width
assert np.all(width <= stop - start)
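# The assertions below restate the corrected FRET definitions used here:
# E = na / (gamma * nd + na) and, for ALEX data,
# S = (gamma * nd + na) / (gamma * nd + na + naa / beta), where naa is
# reduced by nar for PAX measurements.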
def test_E_and_S_with_corrections(data):
d = data
gamma = 0.5
beta = 0.7
d.gamma = gamma
d.beta = beta
for i, (E, nd, na) in enumerate(zip(d.E, d.nd, d.na)):
assert (E == na / (nd * gamma + na)).all()
if d.alternated:
naa = d.naa[i]
if 'PAX' in data.meas_type:
naa = d.naa[i] - d.nar[i]
assert (d.S[i] == (gamma * nd + na) /
(gamma * nd + na + naa / beta)).all()
def test_burst_size_da(data):
"""Test that nd + na with no corrections is equal to b_size(mburst).
"""
d = data
d.calc_ph_num(alex_all=True)
if d.alternated:
for mb, nd, na, naa, nda in zip(d.mburst, d.nd, d.na, d.naa, d.nda):
tot_size = mb.counts
tot_size2 = nd + na + naa + nda
assert np.allclose(tot_size, tot_size2)
else:
for mb, nd, na in zip(d.mburst, d.nd, d.na):
tot_size = mb.counts
assert (tot_size == nd + na).all()
def test_burst_selection(data):
"""Smoke test for burst selection methods.
"""
d = data
d.select_bursts(select_bursts.size, th1=20, th2=100, add_naa=True)
d.select_bursts(select_bursts.size, th1=20, th2=100, gamma=0.5)
M1 = d.select_bursts_mask(select_bursts.consecutive, th1=1e-3, th2=1e4,
kind='first')
M2 = d.select_bursts_mask(select_bursts.consecutive, th1=1e-3, th2=1e4,
kind='second')
Mb = d.select_bursts_mask(select_bursts.consecutive, th1=1e-3, th2=1e4,
kind='both')
Mb2 = [m1 + m2 for m1, m2 in zip(M1, M2)]
assert list_array_equal(Mb, Mb2)
def test_burst_selection_nocorrections(data):
"""Test burst selection with uncorrected bursts.
"""
d = data
d.burst_search(computefret=False)
d.calc_fret(count_ph=True, corrections=False)
ds1 = d.select_bursts(select_bursts.size, th1=20, th2=100,
computefret=False)
ds2 = d.select_bursts(select_bursts.size, th1=20, th2=100)
ds2.calc_ph_num()
ds2.calc_fret(corrections=False)
assert list_array_equal(ds1.nd, ds2.nd)
assert list_array_equal(ds1.na, ds2.na)
assert list_array_equal(ds1.E, ds2.E)
if d.alternated:
assert list_array_equal(ds1.naa, ds2.naa)
assert list_array_equal(ds1.E, ds2.E)
def test_burst_selection_ranges(data):
"""Test selection functions having a min-max range.
"""
d = data
d.burst_search()
d.calc_max_rate(m=10, ph_sel=Ph_sel(Dex='DAem'))
Range = namedtuple('Range', ['min', 'max', 'getter'])
sel_functions = dict(
E=Range(0.5, 1, None), nd=Range(30, 40, None), na=Range(30, 40, None),
time=Range(1, 61, lambda d, ich: d.mburst[ich].start * d.clk_p),
width=Range(0.5, 1.5, lambda d, ich: d.mburst[ich].width * d.clk_p*1e3),
peak_phrate=Range(50e3, 150e3, lambda d, ich: d.max_rate[ich]))
if d.alternated:
sel_functions.update(naa=Range(30, 40, None), S=Range(0.3, 0.7, None))
for func_name, range_ in sel_functions.items():
func = getattr(select_bursts, func_name)
getter = range_.getter
if getter is None:
getter = lambda d, ich: d[func_name][ich]
ds = d.select_bursts(func, args=(range_.min, range_.max))
for ich in range(d.nch):
selected = getter(ds, ich)
assert ((selected >= range_.min) * (selected <= range_.max)).all()
def test_join_data(data):
"""Smoke test for bext.join_data() function.
"""
d = data
dj = bext.join_data([d, d.copy()])
assert (dj.num_bursts == 2 * d.num_bursts).all()
for bursts in dj.mburst:
assert (np.diff(bursts.start) > 0).all()
def test_collapse(data_8ch):
"""Test the .collapse() method that joins the ch.
"""
d = data_8ch
dc1 = d.collapse()
bursts1 = dc1.mburst[0]
bursts2 = bl.bslib.Bursts.merge(d.mburst, sort=True)
assert bursts1 == bursts2
bursts2 = bl.bslib.Bursts.merge(d.mburst, sort=False)
indexsort_stop = bursts2.stop.argsort()
bursts3 = bursts2[indexsort_stop]
indexsort_start = bursts3.start.argsort()
bursts4 = bursts3[indexsort_start]
assert bursts1 == bursts4
indexsort = np.lexsort((bursts2.stop, bursts2.start))
for name in d.burst_fields:
if name not in d or name == 'mburst':
continue
newfield = np.hstack(d[name])[indexsort]
assert np.allclose(dc1[name][0], newfield)
dc2 = d.collapse(update_gamma=False)
for name in d.burst_fields:
if name not in d: continue
if name == 'mburst':
assert dc1.mburst[0] == dc2.mburst[0]
else:
assert np.allclose(dc1[name][0], dc2[name][0])
if __name__ == '__main__':
pytest.main("-x -v fretbursts/tests/test_burstlib.py")
|
gpl-2.0
| -812,374,004,313,463,000 | 34.349608 | 80 | 0.592931 | false |
keishi/chromium
|
tools/isolate/isolate_common_test.py
|
1
|
2483
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cStringIO
import logging
import unittest
import sys
import isolate_common
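# pretty_print() serializes the dict into GYP-style text; the expected
# literals below pin the exact layout (ordering, quoting and trailing
# commas), not just semantic equality.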
class TraceInputs(unittest.TestCase):
def _test(self, value, expected):
actual = cStringIO.StringIO()
isolate_common.pretty_print(value, actual)
self.assertEquals(expected, actual.getvalue())
def test_pretty_print_empty(self):
self._test({}, '{\n}\n')
def test_pretty_print_mid_size(self):
value = {
'variables': {
'bar': [
'file1',
'file2',
],
},
'conditions': [
['OS=\"foo\"', {
'variables': {
isolate_common.KEY_UNTRACKED: [
'dir1',
'dir2',
],
isolate_common.KEY_TRACKED: [
'file4',
'file3',
],
'command': ['python', '-c', 'print "H\\i\'"'],
'read_only': True,
'relative_cwd': 'isol\'at\\e',
},
}],
['OS=\"bar\"', {
'variables': {},
}, {
'variables': {},
}],
],
}
expected = (
"{\n"
" 'variables': {\n"
" 'bar': [\n"
" 'file1',\n"
" 'file2',\n"
" ],\n"
" },\n"
" 'conditions': [\n"
" ['OS=\"foo\"', {\n"
" 'variables': {\n"
" 'command': [\n"
" 'python',\n"
" '-c',\n"
" 'print \"H\\i\'\"',\n"
" ],\n"
" 'relative_cwd': 'isol\\'at\\\\e',\n"
" 'read_only': True\n"
" 'isolate_dependency_tracked': [\n"
" 'file4',\n"
" 'file3',\n"
" ],\n"
" 'isolate_dependency_untracked': [\n"
" 'dir1',\n"
" 'dir2',\n"
" ],\n"
" },\n"
" }],\n"
" ['OS=\"bar\"', {\n"
" 'variables': {\n"
" },\n"
" }, {\n"
" 'variables': {\n"
" },\n"
" }],\n"
" ],\n"
"}\n")
self._test(value, expected)
if __name__ == '__main__':
VERBOSE = '-v' in sys.argv
logging.basicConfig(level=logging.DEBUG if VERBOSE else logging.ERROR)
unittest.main()
|
bsd-3-clause
| 6,734,886,638,849,571,000 | 24.597938 | 72 | 0.391462 | false |
MaxTyutyunnikov/lino
|
obsolete/html2odt/__init__.py
|
1
|
2203
|
# -*- coding: UTF-8 -*-
## Copyright 2012 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, see <http://www.gnu.org/licenses/>.
r"""
Experimental. Not maintained and not used within Lino.
This package contains mainly a copy of `odfpy.contrib.html2odt`
(https://joinup.ec.europa.eu/software/odfpy).
One modification by LS in the original files:
- :file:`html2odt.py` : changed import statement for `emptycontent`
The content of this file (:file:`__init__.py`) is my own derived work.
I wanted to use the HTML2ODTParser not for grabbing
a complete HTML page and creating a full .odt file,
but for converting a chunk of HTML into a chunk of ODF XML.
Example:
>>> html = '''This is<br>a <em>simple</em> <b>test</b>.'''
>>> print html2odt(html)
This is<text:line-break/>a <em>simple</em> test.
Note that the Parser ignores the ``<b>...</b>`` tag.
It seems that this simply isn't implemented yet.
"""
from lino.utils.html2odt.html2odt import HTML2ODTParser
from odf import element
class RawXML(element.Childless, element.Node):
#~ nodeType = element.Node.ELEMENT_NODE
nodeType = element.Node.TEXT_NODE
def __init__(self, raw_xml):
self.raw_xml = raw_xml
#~ super(RawXML,self).__init__()
def toXml(self,level,f):
f.write(self.raw_xml)
def html2odt(data,encoding='iso8859-1',baseurl=''):
p = HTML2ODTParser(encoding, baseurl)
#~ failure = ""
p.feed(data)
text = p.result() # Flush the buffer
#~ return RawXML(text)
return text
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
|
gpl-3.0
| -918,762,337,038,523,100 | 29.597222 | 71 | 0.686791 | false |
indro/t2c
|
apps/external_apps/swaps/views.py
|
2
|
9884
|
from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponseRedirect, Http404
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.views.generic import date_based
from django.conf import settings
from django.db.models import Q
from swaps.models import Offer, Swap
from swaps.forms import OfferForm, ProposeSwapForm, ProposingOfferForm
try:
from notification import models as notification
except ImportError:
notification = None
try:
from threadedcomments.models import ThreadedComment
forums = True
except ImportError:
forums = False
@login_required
def offers(request, username=None):
offers = Offer.objects.filter(state=1).order_by("-offered_time")
return render_to_response("swaps/offers.html", {"offers": offers}, context_instance=RequestContext(request))
@login_required
def offer(request, offer_id):
offer = get_object_or_404(Offer, id=offer_id)
#deletable = offer.is_deletable()
return render_to_response("swaps/offer.html", {
"offer": offer,
#"deletable": deletable,
}, context_instance=RequestContext(request))
@login_required
def your_offers(request):
user = request.user
offers = Offer.objects.filter(offerer=user).order_by("-offered_time")
return render_to_response("swaps/your_offers.html", {"offers": offers}, context_instance=RequestContext(request))
@login_required
def swap(request, swap_id):
swap = get_object_or_404(Swap, id=swap_id)
return render_to_response("swaps/swap.html", {
"swap": swap,
}, context_instance=RequestContext(request))
@login_required
def proposed_by_you(request):
swaps = Swap.objects.filter(proposing_offer__offerer=request.user, state=1).order_by("-proposed_time")
return render_to_response("swaps/proposed_by_you.html", {"swaps": swaps}, context_instance=RequestContext(request))
@login_required
def proposed_to_you(request):
swaps = Swap.objects.filter(responding_offer__offerer=request.user, state=1).order_by("-proposed_time")
return render_to_response("swaps/proposed_to_you.html", {"swaps": swaps}, context_instance=RequestContext(request))
@login_required
def accepted_swaps(request):
swaps = Swap.objects.filter(
Q(state=2, proposing_offer__offerer=request.user) |
Q(state=2, responding_offer__offerer=request.user)).order_by("-accepted_time")
return render_to_response("swaps/accepted.html", {"swaps": swaps}, context_instance=RequestContext(request))
@login_required
def dead_swaps(request):
swaps = Swap.objects.filter(
Q(state__gt=3, proposing_offer__offerer=request.user) |
Q(state__gt=3, responding_offer__offerer=request.user)).order_by("-killed_time")
return render_to_response("swaps/dead.html", {"swaps": swaps}, context_instance=RequestContext(request))
@login_required
def new(request):
if request.method == "POST":
if request.POST["action"] == "create":
offer_form = OfferForm(request.POST)
if offer_form.is_valid():
offer = offer_form.save(commit=False)
offer.offerer = request.user
offer.save()
request.user.message_set.create(message=_("Successfully saved offer '%s'") % offer.short_description)
#if notification:
# if friends: # @@@ might be worth having a shortcut for sending to all friends
# notification.send((x['friend'] for x in Friendship.objects.friends_for_user(offer.offerer)), "offer_friend_post", {"post": blog})
return HttpResponseRedirect(reverse("offer_list_yours"))
else:
offer_form = OfferForm()
else:
offer_form = OfferForm()
return render_to_response("swaps/new_offer.html", {
"offer_form": offer_form
}, context_instance=RequestContext(request))
@login_required
def edit_offer(request, offer_id):
offer = get_object_or_404(Offer, id=offer_id)
if offer.offerer != request.user:
request.user.message_set.create(message="You cannot edit offers that are not yours")
return HttpResponseRedirect(reverse("offer_list_yours"))
return_to = request.GET['returnto']
if request.method == "POST":
if request.POST["action"] == "update":
offer_form = OfferForm(request.POST, instance=offer)
if offer_form.is_valid():
offer = offer_form.save(commit=False)
offer.save()
if notification:
for swap in offer.proposed_swaps.filter(state=1):
notification.send([swap.responding_offer.offerer,], "swaps_proposing_offer_changed",
{"creator": request.user,
"swap": swap,
"proposing_offer": swap.proposing_offer,
"responding_offer": swap.responding_offer})
for swap in offer.responding_swaps.filter(state=1):
notification.send([swap.proposing_offer.offerer,], "swaps_responding_offer_changed",
{"creator": request.user,
"swap": swap,
"proposing_offer": swap.proposing_offer,
"responding_offer": swap.responding_offer})
request.user.message_set.create(message=_("Successfully updated offer '%s'") % offer.short_description)
return HttpResponseRedirect(reverse(return_to))
else:
offer_form = OfferForm(instance=offer)
else:
offer_form = OfferForm(instance=offer)
return render_to_response("swaps/edit_offer.html", {
"offer_form": offer_form,
"offer": offer,
}, context_instance=RequestContext(request))
@login_required
def delete_offer(request, offer_id):
offer = get_object_or_404(Offer, id=offer_id)
if offer.offerer != request.user:
request.user.message_set.create(message="You cannot delete offers that are not yours")
return HttpResponseRedirect(reverse("offer_list_yours"))
if request.method == "POST":
offer.delete()
return HttpResponseRedirect(reverse("offer_list_yours"))
@login_required
def cancel_offer(request, offer_id):
offer = get_object_or_404(Offer, id=offer_id)
if offer.offerer != request.user:
request.user.message_set.create(message="You cannot cancel offers that are not yours")
return HttpResponseRedirect(reverse("offer_list_yours"))
if request.method == "POST":
offer.cancel()
return HttpResponseRedirect(reverse("offer_list_yours"))
@login_required
def propose_swap(request, offer_id):
offer = get_object_or_404(Offer, id=offer_id)
if request.method == "POST":
swap_form = ProposeSwapForm(request.POST)
offer_form = ProposingOfferForm(request.POST)
swap = None
if swap_form.is_valid():
swap = swap_form.save(commit=False)
swap.responding_offer = offer
swap.save()
if offer_form.is_valid():
proposing_offer = offer_form.save(commit=False)
proposing_offer.offerer = request.user
proposing_offer.save()
swap = Swap(
proposing_offer=proposing_offer,
responding_offer=offer)
swap.save()
if swap:
if notification:
notification.send([offer.offerer,], "swaps_proposal",
{"creator": request.user,
"swap": swap,
"proposing_offer": swap.proposing_offer,
"responding_offer": swap.responding_offer})
return HttpResponseRedirect(reverse("proposed_by_you"))
else:
swap_form = ProposeSwapForm()
swap_form.fields['proposing_offer'].queryset = Offer.objects.filter(offerer=request.user, state=1)
offer_form = ProposingOfferForm()
return render_to_response("swaps/propose_swap.html", {
"offer": offer,
"swap_form": swap_form,
"offer_form": offer_form,
}, context_instance=RequestContext(request))
@login_required
def accept_swap(request, swap_id):
swap = get_object_or_404(Swap, id=swap_id)
swap.accept()
swap.save()
if notification:
notification.send([swap.proposing_offer.offerer,], "swaps_acceptance",
{"creator": request.user,
"swap": swap,
"proposing_offer": swap.proposing_offer,
"responding_offer": swap.responding_offer})
return HttpResponseRedirect(reverse("accepted_swaps"))
@login_required
def reject_swap(request, swap_id):
swap = get_object_or_404(Swap, id=swap_id)
swap.reject()
swap.save()
if notification:
notification.send([swap.proposing_offer.offerer,], "swaps_rejection",
{"creator": request.user,
"swap": swap,
"proposing_offer": swap.proposing_offer,
"responding_offer": swap.responding_offer})
return HttpResponseRedirect(reverse("dead_swaps"))
@login_required
def cancel_swap(request, swap_id):
swap = get_object_or_404(Swap, id=swap_id)
swap.cancel()
swap.save()
if notification:
notification.send([swap.responding_offer.offerer,], "swaps_cancellation",
{"creator": request.user,
"swap": swap,
"proposing_offer": swap.proposing_offer,
"responding_offer": swap.responding_offer})
return HttpResponseRedirect(reverse("dead_swaps"))
|
mit
| -4,431,117,786,967,901,700 | 40.704641 | 154 | 0.635269 | false |
BiznetGIO/horizon
|
openstack_dashboard/dashboards/admin/instances/views.py
|
1
|
9689
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import futurist
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.instances \
import forms as project_forms
from openstack_dashboard.dashboards.admin.instances \
import tables as project_tables
from openstack_dashboard.dashboards.admin.instances import tabs
from openstack_dashboard.dashboards.project.instances import views
from openstack_dashboard.dashboards.project.instances.workflows \
import update_instance
# re-use console from project.instances.views to make reflection work
def console(args, **kvargs):
return views.console(args, **kvargs)
# re-use vnc from project.instances.views to make reflection work
def vnc(args, **kvargs):
return views.vnc(args, **kvargs)
# re-use spice from project.instances.views to make reflection work
def spice(args, **kvargs):
return views.spice(args, **kvargs)
# re-use rdp from project.instances.views to make reflection work
def rdp(args, **kvargs):
return views.rdp(args, **kvargs)
# re-use get_resource_id_by_name from project.instances.views
def swap_filter(resources, filters, fake_field, real_field):
return views.swap_filter(resources, filters, fake_field, real_field)
class AdminUpdateView(views.UpdateView):
workflow_class = update_instance.AdminUpdateInstance
success_url = reverse_lazy("horizon:admin:instances:index")
class AdminIndexView(tables.DataTableView):
table_class = project_tables.AdminInstancesTable
page_title = _("Instances")
def has_more_data(self, table):
return self._more
def needs_filter_first(self, table):
return self._needs_filter_first
def get_data(self):
instances = []
tenants = []
tenant_dict = {}
images = []
flavors = []
full_flavors = {}
marker = self.request.GET.get(
project_tables.AdminInstancesTable._meta.pagination_param, None)
default_search_opts = {'marker': marker,
'paginate': True,
'all_tenants': True}
search_opts = self.get_filters(default_search_opts.copy())
        # If filter_first is set and no other filters are selected, then
        # search criteria must be provided first, so return an empty list.
filter_first = getattr(settings, 'FILTER_DATA_FIRST', {})
if filter_first.get('admin.instances', False) and \
len(search_opts) == len(default_search_opts):
self._needs_filter_first = True
self._more = False
return instances
self._needs_filter_first = False
def _task_get_tenants():
# Gather our tenants to correlate against IDs
try:
tmp_tenants, __ = api.keystone.tenant_list(self.request)
tenants.extend(tmp_tenants)
tenant_dict.update([(t.id, t) for t in tenants])
except Exception:
msg = _('Unable to retrieve instance project information.')
exceptions.handle(self.request, msg)
def _task_get_images():
            # Gather our images to correlate against IDs
try:
tmp_images = api.glance.image_list_detailed(self.request)[0]
images.extend(tmp_images)
except Exception:
msg = _("Unable to retrieve image list.")
exceptions.handle(self.request, msg)
def _task_get_flavors():
# Gather our flavors to correlate against IDs
try:
tmp_flavors = api.nova.flavor_list(self.request)
flavors.extend(tmp_flavors)
full_flavors.update([(str(flavor.id), flavor)
for flavor in flavors])
except Exception:
msg = _("Unable to retrieve flavor list.")
exceptions.handle(self.request, msg)
def _task_get_instances():
try:
tmp_instances, self._more = api.nova.server_list(
self.request,
search_opts=search_opts)
instances.extend(tmp_instances)
except Exception:
self._more = False
exceptions.handle(self.request,
_('Unable to retrieve instance list.'))
# In case of exception when calling nova.server_list
# don't call api.network
return
try:
api.network.servers_update_addresses(self.request, instances,
all_tenants=True)
except Exception:
exceptions.handle(
self.request,
message=_('Unable to retrieve IP addresses from Neutron.'),
ignore=True)
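        # The three lookups above are independent, so they are dispatched
        # concurrently; leaving the executor's context manager waits for all
        # of them before their results are needed for the filtering below.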
with futurist.ThreadPoolExecutor(max_workers=3) as e:
e.submit(fn=_task_get_tenants)
e.submit(fn=_task_get_images)
e.submit(fn=_task_get_flavors)
if 'project' in search_opts and \
not swap_filter(tenants, search_opts, 'project', 'tenant_id'):
self._more = False
return instances
elif 'image_name' in search_opts and \
not swap_filter(images, search_opts, 'image_name', 'image'):
self._more = False
return instances
elif "flavor_name" in search_opts and \
not swap_filter(flavors, search_opts, 'flavor_name', 'flavor'):
self._more = False
return instances
_task_get_instances()
# Loop through instances to get flavor and tenant info.
for inst in instances:
flavor_id = inst.flavor["id"]
try:
if flavor_id in full_flavors:
inst.full_flavor = full_flavors[flavor_id]
else:
# If the flavor_id is not in full_flavors list,
# gets it via nova api.
inst.full_flavor = api.nova.flavor_get(
self.request, flavor_id)
except Exception:
msg = _('Unable to retrieve instance size information.')
exceptions.handle(self.request, msg)
tenant = tenant_dict.get(inst.tenant_id, None)
inst.tenant_name = getattr(tenant, "name", None)
return instances
class LiveMigrateView(forms.ModalFormView):
form_class = project_forms.LiveMigrateForm
template_name = 'admin/instances/live_migrate.html'
context_object_name = 'instance'
success_url = reverse_lazy("horizon:admin:instances:index")
page_title = _("Live Migrate")
success_label = page_title
def get_context_data(self, **kwargs):
context = super(LiveMigrateView, self).get_context_data(**kwargs)
context["instance_id"] = self.kwargs['instance_id']
return context
@memoized.memoized_method
def get_hosts(self, *args, **kwargs):
try:
return api.nova.host_list(self.request)
except Exception:
redirect = reverse("horizon:admin:instances:index")
msg = _('Unable to retrieve host information.')
exceptions.handle(self.request, msg, redirect=redirect)
@memoized.memoized_method
def get_object(self, *args, **kwargs):
instance_id = self.kwargs['instance_id']
try:
return api.nova.server_get(self.request, instance_id)
except Exception:
redirect = reverse("horizon:admin:instances:index")
msg = _('Unable to retrieve instance details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
initial = super(LiveMigrateView, self).get_initial()
_object = self.get_object()
if _object:
current_host = getattr(_object, 'OS-EXT-SRV-ATTR:host', '')
initial.update({'instance_id': self.kwargs['instance_id'],
'current_host': current_host,
'hosts': self.get_hosts()})
return initial
class DetailView(views.DetailView):
tab_group_class = tabs.AdminInstanceDetailTabs
redirect_url = 'horizon:admin:instances:index'
image_url = 'horizon:admin:images:detail'
volume_url = 'horizon:admin:volumes:detail'
def _get_actions(self, instance):
table = project_tables.AdminInstancesTable(self.request)
return table.render_row_actions(instance)
|
apache-2.0
| -8,192,718,020,581,848,000 | 36.996078 | 79 | 0.611931 | false |
citrix-openstack-build/heat
|
heat/engine/resources/instance.py
|
1
|
22792
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.engine import signal_responder
from heat.engine import clients
from heat.engine import resource
from heat.engine import scheduler
from heat.engine.resources import nova_utils
from heat.engine.resources import volume
from heat.common import exception
from heat.engine.resources.network_interface import NetworkInterface
from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging
logger = logging.getLogger(__name__)
class Restarter(signal_responder.SignalResponder):
properties_schema = {
'InstanceId': {
'Type': 'String',
'Required': True,
'Description': _('Instance ID to be restarted.')}}
attributes_schema = {
"AlarmUrl": _("A signed url to handle the alarm "
"(Heat extension).")
}
def _find_resource(self, resource_id):
'''
Return the resource with the specified instance ID, or None if it
cannot be found.
'''
for resource in self.stack:
if resource.resource_id == resource_id:
return resource
return None
def handle_signal(self, details=None):
if details is None:
alarm_state = 'alarm'
else:
alarm_state = details.get('state', 'alarm').lower()
logger.info('%s Alarm, new state %s' % (self.name, alarm_state))
if alarm_state != 'alarm':
return
victim = self._find_resource(self.properties['InstanceId'])
if victim is None:
logger.info('%s Alarm, can not find instance %s' %
(self.name, self.properties['InstanceId']))
return
logger.info('%s Alarm, restarting resource: %s' %
(self.name, victim.name))
self.stack.restart_resource(victim.name)
def _resolve_attribute(self, name):
'''
heat extension: "AlarmUrl" returns the url to post to the policy
when there is an alarm.
'''
if name == 'AlarmUrl' and self.resource_id is not None:
return unicode(self._get_signed_url())
class Instance(resource.Resource):
# AWS does not require InstanceType but Heat does because the nova
# create api call requires a flavor
tags_schema = {'Key': {'Type': 'String',
'Required': True},
'Value': {'Type': 'String',
'Required': True}}
properties_schema = {
'ImageId': {
'Type': 'String',
'Required': True,
'Description': _('Glance image ID or name.')},
'InstanceType': {
'Type': 'String',
'Required': True,
'Description': _('Nova instance type (flavor).')},
'KeyName': {
'Type': 'String',
'Description': _('Optional Nova keypair name.')},
'AvailabilityZone': {
'Type': 'String',
'Description': _('Availability zone to launch the instance in.')},
'DisableApiTermination': {
'Type': 'String',
'Implemented': False,
'Description': _('Not Implemented.')},
'KernelId': {
'Type': 'String',
'Implemented': False,
'Description': _('Not Implemented.')},
'Monitoring': {
'Type': 'Boolean',
'Implemented': False,
'Description': _('Not Implemented.')},
'PlacementGroupName': {
'Type': 'String',
'Implemented': False,
'Description': _('Not Implemented.')},
'PrivateIpAddress': {
'Type': 'String',
'Implemented': False,
'Description': _('Not Implemented.')},
'RamDiskId': {
'Type': 'String',
'Implemented': False,
'Description': _('Not Implemented.')},
'SecurityGroups': {
'Type': 'List',
'Description': _('Security group names to assign.')},
'SecurityGroupIds': {
'Type': 'List',
'Description': _('Security group IDs to assign.')},
'NetworkInterfaces': {
'Type': 'List',
'Description': _('Network interfaces to associate with '
'instance.')},
'SourceDestCheck': {
'Type': 'Boolean',
'Implemented': False,
'Description': _('Not Implemented.')},
'SubnetId': {
'Type': 'String',
'Description': _('Subnet ID to launch instance in.')},
'Tags': {
'Type': 'List',
'Schema': {'Type': 'Map', 'Schema': tags_schema},
'Description': _('Tags to attach to instance.')},
'NovaSchedulerHints': {
'Type': 'List',
'Schema': {'Type': 'Map', 'Schema': tags_schema},
'Description': _('Scheduler hints to pass '
'to Nova (Heat extension).')},
'Tenancy': {
'Type': 'String',
'AllowedValues': ['dedicated', 'default'],
'Implemented': False,
'Description': _('Not Implemented.')},
'UserData': {
'Type': 'String',
'Description': _('User data to pass to instance.')},
'Volumes': {
'Type': 'List',
'Description': _('Volumes to attach to instance.')}}
attributes_schema = {'AvailabilityZone': _('The Availability Zone where '
'the specified instance is '
'launched.'),
'PrivateDnsName': _('Private DNS name of the'
' specified instance.'),
'PublicDnsName': _('Public DNS name of the specified '
'instance.'),
'PrivateIp': _('Private IP address of the specified '
'instance.'),
'PublicIp': _('Public IP address of the specified '
'instance.')}
update_allowed_keys = ('Metadata', 'Properties')
update_allowed_properties = ('InstanceType',)
def __init__(self, name, json_snippet, stack):
super(Instance, self).__init__(name, json_snippet, stack)
self.ipaddress = None
self.mime_string = None
def _set_ipaddress(self, networks):
'''
Read the server's IP address from a list of networks provided by Nova
'''
# Just record the first ipaddress
for n in networks:
if len(networks[n]) > 0:
self.ipaddress = networks[n][0]
break
def _ipaddress(self):
'''
Return the server's IP address, fetching it from Nova if necessary
'''
if self.ipaddress is None:
self.ipaddress = nova_utils.server_to_ipaddress(
self.nova(), self.resource_id)
return self.ipaddress or '0.0.0.0'
def _resolve_attribute(self, name):
res = None
if name == 'AvailabilityZone':
res = self.properties['AvailabilityZone']
elif name in ['PublicIp', 'PrivateIp', 'PublicDnsName',
'PrivateDnsName']:
res = self._ipaddress()
logger.info('%s._resolve_attribute(%s) == %s' % (self.name, name, res))
return unicode(res) if res else None
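    # NetworkInterfaces entries may be bare port IDs or dicts carrying
    # 'NetworkInterfaceId' and 'DeviceIndex'; _build_nics normalizes both
    # forms, orders them by device index and returns {'port-id': ...} entries
    # for Nova, falling back to creating a port on SubnetId when no
    # interfaces are given.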
def _build_nics(self, network_interfaces,
security_groups=None, subnet_id=None):
nics = None
if network_interfaces:
unsorted_nics = []
for entry in network_interfaces:
nic = (entry
if not isinstance(entry, basestring)
else {'NetworkInterfaceId': entry,
'DeviceIndex': len(unsorted_nics)})
unsorted_nics.append(nic)
sorted_nics = sorted(unsorted_nics,
key=lambda nic: int(nic['DeviceIndex']))
nics = [{'port-id': nic['NetworkInterfaceId']}
for nic in sorted_nics]
else:
# if SubnetId property in Instance, ensure subnet exists
if subnet_id:
neutronclient = self.neutron()
network_id = NetworkInterface.network_id_from_subnet_id(
neutronclient, subnet_id)
# if subnet verified, create a port to use this subnet
# if port is not created explicitly, nova will choose
# the first subnet in the given network.
if network_id:
fixed_ip = {'subnet_id': subnet_id}
props = {
'admin_state_up': True,
'network_id': network_id,
'fixed_ips': [fixed_ip]
}
if security_groups:
props['security_groups'] = \
self._get_security_groups_id(security_groups)
port = neutronclient.create_port({'port': props})['port']
nics = [{'port-id': port['id']}]
return nics
def _get_security_groups_id(self, security_groups):
"""Extract security_groups ids from security group list
This function will be deprecated if Neutron client resolves security
group name to id internally.
Args:
security_groups : A list contains security_groups ids or names
Returns:
A list of security_groups ids.
"""
ids = []
response = self.neutron().list_security_groups(self.resource_id)
for item in response:
if item['security_groups'] is not None:
for security_group in security_groups:
for groups in item['security_groups']:
if groups['name'] == security_group \
and groups['id'] not in ids:
ids.append(groups['id'])
elif groups['id'] == security_group \
and groups['id'] not in ids:
ids.append(groups['id'])
return ids
def _get_security_groups(self):
security_groups = []
for property in ('SecurityGroups', 'SecurityGroupIds'):
if self.properties.get(property) is not None:
for sg in self.properties.get(property):
security_groups.append(sg)
if not security_groups:
security_groups = None
return security_groups
def get_mime_string(self, userdata):
if not self.mime_string:
self.mime_string = nova_utils.build_userdata(self, userdata)
return self.mime_string
def handle_create(self):
security_groups = self._get_security_groups()
userdata = self.properties['UserData'] or ''
flavor = self.properties['InstanceType']
availability_zone = self.properties['AvailabilityZone']
key_name = self.properties['KeyName']
if key_name:
# confirm keypair exists
nova_utils.get_keypair(self.nova(), key_name)
image_name = self.properties['ImageId']
image_id = nova_utils.get_image_id(self.nova(), image_name)
flavor_id = nova_utils.get_flavor_id(self.nova(), flavor)
tags = {}
if self.properties['Tags']:
for tm in self.properties['Tags']:
tags[tm['Key']] = tm['Value']
else:
tags = None
scheduler_hints = {}
if self.properties['NovaSchedulerHints']:
for tm in self.properties['NovaSchedulerHints']:
scheduler_hints[tm['Key']] = tm['Value']
else:
scheduler_hints = None
nics = self._build_nics(self.properties['NetworkInterfaces'],
security_groups=security_groups,
subnet_id=self.properties['SubnetId'])
server = None
try:
server = self.nova().servers.create(
name=self.physical_resource_name(),
image=image_id,
flavor=flavor_id,
key_name=key_name,
security_groups=security_groups,
userdata=self.get_mime_string(userdata),
meta=tags,
scheduler_hints=scheduler_hints,
nics=nics,
availability_zone=availability_zone)
finally:
# Avoid a race condition where the thread could be cancelled
# before the ID is stored
if server is not None:
self.resource_id_set(server.id)
return server, scheduler.TaskRunner(self._attach_volumes_task())
def _attach_volumes_task(self):
attach_tasks = (volume.VolumeAttachTask(self.stack,
self.resource_id,
volume_id,
device)
for volume_id, device in self.volumes())
return scheduler.PollingTaskGroup(attach_tasks)
def check_create_complete(self, cookie):
return self._check_active(cookie)
def _check_active(self, cookie):
server, volume_attach = cookie
if not volume_attach.started():
if server.status != 'ACTIVE':
server.get()
# Some clouds append extra (STATUS) strings to the status
short_server_status = server.status.split('(')[0]
if short_server_status in nova_utils.deferred_server_statuses:
return False
elif server.status == 'ACTIVE':
self._set_ipaddress(server.networks)
volume_attach.start()
return volume_attach.done()
elif server.status == 'ERROR':
fault = getattr(server, 'fault', {})
message = fault.get('message', 'Unknown')
code = fault.get('code', 500)
exc = exception.Error(_("Creation of server %(server)s "
"failed: %(message)s (%(code)s)") %
dict(server=server.name,
message=message,
code=code))
raise exc
else:
exc = exception.Error(_("Creation of server %(server)s failed "
"with unknown status: %(status)s") %
dict(server=server.name,
status=server.status))
raise exc
else:
return volume_attach.step()
def volumes(self):
"""
Return an iterator over (volume_id, device) tuples for all volumes
that should be attached to this instance.
"""
volumes = self.properties['Volumes']
if volumes is None:
return []
return ((vol['VolumeId'], vol['Device']) for vol in volumes)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if 'Metadata' in tmpl_diff:
self.metadata = tmpl_diff['Metadata']
if 'InstanceType' in prop_diff:
flavor = prop_diff['InstanceType']
flavor_id = nova_utils.get_flavor_id(self.nova(), flavor)
server = self.nova().servers.get(self.resource_id)
server.resize(flavor_id)
checker = scheduler.TaskRunner(nova_utils.check_resize,
server, flavor)
checker.start()
return checker
def check_update_complete(self, checker):
return checker.step() if checker is not None else True
def metadata_update(self, new_metadata=None):
'''
Refresh the metadata if new_metadata is None
'''
if new_metadata is None:
self.metadata = self.parsed_template('Metadata')
def validate(self):
'''
Validate any of the provided params
'''
res = super(Instance, self).validate()
if res:
return res
# check validity of key
key_name = self.properties.get('KeyName', None)
if key_name:
nova_utils.get_keypair(self.nova(), key_name)
# check validity of security groups vs. network interfaces
security_groups = self._get_security_groups()
if security_groups and self.properties.get('NetworkInterfaces'):
raise exception.ResourcePropertyConflict(
'SecurityGroups/SecurityGroupIds',
'NetworkInterfaces')
# make sure the image exists.
nova_utils.get_image_id(self.nova(), self.properties['ImageId'])
@scheduler.wrappertask
def _delete_server(self, server):
'''
Return a co-routine that deletes the server and waits for it to
disappear from Nova.
'''
yield self._detach_volumes_task()()
server.delete()
while True:
yield
try:
server.get()
except clients.novaclient.exceptions.NotFound:
self.resource_id = None
break
def _detach_volumes_task(self):
'''
Detach volumes from the instance
'''
detach_tasks = (volume.VolumeDetachTask(self.stack,
self.resource_id,
volume_id)
for volume_id, device in self.volumes())
return scheduler.PollingTaskGroup(detach_tasks)
def handle_delete(self):
'''
Delete an instance, blocking until it is disposed by OpenStack
'''
if self.resource_id is None:
return
try:
server = self.nova().servers.get(self.resource_id)
except clients.novaclient.exceptions.NotFound:
self.resource_id = None
return
server_delete_task = scheduler.TaskRunner(self._delete_server,
server=server)
server_delete_task.start()
return server_delete_task
def check_delete_complete(self, server_delete_task):
# if the resource was already deleted, server_delete_task will be None
if server_delete_task is None:
return True
else:
return server_delete_task.step()
def handle_suspend(self):
'''
Suspend an instance - note we do not wait for the SUSPENDED state,
this is polled for by check_suspend_complete in a similar way to the
create logic so we can take advantage of coroutines
'''
if self.resource_id is None:
raise exception.Error(_('Cannot suspend %s, resource_id not set') %
self.name)
try:
server = self.nova().servers.get(self.resource_id)
except clients.novaclient.exceptions.NotFound:
raise exception.NotFound(_('Failed to find instance %s') %
self.resource_id)
else:
logger.debug("suspending instance %s" % self.resource_id)
# We want the server.suspend to happen after the volume
# detachement has finished, so pass both tasks and the server
suspend_runner = scheduler.TaskRunner(server.suspend)
volumes_runner = scheduler.TaskRunner(self._detach_volumes_task())
return server, suspend_runner, volumes_runner
def check_suspend_complete(self, cookie):
server, suspend_runner, volumes_runner = cookie
if not volumes_runner.started():
volumes_runner.start()
if volumes_runner.done():
if not suspend_runner.started():
suspend_runner.start()
if suspend_runner.done():
if server.status == 'SUSPENDED':
return True
server.get()
logger.debug("%s check_suspend_complete status = %s" %
(self.name, server.status))
if server.status in list(nova_utils.deferred_server_statuses +
['ACTIVE']):
return server.status == 'SUSPENDED'
else:
raise exception.Error(_(' nova reported unexpected '
'instance[%(instance)s] '
'status[%(status)s]') %
{'instance': self.name,
'status': server.status})
else:
suspend_runner.step()
else:
return volumes_runner.step()
def handle_resume(self):
'''
Resume an instance - note we do not wait for the ACTIVE state,
this is polled for by check_resume_complete in a similar way to the
create logic so we can take advantage of coroutines
'''
if self.resource_id is None:
raise exception.Error(_('Cannot resume %s, resource_id not set') %
self.name)
try:
server = self.nova().servers.get(self.resource_id)
except clients.novaclient.exceptions.NotFound:
raise exception.NotFound(_('Failed to find instance %s') %
self.resource_id)
else:
logger.debug("resuming instance %s" % self.resource_id)
server.resume()
return server, scheduler.TaskRunner(self._attach_volumes_task())
def check_resume_complete(self, cookie):
return self._check_active(cookie)
def resource_mapping():
return {
'AWS::EC2::Instance': Instance,
'OS::Heat::HARestarter': Restarter,
}
|
apache-2.0
| 2,669,817,716,314,624,000 | 37.305882 | 79 | 0.524482 | false |
COSMOGRAIL/COSMOULINE
|
pipe/4_norm_scripts/4c_fac_makecombipngmap_NU.py
|
1
|
3080
|
#
# Similar to 1a_checkalistars, we draw the alignment stars, this time on the combi image (to make a "nice" map)
#
execfile("../config.py")
from kirbybase import KirbyBase, KBError
from variousfct import *
import star
#import shutil
import f2n
#from datetime import datetime, timedelta
# Read reference image info from database
db = KirbyBase()
refimage = db.select(imgdb, ['imgname'], [refimgname], returnType='dict')
refimage = refimage[0]
refsexcat = os.path.join(alidir, refimage['imgname'] + ".cat")
refautostars = star.readsexcat(refsexcat)
refautostars = star.sortstarlistbyflux(refautostars)
refscalingfactor = refimage['scalingfactor']
# read and identify the manual reference catalog
refmanstars = star.readmancat(alistarscat) # So these are the "manual" star coordinates
id = star.listidentify(refmanstars, refautostars, tolerance = identtolerance) # We find the corresponding precise sextractor coordinates
preciserefmanstars = star.sortstarlistbyflux(id["match"])
maxalistars = len(refmanstars)
print "%i stars in your manual star catalog." % (len(refmanstars))
print "%i stars among them could be found in the sextractor catalog." % (len(preciserefmanstars))
# We convert the star objects into dictionaries, to plot them using f2n.py
# (f2n.py does not use these "star" objects...)
refmanstarsasdicts = [{"name":s.name, "x":s.x, "y":s.y} for s in refmanstars]
preciserefmanstarsasdicts = [{"name":s.name, "x":s.x, "y":s.y} for s in preciserefmanstars]
refautostarsasdicts = [{"name":s.name, "x":s.x, "y":s.y} for s in refautostars]
#print refmanstarsasdicts
combifitsfile = os.path.join(workdir, "%s.fits" % combibestkey)
#combifitsfile = os.path.join(workdir, "ali", "%s_ali.fits" % refimgname)
f2nimg = f2n.fromfits(combifitsfile)
f2nimg.setzscale(z1=-5, z2=1000)
#f2nimg.rebin(2)
f2nimg.makepilimage(scale = "log", negative = False)
#f2nimg.drawstarlist(refautostarsasdicts, r = 30, colour = (150, 150, 150))
#f2nimg.drawstarlist(preciserefmanstarsasdicts, r = 7, colour = (255, 0, 0))
#f2nimg.writeinfo(["Sextractor stars (flag-filtered) : %i" % len(refautostarsasdicts)], colour = (150, 150, 150))
#f2nimg.writeinfo(["","Identified alignment stars with corrected sextractor coordinates : %i" % len(preciserefmanstarsasdicts)], colour = (255, 0, 0))
# We draw the rectangles around qso and empty region :
lims = [map(int,x.split(':')) for x in lensregion[1:-1].split(',')]
#f2nimg.drawrectangle(lims[0][0], lims[0][1], lims[1][0], lims[1][1], colour=(0,255,0), label = "Lens")
lims = [map(int,x.split(':')) for x in emptyregion[1:-1].split(',')]
#f2nimg.drawrectangle(lims[0][0], lims[0][1], lims[1][0], lims[1][1], colour=(0,255,0), label = "Empty")
f2nimg.writetitle("%s / %s" % (xephemlens.split(",")[0], combibestkey))
pngpath = os.path.join(workdir, "%s.png" % combibestkey)
f2nimg.tonet(pngpath)
print "I have written the map into :"
print pngpath
# print "Do you want to clean the selected image to save some space on the disk ? "
# proquest(True)
#
# combidir = os.path.join(workdir, combibestkey)
# os.remove(combidir)
|
gpl-3.0
| -5,670,558,067,892,289,000 | 36.560976 | 150 | 0.721429 | false |
MediaKraken/MediaKraken_Deployment
|
source/tools/rename_media_files.py
|
1
|
2715
|
"""
Copyright (C) 2019 Quinn D Granfor <spootdev@gmail.com>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2, as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License version 2 for more details.
You should have received a copy of the GNU General Public License
version 2 along with this program; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
import os
from os import walk # pylint: disable=C0412
from guessit import guessit
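# guessit() parses release-style file names into a dict of fields; for a name
# like 'Some.Movie.2019.mkv' it would typically return entries such as
# 'title': 'Some Movie' and 'year': 2019 (illustrative values, not from this
# repository), which is what the checks below rely on.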
media_files = 0
for root, dirs, files in walk('X:\\zz_movie'):
for file_name_loop in files:
filename, file_extension = os.path.splitext(file_name_loop)
if file_extension in ('.mkv', '.mp4', '.iso'):
guessit_name = guessit(file_name_loop)
if 'title' in guessit_name:
if 'year' in guessit_name:
media_files += 1
print(filename, ':',
guessit_name['title'] + ' (' + str(guessit_name['year']) + ')', flush=True)
user_answer = input('Should I rename/move it?')
if user_answer == 'y':
os.rename(os.path.join(root, file_name_loop), os.path.join(
'X:\\zz_movie', guessit_name['title']
+ ' (' + str(
guessit_name['year']) + ')' + file_extension))
# print(os.path.join(root, file_name_loop), flush=True)
else:
print(filename, flush=True)
print(root, flush=True)
guessit_name = guessit(root)
if 'title' in guessit_name:
if 'year' in guessit_name:
media_files += 1
print(root, ':',
guessit_name['title'] + ' (' + str(guessit_name['year']) + ')', flush=True)
user_answer = input('Should I rename/move it?')
if user_answer == 'y':
os.rename(os.path.join(root, filename + file_extension), os.path.join(
'X:\\zz_movie', guessit_name['title']
+ ' (' + str(
guessit_name['year']) + ')' + file_extension))
print(media_files, flush=True)
|
gpl-3.0
| -2,735,211,053,386,424,300 | 46.631579 | 105 | 0.52523 | false |
leeping/mdtraj
|
tests/test_pdb.py
|
1
|
10857
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2017 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors: Jason Swails, Matthew Harrigan
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from __future__ import print_function, division
import numpy as np
import re, os, tempfile
from mdtraj.formats.pdb import pdbstructure
from mdtraj.formats.pdb.pdbstructure import PdbStructure
from mdtraj.testing import eq
from mdtraj import load, load_pdb
from mdtraj import Topology
import pytest
import warnings
fd, temp = tempfile.mkstemp(suffix='.pdb')
os.close(fd)
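# Shared scratch .pdb path used by the save/load round-trip tests below;
# it is removed again in teardown_module.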
def teardown_module(module):
"""remove the temporary file created by tests in this file
this gets automatically called by nose"""
os.unlink(temp)
def test_pdbread(get_fn):
pdb = get_fn('native.pdb')
p = load(pdb)
def test_pdbwrite(get_fn):
pdb = get_fn('native.pdb')
p = load(pdb)
p.save(temp)
r = load(temp)
eq(p.xyz, r.xyz)
def test_load_multiframe(get_fn):
with open(get_fn('multiframe.pdb')) as f:
pdb = PdbStructure(f)
assert eq(len(pdb.models), 2)
assert eq(len(pdb.models[0].chains), 1)
assert eq(len(pdb.models[0].chains[0].residues), 3)
assert eq(len(list(pdb.models[0].iter_atoms())), 22)
assert eq(len(pdb.models[1].chains), 1)
assert eq(len(pdb.models[1].chains[0].residues), 3)
assert eq(len(list(pdb.models[1].iter_atoms())), 22)
t = load(get_fn('multiframe.pdb'))
assert eq(t.n_frames, 2)
assert eq(t.n_atoms, 22)
assert eq(t.xyz[0], t.xyz[1])
def test_4ZUO(get_fn):
t = load(get_fn('4ZUO.pdb'))
eq(t.n_frames, 1)
eq(t.n_atoms, 6200)
# this is a random line from the file
#ATOM 1609 O GLY A 201 -25.423 13.774 -25.234 1.00 8.92 O
atom = list(t.top.atoms)[1525]
eq(atom.element.symbol, 'O')
eq(atom.residue.name, 'GLY')
eq(atom.index, 1525)
eq(t.xyz[0, 1525], np.array([-25.423, 13.774, -25.234]) / 10) # converting to NM
# this is atom 1577 in the PDB
#CONECT 1577 5518
#ATOM 1577 O HIS A 197 -18.521 9.724 -32.805 1.00 8.81 O
#HETATM 5518 K K A 402 -19.609 10.871 -35.067 1.00 9.11 K
atom = list(t.top.atoms)[1493]
eq(atom.name, 'O')
eq(atom.residue.name, 'HIS')
eq([(a1.index, a2.index) for a1, a2 in t.top.bonds if a1.index == 1493 or a2.index == 1493],
[(1492, 1493), (1493, 5129)])
# that first bond is from a conect record
def test_2EQQ_0(get_fn):
# this is an nmr structure with 20 models
t = load(get_fn('2EQQ.pdb'))
assert eq(t.n_frames, 20)
assert eq(t.n_atoms, 423)
assert eq(len(list(t.top.residues)), 28)
def test_1vii_solvated_with_ligand(get_fn):
traj = load(get_fn("1vii_sustiva_water.pdb"))
eq(len(list(traj.top.bonds)), 5156)
eq(len([bond for bond in traj.top.bonds if bond[0].residue.name == 'LIG']), 32)
traj.save(temp)
traj = load(temp)
eq(len(list(traj.top.bonds)), 5156)
eq(len([bond for bond in traj.top.bonds if bond[0].residue.name == 'LIG']), 32)
def test_write_large(get_fn):
traj = load(get_fn('native.pdb'))
traj.xyz.fill(123456789)
with pytest.raises(ValueError):
traj.save(temp)
def test_write_large_2(get_fn):
traj = load(get_fn('native.pdb'))
traj.xyz.fill(-123456789)
with pytest.raises(ValueError):
traj.save(temp)
def test_pdbstructure_0():
pdb_lines = [
"ATOM 188 N CYS A 42 40.714 -5.292 12.123 1.00 11.29 N ",
"ATOM 189 CA CYS A 42 39.736 -5.883 12.911 1.00 10.01 C ",
"ATOM 190 C CYS A 42 40.339 -6.654 14.087 1.00 22.28 C ",
"ATOM 191 O CYS A 42 41.181 -7.530 13.859 1.00 13.70 O ",
"ATOM 192 CB CYS A 42 38.949 -6.825 12.002 1.00 9.67 C ",
"ATOM 193 SG CYS A 42 37.557 -7.514 12.922 1.00 20.12 S "
]
res = pdbstructure.Residue("CYS", 42)
for l in pdb_lines:
res._add_atom(pdbstructure.Atom(l))
for i, atom in enumerate(res):
eq(pdb_lines[i], str(atom))
def test_pdbstructure_1():
pdb_lines = [
"ATOM 188 N CYS A 42 40.714 -5.292 12.123 1.00 11.29 N",
"ATOM 189 CA CYS A 42 39.736 -5.883 12.911 1.00 10.01 C",
"ATOM 190 C CYS A 42 40.339 -6.654 14.087 1.00 22.28 C",
"ATOM 191 O CYS A 42 41.181 -7.530 13.859 1.00 13.70 O",
"ATOM 192 CB CYS A 42 38.949 -6.825 12.002 1.00 9.67 C",
"ATOM 193 SG CYS A 42 37.557 -7.514 12.922 1.00 20.12 S"
]
positions = np.array([
[ 40.714, -5.292, 12.123],
[ 39.736, -5.883, 12.911],
[ 40.339, -6.654, 14.087],
[ 41.181, -7.53, 13.859],
[ 38.949, -6.825, 12.002],
[ 37.557, -7.514, 12.922]
])
res = pdbstructure.Residue("CYS", 42)
for l in pdb_lines:
res._add_atom(pdbstructure.Atom(l))
for i, c in enumerate(res.iter_positions()):
eq(c, positions[i])
def test_pdbstructure_2():
atom = pdbstructure.Atom("ATOM 2209 CB TYR A 299 6.167 22.607 20.046 1.00 8.12 C")
expected = np.array([6.167, 22.607, 20.046])
for i, c in enumerate(atom.iter_coordinates()):
eq(expected[i], c)
def test_pdbstructure_3():
loc = pdbstructure.Atom.Location(' ', [1,2,3], 1.0, 20.0, "XXX")
expected = [1, 2, 3]
for i, c in enumerate(loc):
eq(expected[i], c)
def test_pdb_from_url():
# load pdb from URL
t1 = load_pdb('http://www.rcsb.org/pdb/files/4ZUO.pdb.gz')
t2 = load_pdb('http://www.rcsb.org/pdb/files/4ZUO.pdb')
eq(t1.n_frames, 1)
eq(t2.n_frames, 1)
eq(t1.n_atoms, 6200)
eq(t2.n_atoms, 6200)
def test_3nch_conect(get_fn):
# This has conect entries that use all available digits, good failure case.
t1 = load_pdb(get_fn('3nch.pdb.gz'))
top, bonds = t1.top.to_dataframe()
bonds = dict(((a, b), 1) for (a, b, _, _) in bonds)
eq(bonds[19782, 19783], 1) # Check that last SO4 molecule has right bonds
eq(bonds[19782, 19784], 1) # Check that last SO4 molecule has right bonds
eq(bonds[19782, 19785], 1) # Check that last SO4 molecule has right bonds
eq(bonds[19782, 19786], 1) # Check that last SO4 molecule has right bonds
def test_3nch_serial_resSeq(get_fn):
# If you use zero-based indexing, this PDB has quite large gaps in residue and atom numbering, so it's a good test case. See #528
# Gold standard values obtained via
# cat 3nch.pdb |grep ATM|tail -n 5
# HETATM19787 S SO4 D 804 -4.788 -9.395 22.515 1.00121.87 S
# HETATM19788 O1 SO4 D 804 -3.815 -9.511 21.425 1.00105.97 O
# HETATM19789 O2 SO4 D 804 -5.989 -8.733 21.999 1.00116.13 O
# HETATM19790 O3 SO4 D 804 -5.130 -10.726 23.043 1.00108.74 O
# HETATM19791 O4 SO4 D 804 -4.210 -8.560 23.575 1.00112.54 O
t1 = load_pdb(get_fn('3nch.pdb.gz'))
top, bonds = t1.top.to_dataframe()
top2 = Topology.from_dataframe(top, bonds)
eq(t1.top, top2)
top = top.set_index('serial') # Index by the actual data in the PDB
eq(str(top.ix[19791]["name"]), "O4")
eq(str(top.ix[19787]["name"]), "S")
eq(str(top.ix[19787]["resName"]), "SO4")
eq(int(top.ix[19787]["resSeq"]), 804)
def test_1ncw(get_fn):
t1 = load_pdb(get_fn('1ncw.pdb.gz'))
def test_1vii_url_and_gz(get_fn):
t1 = load_pdb('http://www.rcsb.org/pdb/files/1vii.pdb.gz')
t2 = load_pdb('http://www.rcsb.org/pdb/files/1vii.pdb')
t3 = load_pdb(get_fn('1vii.pdb.gz'))
t4 = load_pdb(get_fn('1vii.pdb'))
eq(t1.n_frames, 1)
eq(t1.n_frames, t2.n_frames)
eq(t1.n_frames, t3.n_frames)
eq(t1.n_frames, t4.n_frames)
eq(t1.n_atoms, t2.n_atoms)
eq(t1.n_atoms, t3.n_atoms)
eq(t1.n_atoms, t4.n_atoms)
def test_segment_id(get_fn):
pdb = load_pdb(get_fn('ala_ala_ala.pdb'))
pdb.save_pdb(temp)
pdb2 = load_pdb(temp)
correct_segment_id = 'AAL'
# check that all segment ids are set correctly
for ridx,r in enumerate(pdb.top.residues):
assert r.segment_id == correct_segment_id, "residue %i (0-indexed) does not have segment_id set correctly from ala_ala_ala.pdb"%(ridx)
# check that all segment ids are set correctly after a new pdb file is written
for ridx,(r1,r2) in enumerate(zip(pdb.top.residues,pdb2.top.residues)):
assert r1.segment_id == r2.segment_id, "segment_id of residue %i (0-indexed) in ala_ala_ala.pdb does not agree with value in after being written out to a new pdb file"%(ridx)
def test_bfactors(get_fn):
pdb = load_pdb(get_fn('native.pdb'))
bfactors0 = np.arange(pdb.n_atoms) / 2.0 - 4.0 # (Get some decimals..)
pdb.save_pdb(temp, bfactors=bfactors0)
with open(temp, 'r') as fh:
atom_lines = [line for line in fh.readlines() if re.search(r'^ATOM', line)]
str_bfactors1 = [l[60:66] for l in atom_lines]
flt_bfactors1 = np.array([float(i) for i in str_bfactors1])
# check formatting has a space at the beginning and not at the end
frmt = np.array([(s[0] == ' ') and (s[-1] != ' ') for s in str_bfactors1])
assert np.all(frmt)
# make sure the numbers are actually the same
eq(bfactors0, flt_bfactors1)
def test_hex(get_fn):
pdb = load_pdb(get_fn('water_hex.pdb.gz'))
assert pdb.n_atoms == 100569
assert pdb.n_residues == 33523
pdb.save(temp)
def test_dummy_pdb_box_detection(get_fn):
with warnings.catch_warnings(record=True) as war:
traj = load(get_fn('2koc.pdb'))
assert 'Unlikely unit cell' in str(war[0].message)
assert traj.unitcell_lengths is None, 'Expected dummy box to be deleted'
|
lgpl-2.1
| 1,569,474,710,171,645,400 | 36.567474 | 182 | 0.592521 | false |
pauloschilling/sentry
|
src/sentry/models/user.py
|
1
|
4560
|
"""
sentry.models.user
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import warnings
from django.contrib.auth.models import AbstractBaseUser, UserManager
from django.db import IntegrityError, models, transaction
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.db.models import BaseManager, BaseModel, BoundedAutoField
class UserManager(BaseManager, UserManager):
pass
class User(BaseModel, AbstractBaseUser):
id = BoundedAutoField(primary_key=True)
username = models.CharField(_('username'), max_length=128, unique=True)
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
email = models.EmailField(_('email address'), blank=True)
is_staff = models.BooleanField(
_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
is_active = models.BooleanField(
_('active'), default=True,
help_text=_('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
is_superuser = models.BooleanField(
_('superuser status'), default=False,
help_text=_('Designates that this user has all permissions without '
'explicitly assigning them.'))
is_managed = models.BooleanField(
_('managed'), default=False,
help_text=_('Designates whether this user should be treated as '
'managed. Select this to disallow the user from '
'modifying their account (username, password, etc).'))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
objects = UserManager(cache_fields=['pk'])
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
class Meta:
app_label = 'sentry'
db_table = 'auth_user'
verbose_name = _('user')
verbose_name_plural = _('users')
def delete(self):
if self.username == 'sentry':
raise Exception('You cannot delete the "sentry" user as it is required by Sentry.')
return super(User, self).delete()
def save(self, *args, **kwargs):
if not self.username:
self.username = self.email
return super(User, self).save(*args, **kwargs)
def has_perm(self, perm_name):
warnings.warn('User.has_perm is deprecated', DeprecationWarning)
return self.is_superuser
def has_module_perms(self, app_label):
# the admin requires this method
return self.is_superuser
def get_full_name(self):
return self.first_name
def get_short_name(self):
return self.username
def merge_to(from_user, to_user):
# TODO: we could discover relations automatically and make this useful
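        # Re-point related rows from from_user to to_user; per-row updates that raise
        # IntegrityError (to_user already has an equivalent row) are silently skipped.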
from sentry.models import (
AuditLogEntry, Activity, AuthIdentity, GroupBookmark,
OrganizationMember, UserOption
)
for obj in OrganizationMember.objects.filter(user=from_user):
with transaction.atomic():
try:
obj.update(user=to_user)
except IntegrityError:
pass
for obj in GroupBookmark.objects.filter(user=from_user):
with transaction.atomic():
try:
obj.update(user=to_user)
except IntegrityError:
pass
for obj in UserOption.objects.filter(user=from_user):
with transaction.atomic():
try:
obj.update(user=to_user)
except IntegrityError:
pass
Activity.objects.filter(
user=from_user,
).update(user=to_user)
AuditLogEntry.objects.filter(
actor=from_user,
).update(actor=to_user)
AuditLogEntry.objects.filter(
target_user=from_user,
).update(target_user=to_user)
AuthIdentity.objects.filter(
user=from_user,
).update(user=to_user)
def get_display_name(self):
return self.first_name or self.email or self.username
def is_active_superuser(self):
# TODO(dcramer): add VPN support via INTERNAL_IPS + ipaddr ranges
return self.is_superuser
|
bsd-3-clause
| -6,004,456,506,088,527,000 | 34.348837 | 95 | 0.617105 | false |
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/tools/filters/gtf_to_bedgraph_converter.py
|
1
|
2931
|
#!/usr/bin/env python
import os, sys, tempfile
assert sys.version_info[:2] >= ( 2, 4 )
def __main__():
# Read parms.
input_name = sys.argv[1]
output_name = sys.argv[2]
attribute_name = sys.argv[3]
# Create temp files.
tmp_name1 = tempfile.NamedTemporaryFile().name
tmp_name2 = tempfile.NamedTemporaryFile().name
# Do conversion.
skipped_lines = 0
first_skipped_line = 0
out = open( tmp_name1, 'w' )
# Write track data to temporary file.
i = 0
for i, line in enumerate( file( input_name ) ):
line = line.rstrip( '\r\n' )
if line and not line.startswith( '#' ):
try:
elems = line.split( '\t' )
start = str( int( elems[3] ) - 1 ) # GTF coordinates are 1-based, BedGraph are 0-based.
strand = elems[6]
if strand not in ['+', '-']:
strand = '+'
attributes_list = elems[8].split(";")
attributes = {}
for name_value_pair in attributes_list:
pair = name_value_pair.strip().split(" ")
name = pair[0].strip()
if name == '':
continue
# Need to strip double quote from values
value = pair[1].strip(" \"")
attributes[name] = value
value = attributes[ attribute_name ]
                # GTF format: chrom, source, name, chromStart, chromEnd, score, strand, frame, attributes.
# BedGraph format: chrom, chromStart, chromEnd, value
out.write( "%s\t%s\t%s\t%s\n" %( elems[0], start, elems[4], value ) )
except:
skipped_lines += 1
if not first_skipped_line:
first_skipped_line = i + 1
else:
skipped_lines += 1
if not first_skipped_line:
first_skipped_line = i + 1
out.close()
# Sort tmp file by chromosome name and chromosome start to create ordered track data.
cmd = "sort -k1,1 -k2,2n < %s > %s" % ( tmp_name1, tmp_name2 )
try:
os.system(cmd)
os.remove(tmp_name1)
except Exception, ex:
sys.stderr.write( "%s\n" % ex )
sys.exit(1)
# Create bedgraph file by combining track definition with ordered track data.
cmd = "echo 'track type=bedGraph' | cat - %s > %s " % ( tmp_name2, output_name )
try:
os.system(cmd)
os.remove(tmp_name2)
except Exception, ex:
sys.stderr.write( "%s\n" % ex )
sys.exit(1)
info_msg = "%i lines converted to BEDGraph. " % ( i + 1 - skipped_lines )
if skipped_lines > 0:
info_msg += "Skipped %d blank/comment/invalid lines starting with line #%d." %( skipped_lines, first_skipped_line )
print info_msg
if __name__ == "__main__": __main__()
|
gpl-3.0
| 92,477,252,772,895,820 | 35.6375 | 123 | 0.513477 | false |
georgemarshall/django
|
tests/auth_tests/urls.py
|
14
|
6515
|
from django.contrib import admin
from django.contrib.auth import views
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.urls import urlpatterns as auth_urlpatterns
from django.contrib.messages.api import info
from django.http import HttpRequest, HttpResponse
from django.shortcuts import render
from django.template import RequestContext, Template
from django.urls import path, re_path, reverse_lazy
from django.views.decorators.cache import never_cache
from django.views.i18n import set_language
class CustomRequestAuthenticationForm(AuthenticationForm):
def __init__(self, request, *args, **kwargs):
assert isinstance(request, HttpRequest)
super().__init__(request, *args, **kwargs)
@never_cache
def remote_user_auth_view(request):
"Dummy view for remote user tests"
t = Template("Username is {{ user }}.")
c = RequestContext(request, {})
return HttpResponse(t.render(c))
def auth_processor_no_attr_access(request):
render(request, 'context_processors/auth_attrs_no_access.html')
# *After* rendering, we check whether the session was accessed
return render(request,
'context_processors/auth_attrs_test_access.html',
{'session_accessed': request.session.accessed})
def auth_processor_attr_access(request):
render(request, 'context_processors/auth_attrs_access.html')
return render(request,
'context_processors/auth_attrs_test_access.html',
{'session_accessed': request.session.accessed})
def auth_processor_user(request):
return render(request, 'context_processors/auth_attrs_user.html')
def auth_processor_perms(request):
return render(request, 'context_processors/auth_attrs_perms.html')
def auth_processor_perm_in_perms(request):
return render(request, 'context_processors/auth_attrs_perm_in_perms.html')
def auth_processor_messages(request):
info(request, "Message 1")
return render(request, 'context_processors/auth_attrs_messages.html')
def userpage(request):
pass
@permission_required('unknown.permission')
def permission_required_redirect(request):
pass
@permission_required('unknown.permission', raise_exception=True)
def permission_required_exception(request):
pass
@login_required
@permission_required('unknown.permission', raise_exception=True)
def login_and_permission_required_exception(request):
pass
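# uidb64/token pair consumed by the password-reset confirm URLs defined below.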
uid_token = r'(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})'
# special urls for auth test cases
urlpatterns = auth_urlpatterns + [
path('logout/custom_query/', views.LogoutView.as_view(redirect_field_name='follow')),
path('logout/next_page/', views.LogoutView.as_view(next_page='/somewhere/')),
path('logout/next_page/named/', views.LogoutView.as_view(next_page='password_reset')),
path('logout/allowed_hosts/', views.LogoutView.as_view(success_url_allowed_hosts={'otherserver'})),
path('remote_user/', remote_user_auth_view),
path('password_reset_from_email/', views.PasswordResetView.as_view(from_email='staffmember@example.com')),
path(
'password_reset_extra_email_context/',
views.PasswordResetView.as_view(
extra_email_context={'greeting': 'Hello!', 'domain': 'custom.example.com'},
),
),
path(
'password_reset/custom_redirect/',
views.PasswordResetView.as_view(success_url='/custom/')),
path(
'password_reset/custom_redirect/named/',
views.PasswordResetView.as_view(success_url=reverse_lazy('password_reset'))),
path(
'password_reset/html_email_template/',
views.PasswordResetView.as_view(
html_email_template_name='registration/html_password_reset_email.html'
)),
re_path(
'^reset/custom/{}/$'.format(uid_token),
views.PasswordResetConfirmView.as_view(success_url='/custom/'),
),
re_path(
'^reset/custom/named/{}/$'.format(uid_token),
views.PasswordResetConfirmView.as_view(success_url=reverse_lazy('password_reset')),
),
re_path(
'^reset/custom/token/{}/$'.format(uid_token),
views.PasswordResetConfirmView.as_view(reset_url_token='set-passwordcustom'),
),
re_path(
'^reset/post_reset_login/{}/$'.format(uid_token),
views.PasswordResetConfirmView.as_view(post_reset_login=True),
),
re_path(
'^reset/post_reset_login_custom_backend/{}/$'.format(uid_token),
views.PasswordResetConfirmView.as_view(
post_reset_login=True,
post_reset_login_backend='django.contrib.auth.backends.AllowAllUsersModelBackend',
),
),
path('password_change/custom/',
views.PasswordChangeView.as_view(success_url='/custom/')),
path('password_change/custom/named/',
views.PasswordChangeView.as_view(success_url=reverse_lazy('password_reset'))),
path('login_required/', login_required(views.PasswordResetView.as_view())),
path('login_required_login_url/', login_required(views.PasswordResetView.as_view(), login_url='/somewhere/')),
path('auth_processor_no_attr_access/', auth_processor_no_attr_access),
path('auth_processor_attr_access/', auth_processor_attr_access),
path('auth_processor_user/', auth_processor_user),
path('auth_processor_perms/', auth_processor_perms),
path('auth_processor_perm_in_perms/', auth_processor_perm_in_perms),
path('auth_processor_messages/', auth_processor_messages),
path(
'custom_request_auth_login/',
views.LoginView.as_view(authentication_form=CustomRequestAuthenticationForm)),
re_path('^userpage/(.+)/$', userpage, name='userpage'),
path('login/redirect_authenticated_user_default/', views.LoginView.as_view()),
path('login/redirect_authenticated_user/',
views.LoginView.as_view(redirect_authenticated_user=True)),
path('login/allowed_hosts/',
views.LoginView.as_view(success_url_allowed_hosts={'otherserver'})),
path('permission_required_redirect/', permission_required_redirect),
path('permission_required_exception/', permission_required_exception),
path('login_and_permission_required_exception/', login_and_permission_required_exception),
path('setlang/', set_language, name='set_language'),
# This line is only required to render the password reset with is_admin=True
path('admin/', admin.site.urls),
]
|
bsd-3-clause
| 1,123,972,528,181,623,400 | 39.216049 | 114 | 0.698695 | false |
Micronaet/micronaet-product
|
show_photo_switch/__openerp__.py
|
1
|
1483
|
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'Show photo switch',
'version': '0.1',
'category': 'Product',
'description': '''
Parameter for user to show photo in product form
''',
'author': 'Micronaet S.r.l. - Nicola Riolini',
'website': 'http://www.micronaet.it',
'license': 'AGPL-3',
'depends': [
'base',
'product',
'quotation_photo',
],
'init_xml': [],
'demo': [],
'data': [
'photo_view.xml',
],
'active': False,
'installable': True,
'auto_install': False,
}
|
agpl-3.0
| 4,163,398,208,031,642,600 | 33.488372 | 79 | 0.549562 | false |
PaddlePaddle/Paddle
|
python/paddle/fluid/contrib/slim/tests/test_imperative_qat_channelwise.py
|
1
|
1457
|
# copyright (c) 2018 paddlepaddle authors. all rights reserved.
#
# licensed under the apache license, version 2.0 (the "license");
# you may not use this file except in compliance with the license.
# you may obtain a copy of the license at
#
# http://www.apache.org/licenses/license-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the license is distributed on an "as is" basis,
# without warranties or conditions of any kind, either express or implied.
# see the license for the specific language governing permissions and
# limitations under the license.
from __future__ import print_function
import os
import numpy as np
import random
import unittest
import logging
import paddle
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid.log_helper import get_logger
from test_imperative_qat import TestImperativeQat
paddle.enable_static()
os.environ["CPU_NUM"] = "1"
if core.is_compiled_with_cuda():
fluid.set_flags({"FLAGS_cudnn_deterministic": True})
_logger = get_logger(
__name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')
class TestImperativeQatChannelWise(TestImperativeQat):
def set_vars(self):
self.weight_quantize_type = 'channel_wise_abs_max'
self.activation_quantize_type = 'moving_average_abs_max'
print('weight_quantize_type', self.weight_quantize_type)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| -7,447,396,531,800,148,000 | 29.354167 | 74 | 0.736445 | false |
CityGenerator/Megacosm-Generator
|
tests/test_motivation.py
|
1
|
1391
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"Fully test this module's functionality through the use of fixtures."
from megacosm.generators import Motivation
from megacosm.generators import NPC
import unittest2 as unittest
import fixtures
import fakeredis
from config import TestConfiguration
class TestMotivation(unittest.TestCase):
def setUp(self):
self.redis = fakeredis.FakeRedis()
fixtures.motivation.import_fixtures(self)
fixtures.phobia.import_fixtures(self)
fixtures.npc.import_fixtures(self)
self.redis.lpush('npc_race','gnome')
def tearDown(self):
self.redis.flushall()
def test_random_motivation(self):
""" """
motivation = Motivation(self.redis)
self.assertNotEqual(motivation.text, '')
def test_motivation_w_npc(self):
""" """
npc = NPC(self.redis)
motivation = Motivation(self.redis, {'npc': npc})
self.assertNotEqual(motivation.text, '')
self.assertEqual(motivation.npc, npc)
self.assertNotEqual('%s' % motivation, '')
def test_motivation_w_fear(self):
""" """
npc = NPC(self.redis)
motivation = Motivation(self.redis, {'npc': npc, 'kind': 'fear'})
self.assertNotEqual(motivation.text, '')
self.assertEqual(motivation.npc, npc)
self.assertNotEqual('%s' % motivation, '')
|
gpl-2.0
| -8,748,794,249,495,837,000 | 27.387755 | 73 | 0.64486 | false |
pawelmhm/splash
|
splash/qwebpage.py
|
1
|
6186
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from collections import namedtuple
import sip
from PyQt5.QtWebKitWidgets import QWebPage, QWebView
from PyQt5.QtCore import QByteArray
from twisted.python import log
import six
from splash.har_builder import HarBuilder
RenderErrorInfo = namedtuple('RenderErrorInfo', 'type code text url')
class SplashQWebView(QWebView):
"""
QWebView subclass that handles 'close' requests.
"""
onBeforeClose = None
def closeEvent(self, event):
dont_close = False
if self.onBeforeClose:
dont_close = self.onBeforeClose()
if dont_close:
event.ignore()
else:
event.accept()
class SplashQWebPage(QWebPage):
"""
QWebPage subclass that:
* changes user agent;
* logs JS console messages;
* handles alert and confirm windows;
* returns additional info about render errors;
* logs HAR events;
* stores options for various Splash components.
"""
error_info = None
custom_user_agent = None
custom_headers = None
skip_custom_headers = False
navigation_locked = False
resource_timeout = 0
response_body_enabled = False
def __init__(self, verbosity=0):
super(QWebPage, self).__init__()
self.verbosity = verbosity
self.callbacks = {
"on_request": [],
"on_response_headers": [],
"on_response": [],
}
self.mainFrame().urlChanged.connect(self.on_url_changed)
self.mainFrame().titleChanged.connect(self.on_title_changed)
self.mainFrame().loadFinished.connect(self.on_load_finished)
self.mainFrame().initialLayoutCompleted.connect(self.on_layout_completed)
self.har = HarBuilder()
def reset_har(self):
self.har.reset()
def clear_callbacks(self, event=None):
"""
Unregister all callbacks for an event. If event is None
then all callbacks are removed.
"""
if event is None:
for ev in self.callbacks:
assert ev is not None
self.clear_callbacks(ev)
return
del self.callbacks[event][:]
def on_title_changed(self, title):
self.har.store_title(title)
def on_url_changed(self, url):
self.har.store_url(url)
def on_load_finished(self, ok):
self.har.store_timing("onLoad")
def on_layout_completed(self):
self.har.store_timing("onContentLoad")
def acceptNavigationRequest(self, webFrame, networkRequest, navigationType):
if self.navigation_locked:
return False
self.error_info = None
return super(SplashQWebPage, self).acceptNavigationRequest(webFrame, networkRequest, navigationType)
def javaScriptAlert(self, frame, msg):
return
def javaScriptConfirm(self, frame, msg):
return False
def javaScriptConsoleMessage(self, msg, line_number, source_id):
if self.verbosity >= 2:
log.msg("JsConsole(%s:%d): %s" % (source_id, line_number, msg), system='render')
def userAgentForUrl(self, url):
if self.custom_user_agent is None:
return super(SplashQWebPage, self).userAgentForUrl(url)
else:
return self.custom_user_agent
    # loadFinished signal handler receives ok=False in at least these cases:
# 1. when there is an error with the page (e.g. the page is not available);
# 2. when a redirect happened before all related resource are loaded;
# 3. when page sends headers that are not parsed correctly
# (e.g. a bad Content-Type).
# By implementing ErrorPageExtension we can catch (1) and
# distinguish it from (2) and (3).
def extension(self, extension, info=None, errorPage=None):
if extension == QWebPage.ErrorPageExtension:
# catch the error, populate self.errorInfo and return an error page
info = sip.cast(info, QWebPage.ErrorPageExtensionOption)
domain = 'Unknown'
if info.domain == QWebPage.QtNetwork:
domain = 'Network'
elif info.domain == QWebPage.Http:
domain = 'HTTP'
elif info.domain == QWebPage.WebKit:
domain = 'WebKit'
self.error_info = RenderErrorInfo(
domain,
int(info.error),
six.text_type(info.errorString),
six.text_type(info.url.toString())
)
# XXX: this page currently goes nowhere
content = u"""
<html><head><title>Failed loading page</title></head>
<body>
<h1>Failed loading page ({0.text})</h1>
<h2>{0.url}</h2>
<p>{0.type} error #{0.code}</p>
</body></html>""".format(self.error_info)
errorPage = sip.cast(errorPage, QWebPage.ErrorPageExtensionReturn)
errorPage.content = QByteArray(content.encode('utf-8'))
return True
# XXX: this method always returns True, even if we haven't
# handled the extension. Is it correct? When can this method be
# called with extension which is not ErrorPageExtension if we
# are returning False in ``supportsExtension`` for such extensions?
return True
def supportsExtension(self, extension):
if extension == QWebPage.ErrorPageExtension:
return True
return False
def maybe_redirect(self, load_finished_ok):
"""
Return True if the current webpage state looks like a redirect.
Use this function from loadFinished handler to ignore spurious
signals.
        FIXME: This can return True if the server returned an incorrect
        Content-Type header, but there is no additional loadFinished
        signal in this case.
"""
return not load_finished_ok and self.error_info is None
def is_ok(self, load_finished_ok):
return load_finished_ok and self.error_info is None
def error_loading(self, load_finished_ok):
return load_finished_ok and self.error_info is not None
|
bsd-3-clause
| -3,589,351,583,061,082,000 | 32.803279 | 108 | 0.619787 | false |
OpenDanbi/Sherlock
|
src/ClusterDialog.py
|
1
|
2053
|
from PySide.QtGui import QDialog, QDialogButtonBox, QVBoxLayout, QLabel, QLineEdit
from PySide import QtGui, QtCore
import Lifeline
class ClusterDialog(QDialog):
editClusterName = None
def __init__(self, lifeline, defaultName, parent = None):
super(ClusterDialog, self).__init__(parent)
self.lifeline = lifeline
layout = QVBoxLayout(self)
message = QLabel('Enter group name')
layout.addWidget(message)
self.editClusterName = QLineEdit(defaultName)
self.editClusterName.setFixedHeight(30)
self.editClusterName.setFixedWidth(400)
self.editClusterName.textChanged.connect(self.validateCluster)
layout.addWidget(self.editClusterName)
self.validation_msg = QLabel(' ')
layout.addWidget(self.validation_msg)
buttons = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel, QtCore.Qt.Horizontal, self)
buttons.accepted.connect(self.accept)
buttons.rejected.connect(self.reject)
layout.addWidget(buttons)
self.validateCluster()
def validateCluster(self):
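        # Count how many un-clustered life-lines match the entered name, and reject
        # the name if it already belongs to an existing cluster.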
cnt = 0
for l in self.lifeline:
if self.editClusterName.text() in l.getClassName() and not l.getFlagCluster() and not l.getClusterLifeline():
cnt += 1
available_flag = True
for l in self.lifeline:
if self.editClusterName.text() in l.getClassName() and l.getFlagCluster():
available_flag = False
break
if available_flag:
self.validation_msg.setText("group name includes %d life-lines" % (cnt))
else:
self.validation_msg.setText("group name is not available")
def getClusterText(self):
return self.editClusterName.text()
@staticmethod
def getClusterName(lifelines, defaultName, parent = None):
dialog = ClusterDialog(lifelines,defaultName,parent)
result = dialog.exec_()
return (result, dialog.getClusterText())
|
lgpl-2.1
| 5,314,989,252,790,974,000 | 33.216667 | 127 | 0.6566 | false |
chatoooo/pbone-cloud
|
server.py
|
1
|
1692
|
from bottle import route, run, template, static_file, post, get, request, response
import urllib
import urllib2
from urllib2 import HTTPError
@route('/js/<filepath:path>')
def js(filepath):
return static_file(filepath, root='./js')
@route('/css/<filepath:path>')
def css(filepath):
return static_file(filepath, root='./css')
@route('/fonts/<filepath:path>')
def fonts(filepath):
return static_file(filepath, root='./fonts')
@route('/')
def index():
return static_file('index.html', root='./views')
@post('/proxy')
def proxy_post():
url = request.params.get('url')
data = request.params.get('data')
headers = request.params.get('headers')
req = urllib2.Request(url,data)
headers = headers.split(",")
for header in headers:
data = request.headers.get(header)
if data is not None:
req.add_header(header, data)
try:
res = urllib2.urlopen(req)
response.status = res.getcode()
return res.read()
except HTTPError, e:
response.status = e.getcode()
return e.read()
@get('/proxy')
def proxy_get():
url = request.params.get('url')
headers = request.params.get('headers')
req = urllib2.Request(url)
headers = headers.split(",")
for header in headers:
data = request.headers.get(header)
if data is not None:
req.add_header(header, data)
try:
res = urllib2.urlopen(req)
response.status = res.getcode()
return res.read()
except HTTPError, e:
response.status = e.getcode()
return e.read()
run(port=8000)
|
mit
| 6,883,486,318,180,225,000 | 25.770492 | 82 | 0.59279 | false |
jalavik/inspire-next
|
inspire/modules/workflows/actions/hep_approval.py
|
1
|
3652
|
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014, 2015 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Approval action for INSPIRE arXiv harvesting."""
from flask import render_template, request, url_for
from flask.ext.login import current_user
from invenio_base.i18n import _
class hep_approval(object):
"""Class representing the approval action."""
name = _("Approve record")
url = url_for("holdingpen.resolve_action")
def render_mini(self, obj):
"""Method to render the minified action."""
return render_template(
'workflows/actions/hep_approval_mini.html',
message=obj.get_action_message(),
object=obj,
resolve_url=self.url,
)
def render(self, obj):
"""Method to render the action."""
return {
"side": render_template('workflows/actions/hep_approval_side.html',
message=obj.get_action_message(),
object=obj,
resolve_url=self.url),
"main": render_template('workflows/actions/hep_approval_main.html',
message=obj.get_action_message(),
object=obj,
resolve_url=self.url)
}
@staticmethod
def resolve(bwo):
"""Resolve the action taken in the approval action."""
from invenio_workflows.models import ObjectVersion
from inspire.modules.audit.api import log_prediction_action
value = request.form.get("value", "")
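        # The reviewer's decision: 'accept' and 'accept_core' approve the record
        # (see extra_data below); any other value is treated as a rejection.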
# Audit logging
results = bwo.get_tasks_results()
prediction_results = results.get("arxiv_guessing", {})
log_prediction_action(
action="resolve",
prediction_results=prediction_results,
object_id=bwo.id,
user_id=current_user.get_id(),
source="holdingpen",
user_action=value,
)
upload_pdf = request.form.get("pdf_submission", False)
bwo.remove_action()
extra_data = bwo.get_extra_data()
extra_data["approved"] = value in ('accept', 'accept_core')
extra_data["core"] = value == "accept_core"
extra_data["reason"] = request.form.get("text", "")
extra_data["pdf_upload"] = True if upload_pdf == "true" else False
bwo.set_extra_data(extra_data)
bwo.save(version=ObjectVersion.WAITING)
bwo.continue_workflow(delayed=True)
if extra_data["approved"]:
return {
"message": "Suggestion has been accepted!",
"category": "success",
}
else:
return {
"message": "Suggestion has been rejected",
"category": "warning",
}
|
gpl-2.0
| 5,389,820,352,519,112,000 | 34.803922 | 79 | 0.597481 | false |
LenzGr/DeepSea
|
srv/salt/_modules/purge.py
|
1
|
2689
|
# -*- coding: utf-8 -*-
"""
Cleanup related operations for resetting the Salt environment and removing
a Ceph cluster
"""
from __future__ import absolute_import
import logging
import os
import shutil
import pwd
import grp
import yaml
log = logging.getLogger(__name__)
def configuration():
"""
Purge all the necessary DeepSea related configurations
Note: leave proposals out for now, some may want to minimally roll back
without rerunning Stage 1
"""
roles()
default()
def roles():
"""
Remove the roles from the cluster/*.sls files
"""
# Keep yaml human readable/editable
friendly_dumper = yaml.SafeDumper
friendly_dumper.ignore_aliases = lambda self, data: True
cluster_dir = '/srv/pillar/ceph/cluster'
for filename in os.listdir(cluster_dir):
pathname = "{}/{}".format(cluster_dir, filename)
content = None
with open(pathname, "r") as sls_file:
content = yaml.safe_load(sls_file)
log.info("content {}".format(content))
if 'roles' in content:
content.pop('roles')
with open(pathname, "w") as sls_file:
sls_file.write(yaml.dump(content, Dumper=friendly_dumper,
default_flow_style=False))
def proposals():
"""
Remove all the generated subdirectories in .../proposals
"""
proposals_dir = '/srv/pillar/ceph/proposals'
for path in os.listdir(proposals_dir):
for partial in ['role-', 'cluster-', 'profile-', 'config']:
if partial in path:
log.info("removing {}/{}".format(proposals_dir, path))
shutil.rmtree("{}/{}".format(proposals_dir, path))
def default():
"""
Remove the .../stack/defaults directory. Preserve available_roles
"""
# Keep yaml human readable/editable
friendly_dumper = yaml.SafeDumper
friendly_dumper.ignore_aliases = lambda self, data: True
preserve = {}
content = None
pathname = "/srv/pillar/ceph/stack/default/{}/cluster.yml".format('ceph')
with open(pathname, "r") as sls_file:
content = yaml.safe_load(sls_file)
preserve['available_roles'] = content['available_roles']
stack_default = "/srv/pillar/ceph/stack/default"
shutil.rmtree(stack_default)
os.makedirs("{}/{}".format(stack_default, 'ceph'))
with open(pathname, "w") as sls_file:
sls_file.write(yaml.dump(preserve, Dumper=friendly_dumper,
default_flow_style=False))
uid = pwd.getpwnam("salt").pw_uid
gid = grp.getgrnam("salt").gr_gid
for path in [stack_default, "{}/{}".format(stack_default, 'ceph'), pathname]:
os.chown(path, uid, gid)
|
gpl-3.0
| -2,006,042,993,395,070,200 | 29.908046 | 81 | 0.628115 | false |
facebook/mcrouter
|
mcrouter/test/test_noreply.py
|
1
|
2998
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from mcrouter.test.McrouterTestCase import McrouterTestCase
class TestNoReplyBase(McrouterTestCase):
config = './mcrouter/test/test_noreply.json'
def setUp(self):
        # The order here must correspond to the order of hosts in the .json
self.mc = self.add_server(self.make_memcached())
def get_mcrouter(self):
return self.add_mcrouter(self.config)
class TestNoReply(TestNoReplyBase):
def test_set_noreply(self):
mcrouter = self.get_mcrouter()
self.assertTrue(mcrouter.set("key", "value", noreply=True))
self.assertTrue(self.eventually_get(key="key", expVal="value"))
def test_add_replace_noreply(self):
mcrouter = self.get_mcrouter()
self.assertTrue(mcrouter.add("key", "value", noreply=True))
self.assertTrue(self.eventually_get(key="key", expVal="value"))
self.assertTrue(mcrouter.replace("key", "value1", noreply=True))
self.assertTrue(self.eventually_get(key="key", expVal="value1"))
def test_delete_noreply(self):
mcrouter = self.get_mcrouter()
self.assertTrue(mcrouter.set("key", "value"))
self.assertTrue(self.eventually_get(key="key", expVal="value"))
self.assertTrue(mcrouter.delete("key", noreply=True))
self.assertFalse(self.mc.get("key"))
def test_touch_noreply(self):
mcrouter = self.get_mcrouter()
self.assertTrue(mcrouter.set("key", "value"))
self.assertTrue(self.eventually_get(key="key", expVal="value"))
self.assertTrue(mcrouter.touch("key", 100, noreply=True))
self.assertTrue(self.eventually_get(key="key", expVal="value"))
def test_arith_noreply(self):
mcrouter = self.get_mcrouter()
self.assertTrue(mcrouter.set("arith", "1"))
self.assertTrue(self.eventually_get(key="arith", expVal="1"))
self.assertTrue(mcrouter.incr("arith", noreply=True))
self.assertTrue(self.eventually_get(key="arith", expVal="2"))
self.assertTrue(mcrouter.decr("arith", noreply=True))
self.assertTrue(self.eventually_get(key="arith", expVal="1"))
class TestNoReplyAppendPrepend(TestNoReplyBase):
def __init__(self, *args, **kwargs):
super(TestNoReplyAppendPrepend, self).__init__(*args, **kwargs)
self.use_mock_mc = True
def test_affix_noreply(self):
mcrouter = self.get_mcrouter()
self.assertTrue(mcrouter.set("key", "value"))
self.assertTrue(self.eventually_get(key="key", expVal="value"))
self.assertTrue(mcrouter.append("key", "123", noreply=True))
self.assertTrue(self.eventually_get(key="key", expVal="value123"))
self.assertTrue(mcrouter.prepend("key", "456", noreply=True))
self.assertTrue(self.eventually_get(key="key", expVal="456value123"))
|
mit
| 6,223,384,078,401,246,000 | 38.973333 | 77 | 0.666444 | false |
TaiSakuma/AlphaTwirl
|
tests/ROOT/compare_std_vector.py
|
1
|
2236
|
#!/usr/bin/env python
# Tai Sakuma <tai.sakuma@cern.ch>
##__________________________________________________________________||
import os, sys
import timeit
import array
import ROOT
from alphatwirl.roottree import Events, BEvents
##__________________________________________________________________||
inputPath = 'tree.root'
treeName = 'tree'
##__________________________________________________________________||
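# Compare two ways of reading the same TTree: alphatwirl's BEvents wrapper
# versus plain SetBranchAddress with preallocated arrays and std::vectors.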
def use_BEvents():
inputFile = ROOT.TFile.Open(inputPath)
tree = inputFile.Get(treeName)
events = BEvents(tree)
jet_pt = events.jet_pt
trigger_path = events.trigger_path
trigger_version = events.trigger_version
for event in events:
for i in range(len(jet_pt)):
jet_pt[i]
# print [v for v in trigger_path]
# print [v for v in trigger_version]
##__________________________________________________________________||
def use_SetBranchAddress():
inputFile = ROOT.TFile.Open(inputPath)
tree = inputFile.Get(treeName)
tree.SetBranchStatus("*", 0)
tree.SetBranchStatus("njet", 1)
tree.SetBranchStatus("jet_pt", 1)
tree.SetBranchStatus("trigger_path", 1)
tree.SetBranchStatus("trigger_version", 1)
maxn = 65536
njet = array.array('i',[ 0 ])
jet_pt = array.array('d', maxn*[ 0 ])
tree.SetBranchAddress("njet" , njet)
tree.SetBranchAddress("jet_pt" , jet_pt)
trigger_path = ROOT.vector('string')()
tree.SetBranchAddress("trigger_path", trigger_path)
trigger_version = ROOT.vector('int')()
tree.SetBranchAddress("trigger_version", trigger_version)
for i in xrange(tree.GetEntries()):
if tree.GetEntry(i) <= 0: break
for i in range(njet[0]):
jet_pt[i]
# print [v for v in trigger_path]
# print [v for v in trigger_version]
##__________________________________________________________________||
# Only the second assignment below takes effect; the superseded first list is kept for reference.
# ways = ['simplest_way', 'use_SetBranchStatus', 'use_GetLeaf', 'use_SetBranchAddress']
ways = ['use_BEvents', 'use_SetBranchAddress', 'use_BEvents', 'use_SetBranchAddress']
for w in ways:
print w, ':',
print timeit.timeit(w + '()', number = 1, setup = 'from __main__ import ' + w)
##__________________________________________________________________||
|
bsd-3-clause
| -5,530,343,492,700,515,000 | 35.064516 | 85 | 0.522361 | false |
rhatto/boaspraticas
|
conf.py
|
1
|
8046
|
# -*- coding: utf-8 -*-
#
# Boas Práticas em Desenvolvimento documentation build configuration file, created by
# sphinx-quickstart on Sat Oct 31 19:44:44 2015.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Boas Práticas em Desenvolvimento'
copyright = u'2015, Silvio Rhatto'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'pt_BR'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_themes', 'README.rst']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
html_theme_path = ["_themes/sphinx_rtd_theme", ]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'BoasPrticasemDesenvolvimentodoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'BoasPrticasemDesenvolvimento.tex', u'Boas Práticas em Desenvolvimento Documentation',
u'Silvio Rhatto', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'boasprticasemdesenvolvimento', u'Boas Práticas em Desenvolvimento Documentation',
[u'Silvio Rhatto'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'BoasPrticasemDesenvolvimento', u'Boas Práticas em Desenvolvimento Documentation',
u'Silvio Rhatto', 'BoasPrticasemDesenvolvimento', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
gpl-3.0
| -4,072,515,261,222,713,000 | 32.090535 | 98 | 0.709738 | false |
kvesteri/sqlalchemy-json-api
|
tests/conftest.py
|
1
|
10864
|
import warnings
import pytest
import sqlalchemy as sa
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import sessionmaker
from sqlalchemy_json_api import CompositeId, QueryBuilder
warnings.filterwarnings('error')
@pytest.fixture(scope='class')
def base():
return declarative_base()
@pytest.fixture(scope='class')
def group_user_cls(base):
return sa.Table(
'group_user',
base.metadata,
sa.Column('user_id', sa.Integer, sa.ForeignKey('user.id')),
sa.Column('group_id', sa.Integer, sa.ForeignKey('group.id'))
)
@pytest.fixture(scope='class')
def group_cls(base):
class Group(base):
__tablename__ = 'group'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String)
return Group
@pytest.fixture(scope='class')
def organization_cls(base):
class Organization(base):
__tablename__ = 'organization'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String)
return Organization
@pytest.fixture(scope='class')
def organization_membership_cls(base, organization_cls, user_cls):
class OrganizationMembership(base):
__tablename__ = 'organization_membership'
organization_id = sa.Column(
sa.Integer,
sa.ForeignKey('organization.id'),
primary_key=True
)
user_id = sa.Column(
sa.Integer,
sa.ForeignKey('user.id'),
primary_key=True
)
user = sa.orm.relationship(user_cls, backref='memberships')
organization = sa.orm.relationship(organization_cls, backref='members')
is_admin = sa.Column(
sa.Boolean,
nullable=False,
default=False,
)
@hybrid_property
def id(self):
return CompositeId([self.organization_id, self.user_id])
return OrganizationMembership
@pytest.fixture(scope='class')
def friendship_cls(base):
return sa.Table(
'friendships',
base.metadata,
sa.Column(
'friend_a_id',
sa.Integer,
sa.ForeignKey('user.id'),
primary_key=True
),
sa.Column(
'friend_b_id',
sa.Integer,
sa.ForeignKey('user.id'),
primary_key=True
)
)
@pytest.fixture(scope='class')
def user_cls(base, group_user_cls, friendship_cls):
class User(base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String)
groups = sa.orm.relationship(
'Group',
secondary=group_user_cls,
backref='users'
)
# this relationship is used for persistence
friends = sa.orm.relationship(
'User',
secondary=friendship_cls,
primaryjoin=id == friendship_cls.c.friend_a_id,
secondaryjoin=id == friendship_cls.c.friend_b_id,
)
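    # Friendship rows are stored one-directionally; union both column orders so the
    # read-only all_friends relationship matches rows inserted from either side.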
friendship_union = sa.select([
friendship_cls.c.friend_a_id,
friendship_cls.c.friend_b_id
]).union(
sa.select([
friendship_cls.c.friend_b_id,
friendship_cls.c.friend_a_id]
)
).alias()
User.all_friends = sa.orm.relationship(
'User',
secondary=friendship_union,
primaryjoin=User.id == friendship_union.c.friend_a_id,
secondaryjoin=User.id == friendship_union.c.friend_b_id,
viewonly=True,
order_by=User.id
)
return User
@pytest.fixture(scope='class')
def category_cls(base, group_user_cls, friendship_cls):
class Category(base):
__tablename__ = 'category'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String)
created_at = sa.Column(sa.DateTime)
parent_id = sa.Column(sa.Integer, sa.ForeignKey('category.id'))
parent = sa.orm.relationship(
'Category',
backref='subcategories',
remote_side=[id],
order_by=id
)
return Category
@pytest.fixture(scope='class')
def article_cls(base, category_cls, user_cls):
class Article(base):
__tablename__ = 'article'
id = sa.Column(sa.Integer, primary_key=True)
_name = sa.Column('name', sa.String)
name_synonym = sa.orm.synonym('name')
@hybrid_property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@hybrid_property
def name_upper(self):
return self.name.upper() if self.name else None
@name_upper.expression
def name_upper(cls):
return sa.func.upper(cls.name)
content = sa.Column(sa.String)
category_id = sa.Column(sa.Integer, sa.ForeignKey(category_cls.id))
category = sa.orm.relationship(category_cls, backref='articles')
author_id = sa.Column(sa.Integer, sa.ForeignKey(user_cls.id))
author = sa.orm.relationship(
user_cls,
primaryjoin=author_id == user_cls.id,
backref='authored_articles'
)
owner_id = sa.Column(sa.Integer, sa.ForeignKey(user_cls.id))
owner = sa.orm.relationship(
user_cls,
primaryjoin=owner_id == user_cls.id,
backref='owned_articles'
)
return Article
@pytest.fixture(scope='class')
def comment_cls(base, article_cls, user_cls):
class Comment(base):
__tablename__ = 'comment'
id = sa.Column(sa.Integer, primary_key=True)
content = sa.Column(sa.String)
article_id = sa.Column(sa.Integer, sa.ForeignKey(article_cls.id))
article = sa.orm.relationship(
article_cls,
backref=sa.orm.backref('comments')
)
author_id = sa.Column(sa.Integer, sa.ForeignKey(user_cls.id))
author = sa.orm.relationship(user_cls, backref='comments')
article_cls.comment_count = sa.orm.column_property(
sa.select([sa.func.count(Comment.id)])
.where(Comment.article_id == article_cls.id)
.correlate(article_cls).label('comment_count')
)
return Comment
@pytest.fixture(scope='class')
def composite_pk_cls(base):
class CompositePKModel(base):
__tablename__ = 'composite_pk_model'
a = sa.Column(sa.Integer, primary_key=True)
b = sa.Column(sa.Integer, primary_key=True)
return CompositePKModel
@pytest.fixture(scope='class')
def dns():
return 'postgresql://postgres@localhost/sqlalchemy_json_api_test'
@pytest.yield_fixture(scope='class')
def engine(dns):
engine = create_engine(dns)
yield engine
engine.dispose()
@pytest.yield_fixture(scope='class')
def connection(engine):
conn = engine.connect()
yield conn
conn.close()
@pytest.fixture(scope='class')
def model_mapping(
article_cls,
category_cls,
comment_cls,
group_cls,
user_cls,
organization_cls,
organization_membership_cls
):
return {
'articles': article_cls,
'categories': category_cls,
'comments': comment_cls,
'groups': group_cls,
'users': user_cls,
'organizations': organization_cls,
'memberships': organization_membership_cls
}
@pytest.yield_fixture(scope='class')
def table_creator(base, connection, model_mapping):
sa.orm.configure_mappers()
base.metadata.create_all(connection)
yield
base.metadata.drop_all(connection)
@pytest.yield_fixture(scope='class')
def session(connection):
Session = sessionmaker(bind=connection)
session = Session()
yield session
session.close_all()
@pytest.fixture(scope='class')
def dataset(
session,
user_cls,
group_cls,
article_cls,
category_cls,
comment_cls,
organization_cls,
organization_membership_cls
):
organization = organization_cls(name='Organization 1')
organization2 = organization_cls(name='Organization 2')
organization3 = organization_cls(name='Organization 3')
group = group_cls(name='Group 1')
group2 = group_cls(name='Group 2')
user = user_cls(
id=1,
name='User 1',
groups=[group, group2],
memberships=[
organization_membership_cls(
organization=organization,
is_admin=True
),
organization_membership_cls(
organization=organization2,
is_admin=True
),
organization_membership_cls(
organization=organization3,
is_admin=True
)
]
)
user2 = user_cls(id=2, name='User 2')
user3 = user_cls(id=3, name='User 3', groups=[group])
user4 = user_cls(id=4, name='User 4', groups=[group2])
user5 = user_cls(id=5, name='User 5')
user.friends = [user2]
user2.friends = [user3, user4]
user3.friends = [user5]
article = article_cls(
name='Some article',
author=user,
owner=user2,
category=category_cls(
id=1,
name='Some category',
subcategories=[
category_cls(
id=2,
name='Subcategory 1',
subcategories=[
category_cls(
id=3,
name='Subsubcategory 1',
subcategories=[
category_cls(
id=5,
name='Subsubsubcategory 1',
),
category_cls(
id=6,
name='Subsubsubcategory 2',
)
]
)
]
),
category_cls(id=4, name='Subcategory 2'),
]
),
comments=[
comment_cls(
id=1,
content='Comment 1',
author=user
),
comment_cls(
id=2,
content='Comment 2',
author=user2
),
comment_cls(
id=3,
content='Comment 3',
author=user
),
comment_cls(
id=4,
content='Comment 4',
author=user2
)
]
)
session.add(user3)
session.add(user4)
session.add(article)
session.commit()
@pytest.fixture
def query_builder(model_mapping):
return QueryBuilder(model_mapping)
|
bsd-3-clause
| 381,387,033,388,502,140 | 26.296482 | 79 | 0.554308 | false |
tmmgarcia/frac-turtle
|
runner.py
|
1
|
1457
|
from turtle import Turtle, colormode
from random import randint
import sys
def randColor():
return randint(0,255)
def drawTriangle(t,dist):
t.fillcolor(randColor(),randColor(),randColor())
t.down()
t.setheading(0)
t.begin_fill()
t.forward(dist)
t.left(120)
t.forward(dist)
t.left(120)
t.forward(dist)
t.setheading(0)
t.end_fill()
t.up()
def sierpinski(t,levels,size):
if levels == 0:
# Draw triangle
drawTriangle(t,size)
else:
half = size/2
levels -= 1
# Recursive calls
sierpinski(t,levels,half)
t.setpos(t.xcor()+half,t.ycor())
sierpinski(t,levels,half)
t.left(120)
t.forward(half)
t.setheading(0)
sierpinski(t,levels,half)
t.right(120)
t.forward(half)
t.setheading(0)
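# Illustrative note (a sketch, not part of the original script): sierpinski(t, levels, size)
# draws 3**levels filled triangles, each of side size / 2**levels; for example
# levels=2 with size=480 yields 9 triangles of side 120.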
def main(configuration):
t = Turtle()
t.speed(10)
t.up()
t.setpos(-configuration['size']/2,-configuration['size']/2)
colormode(255)
sierpinski(t,configuration['level'],configuration['size'])
def start():
configuration = {'level': 2, 'size': 480}
if len(sys.argv) >= 2 and sys.argv[1].isdigit():
configuration['level'] = int(sys.argv[1])
if len(sys.argv) == 3 and sys.argv[2].isdigit():
configuration['size'] = int(sys.argv[2])
main(configuration)
raw_input("Press ENTER to continue")
start()
|
mit
| -4,172,545,080,756,532,700 | 21.555556 | 63 | 0.575154 | false |
eswartz/panda3d-stuff
|
lib/utils/filesystem.py
|
1
|
6149
|
__author__ = 'ejs'
import errno
import glob
import logging
import inspect
import os
import sys
import platform
def mkdirP(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
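# Example usage (illustrative sketch; the path below is an assumption):
#   mkdirP('/tmp/example/nested/dirs')   # creates intermediate dirs, silent if they already exist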
def get_actual_filename(name):
if not '\\' in name:
return name
dirs = name.split('\\')
# disk letter
test_name = [dirs[0].upper()]
for d in dirs[1:]:
test_name += ["%s[%s]" % (d[:-1], d[-1])]
res = glob.glob('\\'.join(test_name))
if not res:
#File not found
return None
return res[0]
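# Illustrative example (assumed Windows path; returns None when the path does not exist):
#   get_actual_filename('c:\\users\\someone\\desktop')
#   -> 'C:\\Users\\Someone\\Desktop', i.e. the casing actually stored on disk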
_mainScriptDir = None
def getScriptDir(module=None, toParent=None):
"""
Find the directory where the main script is running
From http://stackoverflow.com/questions/3718657/how-to-properly-determine-current-script-directory-in-python/22881871#22881871
:param follow_symlinks:
:return:
"""
global _mainScriptDir
if module and not _mainScriptDir:
if getattr(sys, 'frozen', False): # py2exe, PyInstaller, cx_Freeze
path = os.path.abspath(sys.executable)
else:
path = inspect.getabsfile(module)
if not toParent:
toParent = "../.."
path = os.path.join(path, toParent) # remove our package and module
path = os.path.realpath(path)
# Windows needs real case for e.g. model path lookups
path = get_actual_filename(path)
_mainScriptDir = os.path.dirname(path) # our package
return _mainScriptDir
_libScriptDir = None
def getLibScriptDir():
"""
Find the directory where the main script is running
From http://stackoverflow.com/questions/3718657/how-to-properly-determine-current-script-directory-in-python/22881871#22881871
:param follow_symlinks:
:return:
"""
global _libScriptDir
if not _libScriptDir:
if getattr(sys, 'frozen', False): # py2exe, PyInstaller, cx_Freeze
path = os.path.abspath(sys.executable)
else:
path = inspect.getabsfile(sys.modules['utils.filesystem'])
path = os.path.join(path, "../..") # remove our package and module
path = os.path.realpath(path)
# Windows needs real case for e.g. model path lookups
path = get_actual_filename(path)
#print "getLibScriptDir:",path
_libScriptDir = os.path.dirname(path) # our package
return _libScriptDir
def getUserDataDir():
"""
Get real user data folder under which the game data can be stored.
:return:
"""
if platform.system() == 'Windows':
# HOME is not trustworthy
userhome = os.environ.get('USERPROFILE')
if not userhome:
userhome = os.path.expanduser('~')
data_dir = os.path.join(userhome, "AppData", "Roaming")
if not os.path.exists(data_dir):
data_dir = os.path.join(userhome, "Documents")
elif platform.system() == 'Linux':
data_dir = os.path.expanduser("~/.config")
elif platform.system() == 'Darwin':
data_dir = os.path.expanduser("~/Library")
else:
data_dir = os.path.expanduser("~")
return data_dir
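# Illustrative results on stock environments (assumptions, not exhaustive):
#   Windows -> %USERPROFILE%\AppData\Roaming (Documents as fallback),
#   Linux -> ~/.config, macOS -> ~/Library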
_tempDir = None
def findDataFilename(name, extract=False, executable=False):
"""
Resolve a filename along Panda's model-path.
:param name:
:return: filename or None
"""
from panda3d.core import Filename, getModelPath
from panda3d.core import VirtualFileSystem
logging.debug("findDataFilename: "+ name +" on: \n" + str(getModelPath().getValue()))
vfs = VirtualFileSystem.getGlobalPtr()
fileName = Filename(name)
vfile = vfs.findFile(fileName, getModelPath().getValue())
if not vfile:
if extract and name.endswith(".exe"):
fileName = Filename(name[:-4])
vfile = vfs.findFile(fileName, getModelPath().getValue())
if not vfile:
return None
fileName = vfile.getFilename()
if extract:
# see if the file is embedded in some virtual place OR has the wrong perms
from panda3d.core import SubfileInfo
info = SubfileInfo()
needsCopy = not vfile.getSystemInfo(info) or info.getFilename() != fileName
if not needsCopy:
if executable:
# see if on Linux or OSX and not executable
try:
stat = os.stat(fileName.toOsSpecific())
if (stat.st_mode & 0111) == 0:
logging.error("Found %s locally, but not marked executable!", fileName)
needsCopy = True
except:
needsCopy = True
if needsCopy:
# virtual file needs to be copied out
global _tempDir
if not _tempDir:
import tempfile
_tempDir = os.path.realpath(tempfile.mkdtemp())
#print "Temp dir:",_tempDir
xpath = _tempDir + '/' + fileName.getBasename()
xTarg = Filename.fromOsSpecific(xpath)
# on Windows, case-sensitivity must be honored for the following to work
xTarg.makeCanonical()
print "extracting",fileName,"to",xTarg
if not xTarg.exists():
if not vfs.copyFile(fileName, xTarg):
raise IOError("extraction failed when copying " + str(fileName) + " to " + str(xTarg))
fileName = xTarg
os.chmod(fileName.toOsSpecific(), 0777)
return fileName
def findDataFile(name, extract=False, executable=False):
"""
Resolve a filename along Panda's model-path.
:param name:
:return: path or None
"""
fileName = findDataFilename(name, extract, executable)
if not fileName:
return None
return fileName.toOsSpecific()
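# Illustrative usage (the file name and flags are assumptions):
#   path = findDataFile('tools/helper.exe', extract=True, executable=True)
#   # -> OS-specific path string, or None when not found on Panda's model-path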
def toPanda(path):
path = path.replace('\\', '/')
# make Windows path look Unix-y for the VFS
if len(path) > 3 and path[1] == ':' and path[2] == '/':
path = '/' + path[0].lower() + path[2:]
return path
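# Illustrative example (assumed Windows-style input):
#   toPanda('C:\\Users\\someone\\models')  ->  '/c/Users/someone/models'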
|
mit
| -939,937,366,241,566,100 | 29.142157 | 130 | 0.592942 | false |
leon-lei/learning-materials
|
data_science/numpy_tutorials/numpy_practice1_array_random_reshape.py
|
1
|
3008
|
import numpy as np
# from numpy.random import randint
my_list = [1,2,3,4,5,6]
new_list = [[1,2,3], [4,5,6], [7,8,9]]
# 1D array
print('Casting a premade list into a 1D numpy array')
print(np.array(my_list))
# 2D array, note the extra brackets being displayed
print('\nCasting a list of lists into a 2D numpy array')
print(np.array(new_list))
# similar to regular range function
# (start, stop, step)
print('\n np.arange to create a 1D array from (start, stop, step)')
print(np.arange(0,10,2))
# returns evenly space points between (start, stop, num=50)
# only a 1D array
# example below returns 30 evenly space pts between 0 and 5
print('\n np.linspace to return evenly space arrays from (start, stop, num)')
print(np.linspace(0,5,30))
# arrays of zeros and ones
# 2D arrays as we're passing in tuples
print('\n Zeros and Ones')
print(np.zeros((3,3)))
print()
print(np.ones((3,3)))
# identity matrix - for linear algebra problems
# returns a 2D array with ones on the diagonal and zeros elsewhere
# will square the argument, thus example below is returning a 7x7 array
print('\n Identity Matrix')
print(np.eye(7))
# random.rand
# returns random values in a given shape, not ints
# 1st example is 1D array
# 2nd example is 2D array, note we don't have to pass in tuples as like before
print('\n random.rand as a 1D array')
print(np.random.rand(5))
print('\n random.rand as a 2D array')
print(np.random.rand(5,5))
# random.randn
# returns sample from "Standard Normal"/ Gaussian distribution
# 2D plus arrays no need to pass in tuples either
print('\n Standard Normal/ Gaussian distribution in a 1D array')
print(np.random.randn(7))
print('\n Same Gaussian except in a 2D array if 2 arguments were passed in')
print(np.random.randn(4,4))
# random.randint
# returns 1 random int if size is not specified
# (low, high, size)
print('\n random.randint to return n random ints from (low, high, size)')
print(np.random.randint(0,10,5))
# reshaping an array
# first build a 1D array using np.arange
# then reshape and assign to a new variable
# note that total size of new array must remain the same
# if OG array was only 25 elements, we cannot reshape it into a 5x10 array
print('\n array.reshape on an array created with np.arange(0, 25)')
arr = np.arange(0,25)
print(arr)
arr2 = arr.reshape(5,5)
print('\n Note reshaping does not alter the original array,\n so we assigned it to a new variable')
print(arr2)
# shape attribute
print('\n the shape of the array is {}'.format(arr2.shape))
# finding max and min
# finding position of the max and min
# finding the type of the array with dtype attribute
randr = np.random.randint(0,100,20)
print('\n finding the max/min of a random array')
print(randr)
print('\nThe max is {} and min is {}'.format(randr.max(), randr.min()))
print('The max of {} is located at position {}'.format(randr.max(), randr.argmax()))
print('The min of {} is located at position {}'.format(randr.min(), randr.argmin()))
print('\nThe type of the array is {}'.format(randr.dtype))
|
mit
| 1,797,216,221,528,837,600 | 33.976744 | 99 | 0.721742 | false |
theyosh/TerrariumPI
|
terrariumCollector.py
|
1
|
24965
|
# -*- coding: utf-8 -*-
import terrariumLogging
logger = terrariumLogging.logging.getLogger(__name__)
import sqlite3
import time
import copy
import os
from terrariumUtils import terrariumUtils
class terrariumCollector(object):
DATABASE = 'history.db'
# Store data every Xth minute. Except switches and doors
STORE_MODULO = 1 * 60
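  # Illustrative effect (a sketch, not part of the original source): timestamps are
  # rounded down to the whole minute before storing, e.g. 1000000123 -> 1000000080.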
def __init__(self,versionid):
logger.info('Setting up collector database %s' % (terrariumCollector.DATABASE,))
self.__recovery = False
self.__connect()
self.__create_database_structure()
self.__upgrade(int(versionid.replace('.','')))
logger.info('TerrariumPI Collecter is ready')
def __connect(self):
self.db = sqlite3.connect(terrariumCollector.DATABASE)
# https://www.whoishostingthis.com/compare/sqlite/optimize/
with self.db as db:
cur = db.cursor()
cur.execute('PRAGMA journal_mode = MEMORY')
cur.execute('PRAGMA temp_store = MEMORY')
# Line below is not safe for a Pi. As this can/will corrupt the database when the Pi crashes....
# cur.execute('PRAGMA synchronous = OFF')
self.db.row_factory = sqlite3.Row
logger.info('Database connection created to database %s' % (terrariumCollector.DATABASE,))
def __create_database_structure(self):
with self.db as db:
cur = db.cursor()
cur.execute('''CREATE TABLE IF NOT EXISTS sensor_data
(id VARCHAR(50),
type VARCHAR(15),
timestamp INTEGER(4),
current FLOAT(4),
limit_min FLOAT(4),
limit_max FLOAT(4),
alarm_min FLOAT(4),
alarm_max FLOAT(4),
alarm INTEGER(1))''')
cur.execute('CREATE UNIQUE INDEX IF NOT EXISTS sensor_data_unique ON sensor_data(id,type,timestamp ASC)')
cur.execute('CREATE INDEX IF NOT EXISTS sensor_data_timestamp ON sensor_data(timestamp ASC)')
cur.execute('CREATE INDEX IF NOT EXISTS sensor_data_avg ON sensor_data(type,timestamp ASC)')
cur.execute('CREATE INDEX IF NOT EXISTS sensor_data_id ON sensor_data(id,timestamp ASC)')
cur.execute('''CREATE TABLE IF NOT EXISTS switch_data
(id VARCHAR(50),
timestamp INTEGER(4),
state INTERGER(1),
power_wattage FLOAT(2),
water_flow FLOAT(2))''')
cur.execute('CREATE UNIQUE INDEX IF NOT EXISTS switch_data_unique ON switch_data(id,timestamp ASC)')
cur.execute('CREATE INDEX IF NOT EXISTS switch_data_timestamp ON switch_data(timestamp ASC)')
cur.execute('CREATE INDEX IF NOT EXISTS switch_data_id ON switch_data(id,timestamp ASC)')
cur.execute('''CREATE TABLE IF NOT EXISTS door_data
(id INTEGER(4),
timestamp INTEGER(4),
state TEXT CHECK( state IN ('open','closed') ) NOT NULL DEFAULT 'closed')''')
cur.execute('CREATE UNIQUE INDEX IF NOT EXISTS door_data_unique ON door_data(id,timestamp ASC)')
cur.execute('CREATE INDEX IF NOT EXISTS door_data_timestamp ON door_data(timestamp ASC)')
cur.execute('CREATE INDEX IF NOT EXISTS door_data_id ON door_data(id,timestamp ASC)')
cur.execute('''CREATE TABLE IF NOT EXISTS weather_data
(timestamp INTEGER(4),
wind_speed FLOAT(4),
temperature FLOAT(4),
pressure FLOAT(4),
wind_direction VARCHAR(50),
weather VARCHAR(50),
icon VARCHAR(50))''')
cur.execute('CREATE UNIQUE INDEX IF NOT EXISTS weather_data_unique ON weather_data(timestamp ASC)')
cur.execute('''CREATE TABLE IF NOT EXISTS system_data
(timestamp INTEGER(4),
load_load1 FLOAT(4),
load_load5 FLOAT(4),
load_load15 FLOAT(4),
uptime INTEGER(4),
temperature FLOAT(4),
cores VARCHAR(25),
memory_total INTEGER(6),
memory_used INTEGER(6),
memory_free INTEGER(6),
disk_total INTEGER(6),
disk_used INTEGER(6),
disk_free INTEGER(6))''')
cur.execute('CREATE UNIQUE INDEX IF NOT EXISTS system_data_unique ON system_data(timestamp ASC)')
db.commit()
def __upgrade(self,to_version):
# Set minimal version to 3.0.0
current_version = 300
table_upgrades = {'310' : ['ALTER TABLE system_data ADD COLUMN disk_total INTEGER(6)',
'ALTER TABLE system_data ADD COLUMN disk_used INTEGER(6)',
'ALTER TABLE system_data ADD COLUMN disk_free INTEGER(6)'],
'380' : ['DROP INDEX IF EXISTS sensor_data_type',
'CREATE INDEX IF NOT EXISTS sensor_data_avg ON sensor_data (type, timestamp ASC)',
'DROP INDEX IF EXISTS sensor_data_id',
'CREATE INDEX IF NOT EXISTS sensor_data_id ON sensor_data (id, timestamp ASC)',
'DROP INDEX IF EXISTS switch_data_id',
'CREATE INDEX IF NOT EXISTS switch_data_id ON switch_data (id, timestamp ASC)',
'DROP INDEX IF EXISTS door_data_id',
'CREATE INDEX IF NOT EXISTS door_data_id ON door_data (id, timestamp ASC)']}
try:
with open('.collector.update.{}.sql'.format('393'),'r') as sql_file:
table_upgrades['393'] = [line.strip() for line in sql_file.readlines()]
os.remove('.collector.update.{}.sql'.format('393'))
logger.warning('There are {} sensors that have an updated ID and needs to be renamed in the database. This can take a lot of time! Please wait...'
.format(len(table_upgrades['393'])/2))
except IOError as ex:
# No updates... just ignore
pass
with self.db as db:
cur = db.cursor()
db_version = int(cur.execute('PRAGMA user_version').fetchall()[0][0])
if db_version > current_version:
current_version = db_version
if current_version == to_version:
logger.info('Collector database is up to date')
elif current_version < to_version:
logger.info('Collector database is out of date. Running updates from %s to %s' % (current_version,to_version))
# Execute updates
with self.db as db:
cur = db.cursor()
for update_version in table_upgrades:
if current_version < int(update_version) <= to_version:
# Execute all updates between the versions
for sql_upgrade in table_upgrades[update_version]:
try:
cur.execute(sql_upgrade)
logger.info('Collector database upgrade for version %s succeeded! %s' % (update_version,sql_upgrade))
except Exception as ex:
if 'duplicate column name' not in str(ex):
logger.error('Error updating collector database. Please contact support. Error message: %s' % (ex,))
if '380' == update_version:
self.__upgrade_to_380()
db.commit()
if int(to_version) % 10 == 0:
logger.warning('Cleaning up disk space. This will take a couple of minutes depending on the database size and sd card disk speed.')
filesize = os.path.getsize(terrariumCollector.DATABASE)
speed = 2 # MBps
duration = filesize / 1024.0 / 1024.0 / speed
logger.warning('Current database is {} in size and with a speed of {}MBps it will take {} to complete'.format(terrariumUtils.format_filesize(filesize),speed,terrariumUtils.format_uptime(duration)))
cur.execute('VACUUM')
cur.execute('PRAGMA user_version = ' + str(to_version))
logger.info('Updated collector database. Set version to: %s' % (to_version,))
db.commit()
def __upgrade_to_380(self):
# This update will remove 'duplicate' records that where added for better graphing... This will now be done at the collecting the data
tables = ['door_data','switch_data']
with self.db as db:
for table in tables:
cur = db.cursor()
data = cur.execute('SELECT id, timestamp, state FROM ' + table + ' ORDER BY id ASC, timestamp ASC')
data = data.fetchall()
prev_state = None
prev_id = None
for row in data:
if prev_id is None:
prev_id = row['id']
elif prev_id != row['id']:
prev_id = row['id']
prev_state = None
if prev_state is None:
prev_state = row['state']
continue
if row['state'] == prev_state:
cur.execute('DELETE FROM ' + table + ' WHERE id = ? AND timestamp = ? AND state = ?', (row['id'],row['timestamp'],row['state']))
prev_state = row['state']
prev_id = row['id']
db.commit()
logger.info('Collector database upgrade for version 3.8.0 succeeded! Removed duplicate records')
def __recover(self):
starttime = time.time()
# Based on: http://www.dosomethinghere.com/2013/02/20/fixing-the-sqlite-error-the-database-disk-image-is-malformed/
# Enable recovery status
self.__recovery = True
logger.warn('TerrariumPI Collecter recovery mode is starting! %s', (self.__recovery,))
# Create empty sql dump variable
sqldump = ''
lines = 0
with open('.recovery.sql', 'w') as f:
# Dump SQL data line for line
for line in self.db.iterdump():
lines += 1
sqldump += line + "\n"
f.write('%s\n' % line)
    logger.warn('TerrariumPI Collecter recovery mode created SQL dump of %s lines and %s bytes!', lines, len(sqldump))
# Delete broken db
os.remove(terrariumCollector.DATABASE)
logger.warn('TerrariumPI Collecter recovery mode deleted faulty database from disk %s', (terrariumCollector.DATABASE,))
# Reconnect will recreate the db
logger.warn('TerrariumPI Collecter recovery mode starts reconnecting database to create a new clean database at %s', (terrariumCollector.DATABASE,))
self.__connect()
self.__create_database_structure()
cur = self.db.cursor()
# Load the SQL data back to db
cur.executescript(sqldump)
logger.warn('TerrariumPI Collecter recovery mode restored the old data in a new database. %s', (terrariumCollector.DATABASE,))
# Return to normal mode
self.__recovery = False
logger.warn('TerrariumPI Collecter recovery mode is finished in %s seconds!', (time.time()-starttime,))
def __log_data(self,type,id,newdata):
timer = time.time()
if self.__recovery:
logger.warn('TerrariumPI Collecter is in recovery mode. Cannot store new logging data!')
return
now = int(time.time())
rows = []
if type not in ['switches','door']:
now -= (now % terrariumCollector.STORE_MODULO)
try:
with self.db as db:
cur = db.cursor()
if type in ['humidity','moisture','temperature','distance','ph','conductivity','light','uva','uvb','uvi','fertility','co2','volume']:
cur.execute('REPLACE INTO sensor_data (id, type, timestamp, current, limit_min, limit_max, alarm_min, alarm_max, alarm) VALUES (?,?,?,?,?,?,?,?,?)',
(id, type, now, newdata['current'], newdata['limit_min'], newdata['limit_max'], newdata['alarm_min'], newdata['alarm_max'], newdata['alarm']))
if type in ['weather']:
cur.execute('REPLACE INTO weather_data (timestamp, wind_speed, temperature, pressure, wind_direction, weather, icon) VALUES (?,?,?,?,?,?,?)',
(now, newdata['wind_speed'], newdata['temperature'], newdata['pressure'], newdata['wind_direction'], newdata['weather'], newdata['icon']))
if type in ['system']:
cur.execute('REPLACE INTO system_data (timestamp, load_load1, load_load5, load_load15, uptime, temperature, cores, memory_total, memory_used, memory_free, disk_total, disk_used, disk_free) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)',
(now, newdata['load']['load1'], newdata['load']['load5'], newdata['load']['load15'], newdata['uptime'], newdata['temperature'], newdata['cores'], newdata['memory']['total'], newdata['memory']['used'], newdata['memory']['free'],newdata['disk']['total'], newdata['disk']['used'], newdata['disk']['free']))
if type in ['switches']:
if 'time' in newdata:
now = newdata['time']
cur.execute('REPLACE INTO switch_data (id, timestamp, state, power_wattage, water_flow) VALUES (?,?,?,?,?)',
(id, now, newdata['state'], newdata['current_power_wattage'], newdata['current_water_flow']))
if type in ['door']:
cur.execute('REPLACE INTO door_data (id, timestamp, state) VALUES (?,?,?)',
(id, now, newdata))
db.commit()
except sqlite3.DatabaseError as ex:
logger.error('TerrariumPI Collecter exception! %s', (ex,))
if 'database disk image is malformed' == str(ex):
self.__recover()
logger.debug('Timing: updating %s data in %s seconds.' % (type,time.time()-timer))
def stop(self):
self.db.close()
logger.info('Shutdown data collector')
def get_total_power_water_usage(self):
timer = time.time()
totals = {'power_wattage' : {'duration' : 0 , 'wattage' : 0.0},
'water_flow' : {'duration' : 0 , 'water' : 0.0}}
sql = '''SELECT SUM(total_wattage) AS Watt, SUM(total_water) AS Water, MAX(timestamp2)-MIN(timestamp) AS TotalTime FROM (
SELECT
t1.timestamp as timestamp,
t2.timestamp as timestamp2,
t2.timestamp-t1.timestamp AS duration_in_seconds,
(t2.timestamp-t1.timestamp) * t1.power_wattage AS total_wattage,
((t2.timestamp-t1.timestamp) / 60.0) * t1.water_flow AS total_water
FROM switch_data AS t1
LEFT JOIN switch_data AS t2
ON t2.id = t1.id
AND t2.timestamp = (SELECT MIN(timestamp) FROM switch_data WHERE timestamp > t1.timestamp AND id = t1.id)
WHERE t1.state > 0)'''
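    # Illustrative reading of the query above (sketch): each interval a switch was ON
    # contributes duration_seconds * power_wattage to Watt and
    # (duration_seconds / 60) * water_flow litres to Water.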
with self.db as db:
cur = db.cursor()
cur.execute(sql)
row = cur.fetchone()
if row['TotalTime'] is not None and row['Watt'] is not None:
totals = {'power_wattage' : {'duration' : int(row['TotalTime']) , 'wattage' : float(row['Watt'])},
'water_flow' : {'duration' : int(row['TotalTime']) , 'water' : float(row['Water'])}}
logger.debug('Timing: Total power and water usage calculation done in %s seconds.' % ((time.time() - timer),))
return totals
def log_switch_data(self,data):
if data['hardwaretype'] not in ['pwm-dimmer','remote-dimmer','dc-dimmer']:
# Store normal switches with value 100 indicating full power (aka no dimming)
data['state'] = (100 if data['state'] == 1 else 0)
self.__log_data('switches',data['id'],data)
def log_door_data(self,data):
self.__log_data('door',data['id'], data['state'])
def log_weather_data(self,data):
self.__log_data('weather',None,data)
def log_sensor_data(self,data):
self.__log_data(data['type'],data['id'],data)
def log_system_data(self, data):
self.__log_data('system',None,data)
def get_history(self, parameters = [], starttime = None, stoptime = None, exclude_ids = None):
# Default return object
timer = time.time()
history = {}
periods = {'day' : 1 * 24,
'week' : 7 * 24,
'month' : 30 * 24,
'year' : 365 * 24,
'all' : 3650 * 24}
modulo = terrariumCollector.STORE_MODULO
logtype = parameters[0]
del(parameters[0])
# Define start time
if starttime is None:
starttime = int(time.time())
# Define stop time
if stoptime is None:
stoptime = starttime - (24 * 60 * 60)
if len(parameters) > 0 and parameters[-1] in periods:
stoptime = starttime - periods[parameters[-1]] * 60 * 60
modulo = (periods[parameters[-1]] / 24) * terrariumCollector.STORE_MODULO
del(parameters[-1])
sql = ''
filters = (stoptime,starttime,)
if logtype == 'sensors':
fields = { 'current' : [], 'alarm_min' : [], 'alarm_max' : [] , 'limit_min' : [], 'limit_max' : []}
sql = 'SELECT id, type, timestamp,' + ', '.join(list(fields.keys())) + ' FROM sensor_data WHERE timestamp >= ? AND timestamp <= ?'
if len(parameters) > 0 and parameters[0] == 'average':
sql = 'SELECT "average" AS id, type, timestamp'
for field in fields:
sql = sql + ', AVG(' + field + ') as ' + field
sql = sql + ' FROM sensor_data WHERE timestamp >= ? AND timestamp <= ?'
if exclude_ids is not None:
sql = sql + ' AND sensor_data.id NOT IN (\'' + '\',\''.join(exclude_ids) +'\')'
if len(parameters) == 2:
sql = sql + ' AND type = ?'
filters = (stoptime,starttime,parameters[1],)
sql = sql + ' GROUP BY type, timestamp'
elif len(parameters) == 2 and parameters[0] in ['temperature','humidity','distance','ph','conductivity','light','uva','uvb','uvi','fertility']:
sql = sql + ' AND type = ? AND id = ?'
filters = (stoptime,starttime,parameters[0],parameters[1],)
elif len(parameters) == 1 and parameters[0] in ['temperature','humidity','distance','ph','conductivity','light','uva','uvb','uvi','fertility']:
sql = sql + ' AND type = ?'
filters = (stoptime,starttime,parameters[0],)
elif len(parameters) == 1:
sql = sql + ' AND id = ?'
filters = (stoptime,starttime,parameters[0],)
elif logtype == 'switches':
fields = { 'power_wattage' : [], 'water_flow' : [] }
sql = '''SELECT id, "switches" AS type, timestamp, timestamp2, state, ''' + ', '.join(list(fields.keys())) + ''' FROM (
SELECT
t1.id AS id,
t1.timestamp AS timestamp,
IFNULL(t2.timestamp, ''' + str(starttime) + ''') as timestamp2,
t1.power_wattage AS power_wattage,
t1.water_flow AS water_flow,
t1.state AS state
FROM switch_data AS t1
LEFT JOIN switch_data AS t2
ON t2.id = t1.id
AND t2.timestamp = (SELECT MIN(timestamp) FROM switch_data WHERE switch_data.timestamp > t1.timestamp AND switch_data.id = t1.id) )
WHERE timestamp2 > IFNULL((SELECT MAX(timestamp) AS timelimit FROM switch_data AS ttable WHERE ttable.id = id AND ttable.timestamp < ?),0)
AND timestamp <= ?'''
if len(parameters) > 0 and parameters[0] is not None:
sql = sql + ' AND id = ?'
filters = (stoptime,starttime,parameters[0],)
elif logtype == 'doors':
fields = {'state' : []}
sql = '''SELECT id, "doors" AS type, timestamp, timestamp2, (CASE WHEN state == 'open' THEN 1 ELSE 0 END) AS state FROM (
SELECT
t1.id AS id,
t1.timestamp AS timestamp,
IFNULL(t2.timestamp, ''' + str(starttime) + ''') as timestamp2,
t1.state AS state
FROM door_data AS t1
LEFT JOIN door_data AS t2
ON t2.id = t1.id
AND t2.timestamp = (SELECT MIN(timestamp) FROM door_data WHERE door_data.timestamp > t1.timestamp AND door_data.id = t1.id) )
WHERE timestamp2 > IFNULL((SELECT MAX(timestamp) AS timelimit FROM door_data AS ttable WHERE ttable.id = id AND ttable.timestamp < ?),0)
AND timestamp <= ?'''
if len(parameters) > 0 and parameters[0] is not None:
sql = sql + ' AND id = ?'
filters = (stoptime,starttime,parameters[0],)
elif logtype == 'weather':
fields = { 'wind_speed' : [], 'temperature' : [], 'pressure' : [] , 'wind_direction' : [], 'rain' : [],
'weather' : [], 'icon' : []}
sql = 'SELECT "city" AS id, "weather" AS type, timestamp, ' + ', '.join(list(fields.keys())) + ' FROM weather_data WHERE timestamp >= ? AND timestamp <= ?'
elif logtype == 'system':
fields = ['load_load1', 'load_load5','load_load15','uptime', 'temperature','cores', 'memory_total', 'memory_used' , 'memory_free', 'disk_total', 'disk_used' , 'disk_free']
if len(parameters) > 0 and parameters[0] == 'load':
fields = ['load_load1', 'load_load5','load_load15']
elif len(parameters) > 0 and parameters[0] == 'cores':
fields = ['cores']
elif len(parameters) > 0 and parameters[0] == 'uptime':
fields = ['uptime']
elif len(parameters) > 0 and parameters[0] == 'temperature':
fields = ['temperature']
elif len(parameters) > 0 and parameters[0] == 'memory':
fields = ['memory_total', 'memory_used' , 'memory_free']
elif len(parameters) > 0 and parameters[0] == 'disk':
fields = ['disk_total', 'disk_used' , 'disk_free']
sql = 'SELECT "system" AS type, timestamp, ' + ', '.join(fields) + ' FROM system_data WHERE timestamp >= ? AND timestamp <= ?'
sql = sql + ' ORDER BY timestamp ASC, type ASC' + (', id ASC' if logtype != 'system' else '')
if not self.__recovery:
try:
first_item = None
with self.db as db:
cur = db.cursor()
for row in cur.execute(sql, filters):
if row['type'] not in history:
history[row['type']] = {}
if logtype == 'system':
for field in fields:
system_parts = field.split('_')
if system_parts[0] not in history[row['type']]:
history[row['type']][system_parts[0]] = {} if len(system_parts) == 2 else []
if len(system_parts) == 2:
if system_parts[1] not in history[row['type']][system_parts[0]]:
history[row['type']][system_parts[0]][system_parts[1]] = []
history[row['type']][system_parts[0]][system_parts[1]].append([row['timestamp'] * 1000,row[field]])
else:
history[row['type']][system_parts[0]].append([row['timestamp'] * 1000,row[field]])
else:
if row['id'] not in history[row['type']]:
history[row['type']][row['id']] = copy.deepcopy(fields)
if row['type'] in ['switches','doors']:
history[row['type']][row['id']]['totals'] = {'duration' : 0, 'power_wattage' : 0, 'water_flow' : 0}
if row['type'] in ['switches','doors'] and row['state'] > 0 and row['timestamp2'] is not None and '' != row['timestamp2']:
# Update totals data
duration = float(row['timestamp2'] - (row['timestamp'] if row['timestamp'] >= stoptime else stoptime))
history[row['type']][row['id']]['totals']['duration'] += duration
if 'switches' == row['type']:
history[row['type']][row['id']]['totals']['power_wattage'] += duration * float(row['power_wattage'])
# Devide by 60 to get Liters water used per minute based on seconds durations
history[row['type']][row['id']]['totals']['water_flow'] += (duration / 60.0) * float(row['water_flow'])
for field in fields:
history[row['type']][row['id']][field].append([ (row['timestamp'] if row['timestamp'] >= stoptime else stoptime) * 1000,row[field]])
if row['type'] in ['switches','doors'] and row['timestamp2'] is not None and '' != row['timestamp2']:
# Add extra point for nicer graphing of doors and power switches
history[row['type']][row['id']][field].append([row['timestamp2'] * 1000,row[field]])
logger.debug('Timing: history %s query: %s seconds' % (logtype,time.time()-timer))
except sqlite3.DatabaseError as ex:
logger.error('TerrariumPI Collecter exception! %s', (ex,))
if 'database disk image is malformed' == str(ex):
self.__recover()
# In order to get nicer graphs, we are adding a start and end time based on the selected time range if needed
if logtype in ['switches','doors'] and logtype not in history and len(parameters) > 0:
# Create 'empty' history array if single id is requested
history[logtype] = {}
history[logtype][parameters[0]] = copy.deepcopy(fields)
for field in fields:
history[logtype][parameters[0]][field].append([stoptime * 1000,0])
history[logtype][parameters[0]][field].append([starttime * 1000,0])
return history
|
gpl-3.0
| 3,670,249,768,931,652,600 | 45.926692 | 325 | 0.576247 | false |
DarthMaulware/EquationGroupLeaks
|
Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Tasking/Mcl_Cmd_Banner_Tasking.py
|
1
|
5237
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: Mcl_Cmd_Banner_Tasking.py
def TaskingMain(namespace):
import mcl.imports
import mcl.target
import mcl.tasking
from mcl.object.Message import MarshalMessage
mcl.imports.ImportWithNamespace(namespace, 'mca.network.cmd.banner', globals())
mcl.imports.ImportWithNamespace(namespace, 'mca.network.cmd.banner.tasking', globals())
lpParams = mcl.tasking.GetParameters()
tgtParams = mca.network.cmd.banner.Params()
tgtParams.targetAddr = lpParams['targetAddress']
tgtParams.broadcast = lpParams['broadcast']
tgtParams.wait = lpParams['wait']
tgtParams.dstPort = lpParams['dstPort']
tgtParams.srcPort = lpParams['srcPort']
if lpParams['protocol'] == 1:
protocol = 'TCP'
tgtParams.socketType = mca.network.cmd.banner.SOCKET_TYPE_TCP
elif lpParams['protocol'] == 2:
protocol = 'UDP'
tgtParams.socketType = mca.network.cmd.banner.SOCKET_TYPE_UDP
elif lpParams['protocol'] == 3:
protocol = 'ICMP'
tgtParams.socketType = mca.network.cmd.banner.SOCKET_TYPE_ICMP
else:
mcl.tasking.OutputError('Invalid protocol type (%u)' % lpParams['protocol'])
return False
if tgtParams.dstPort == 0 and tgtParams.socketType != mca.network.cmd.banner.SOCKET_TYPE_ICMP:
mcl.tasking.OutputError('A port must be specified for this type of connection')
return False
else:
if lpParams['...'] != None:
if not _bufferScrubber(lpParams['...'], tgtParams.data):
mcl.tasking.OutputError('Invalid send buffer')
return False
taskXml = mcl.tasking.Tasking()
taskXml.SetTargetRemote('%s' % tgtParams.targetAddr)
taskXml.SetType(protocol)
if tgtParams.dstPort != 0:
taskXml.AddSearchMask('%u' % tgtParams.dstPort)
mcl.tasking.OutputXml(taskXml.GetXmlObject())
rpc = mca.network.cmd.banner.tasking.RPC_INFO_BANNER
msg = MarshalMessage()
tgtParams.Marshal(msg)
rpc.SetData(msg.Serialize())
rpc.SetMessagingType('message')
res = mcl.tasking.RpcPerformCall(rpc)
if res != mcl.target.CALL_SUCCEEDED:
mcl.tasking.RecordModuleError(res, 0, mca.network.cmd.banner.errorStrings)
return False
return True
def _bufferScrubber(input, data):
i = 0
while i < len(input):
try:
if input[i] != '\\':
charToAdd = ord(input[i])
else:
if input[i + 1] == 'a':
charToAdd = ord('\x07')
elif input[i + 1] == 'b':
charToAdd = ord('\x08')
elif input[i + 1] == 'f':
charToAdd = ord('\x0c')
elif input[i + 1] == 'n':
charToAdd = ord('\n')
elif input[i + 1] == 'r':
charToAdd = ord('\r')
elif input[i + 1] == 't':
charToAdd = ord('\t')
elif input[i + 1] == 'v':
charToAdd = ord('\x0b')
elif input[i + 1] == '?':
charToAdd = ord('\\?')
elif input[i + 1] == "'":
charToAdd = ord("'")
elif input[i + 1] == '"':
charToAdd = ord('"')
elif input[i + 1] == '\\':
charToAdd = ord('\\')
elif input[i + 1] == '0' or input[i + 1] == '1' or input[i + 1] == '2' or input[i + 1] == '3':
sum = 0
j = i + 1
while j <= i + 3:
if j >= len(input):
return False
charval = ord(input[j]) - ord('0')
if charval >= 0 and charval <= 7:
sum = 8 * sum + charval
else:
return False
j = j + 1
charToAdd = sum
i = i + 2
elif input[i + 1] == 'X' or input[i + 1] == 'x':
sum = 0
i = i + 2
j = i
while j <= i + 1:
if j >= len(input):
return False
charval = ord(input[j].upper()) - ord('0')
if charval >= 0 and charval <= 9:
sum = 16 * sum + charval
elif charval + ord('0') >= ord('A') and charval + ord('0') <= ord('F'):
sum = 16 * sum + charval - 7
else:
return False
charToAdd = sum
j = j + 1
else:
return False
i = i + 1
data.append(charToAdd)
finally:
i = i + 1
return True
if __name__ == '__main__':
import sys
if TaskingMain(sys.argv[1]) != True:
sys.exit(-1)
|
unlicense
| -4,678,044,798,674,551,000 | 38.089552 | 110 | 0.47088 | false |
pegasus-isi/pegasus
|
packages/pegasus-common/test/test_json.py
|
1
|
2441
|
import io
from enum import Enum
from pathlib import Path
from uuid import UUID
import pytest
from Pegasus.json import dump_all, dumps, load_all, loads
class _Color(Enum):
RED = 1
class _Html:
def __html__(self):
return "html"
class _Json:
def __json__(self):
return "json"
@pytest.mark.parametrize(
"s, expected",
[
('{"key": 1}', 1),
('{"key": "2018-10-10"}', "2018-10-10"),
('{"key": "yes"}', "yes"),
('{"key": true}', True),
],
)
def test_loads(s, expected):
"""Test :meth:`Pegasus.json.loads`."""
rv = loads(s)
assert type(rv["key"]) == type(expected)
assert rv["key"] == expected
@pytest.mark.parametrize(
"obj, expected",
[
({"key": 1}, '{"key": 1}'),
({"key": "2018-10-10"}, '{"key": "2018-10-10"}'),
({"key": "yes"}, '{"key": "yes"}'),
({"key": True}, '{"key": true}'),
({"key": Path("./aaa")}, '{"key": "aaa"}'),
({"key": Path("../aaa")}, '{"key": "../aaa"}'),
],
)
def test_dumps(obj, expected):
"""Test :meth:`Pegasus.json.dumps`."""
assert dumps(obj) == expected
@pytest.mark.parametrize(
"obj, expected", [('{"key": 1}\n{"key": 2}', [{"key": 1}, {"key": 2}])],
)
def test_load_all(obj, expected):
"""Test :meth:`Pegasus.json.load_all`."""
assert list(load_all(obj)) == expected
assert list(load_all(io.StringIO(obj))) == expected
@pytest.mark.parametrize(
"obj, expected",
[
({"key": 1}, '{"key": 1}\n'),
({"key": _Color.RED}, '{"key": "RED"}\n'),
(
{"key": UUID("{12345678-1234-5678-1234-567812345678}")},
'{"key": "12345678-1234-5678-1234-567812345678"}\n',
),
({"key": _Html()}, '{"key": "html"}\n'),
({"key": _Json()}, '{"key": "json"}\n'),
({"key": "2018-10-10"}, '{"key": "2018-10-10"}\n'),
({"key": "yes"}, '{"key": "yes"}\n'),
({"key": True}, '{"key": true}\n'),
({"key": Path("./aaa")}, '{"key": "aaa"}\n'),
({"key": Path("../aaa")}, '{"key": "../aaa"}\n'),
],
)
def test_dump_all(obj, expected):
"""Test :meth:`Pegasus.json.dumps`."""
assert dump_all([obj]) == expected
out = io.StringIO()
dump_all([obj], out)
assert out.getvalue() == expected
with pytest.raises(TypeError) as e:
dump_all([obj], 1)
assert "s must either be None or an open text file" in str(e.value)
|
apache-2.0
| -6,907,769,663,152,580,000 | 24.968085 | 76 | 0.487505 | false |
macarthur-lab/xbrowse
|
seqr/views/apis/phenotips_api_tests.py
|
1
|
1162
|
import json
import mock
from django.test import TestCase
from django.urls.base import reverse
from seqr.views.apis.phenotips_api import phenotips_edit_handler, phenotips_pdf_handler
from seqr.views.utils.test_utils import _check_login, create_proxy_request_stub
class PhenotipsAPITest(TestCase):
fixtures = ['users', '1kg_project']
@mock.patch('seqr.views.apis.phenotips_api.proxy_request', create_proxy_request_stub())
def test_phenotips_edit(self):
url = reverse(phenotips_edit_handler, args=['R0001_1kg', 'I000001_na19675'])
_check_login(self, url)
response = self.client.post(url, content_type='application/json', data=json.dumps({'some_json': 'test'}))
self.assertEqual(response.status_code, 200)
@mock.patch('seqr.views.apis.phenotips_api.proxy_request', create_proxy_request_stub())
def test_phenotips_pdf(self):
url = reverse(phenotips_pdf_handler, args=['R0001_1kg', 'I000001_na19675'])
_check_login(self, url)
response = self.client.post(url, content_type='application/json', data=json.dumps({'some_json': 'test'}))
self.assertEqual(response.status_code, 200)
|
agpl-3.0
| 6,250,293,494,988,709,000 | 40.5 | 113 | 0.707401 | false |
emi420/APIzza
|
mail.api/mail/settings-sample.py
|
1
|
4268
|
"""
Django settings for the mail.api project.
Rename this file to settings.py and replace the
"CHANGEME" string in configuration options to use
these sample settings.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Django settings for mail project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
USE_TZ = True
ADMINS = (
# ('Emilio Mariscal', 'emilio.mariscal@voolks.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
# Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '',
# Or path to database file if using sqlite3.
'USER': '',
# Not used with sqlite3.
'PASSWORD': '',
# Not used with sqlite3.
'HOST': '',
# Set to empty string for localhost. Not used with sqlite3.
'PORT': '',
# Set to empty string for default. Not used with sqlite3.
},
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Tijuana'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'CHANGEME'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
#'django.contrib.auth.middleware.AuthenticationMiddleware',
#'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mail.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'mail.wsgi.application'
# Setting SMTP
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
# Gmail account
EMAIL_HOST_USER = 'CHANGEME'
# Gmail password
EMAIL_HOST_PASSWORD = 'CHANGEME'
INSTALLED_APPS = (
#'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
#'django.contrib.sites',
# 'django.contrib.messages',
# 'django.contrib.staticfiles',
# 'mathfilters',
# Uncomment the next line to enable the admin:
#'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
gpl-3.0
| 4,870,246,465,533,267,000 | 29.056338 | 108 | 0.662605 | false |
tacone/rapache
|
RapacheGtk/VirtualHostGui.py
|
1
|
21051
|
#!/usr/bin/env python
# Rapache - Apache Configuration Tool
# Copyright (C) 2008 Stefano Forenza, Jason Taylor, Emanuele Gentili
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""".
Issues with the new window:
- self.parent doesn't work
- onblur doesn't trigger when pressing Return
- changing a domain name doesn't change subdomains
- empty server aliases shuoldn't be managed
ALSO:
- please implement a delete directive func in the parser
- move denorm. vhosts in another tab
- merge with Qense warning window
"""
import sys
import re
try:
import pygtk
pygtk.require("2.0")
except:
pass
try:
import gtk
import gtk.glade
except:
sys.exit(1)
import os
import pango
import tempfile
import traceback
import RapacheGtk.GuiUtils
from RapacheCore.VirtualHost import *
from RapacheGtk import GuiUtils
from EditDomainNameGui import EditDomainNameWindow
import RapacheGtk.DesktopEnvironment as Desktop
class UnexpectedCase( Exception ):
pass
class VirtualHostWindow:
def __init__ ( self, parent = None):
self.parent = parent
self.plugins = []
self.vhost = None
gladefile = os.path.join(Configuration.GLADEPATH, "edit_vhost.glade")
wtree = gtk.glade.XML(gladefile)
self.window = wtree.get_widget("dialog_edit_vhost")
self.entry_domain = wtree.get_widget("entry_domain")
self.entry_location = wtree.get_widget("entry_location")
self.button_location = wtree.get_widget("button_location")
self.treeview_domain = wtree.get_widget("treeview_domain")
self.checkbutton_hosts = wtree.get_widget("checkbutton_hosts")
self.label_hosts = wtree.get_widget("label_hosts")
self.toolbutton_domain_add = wtree.get_widget("toolbutton_domain_add")
self.toolbutton_domain_edit = wtree.get_widget("toolbutton_domain_edit")
self.toolbutton_domain_delete = wtree.get_widget("toolbutton_domain_delete")
self.combobox_vhost_backups = wtree.get_widget("combobox_vhost_backups")
self.notebook = wtree.get_widget("notebook")
self.button_save = wtree.get_widget("button_save")
self.error_area = wtree.get_widget("error_area")
self.label_path = wtree.get_widget("label_path")
self.message_text = wtree.get_widget("message_text")
self.error_area = wtree.get_widget("error_area")
self.treeview_menu = wtree.get_widget("treeview_menu")
signals = {
"on_toolbutton_domain_add_clicked" : self.on_toolbutton_domain_add_clicked,
"on_toolbutton_domain_edit_clicked" : self.on_toolbutton_domain_edit_clicked,
"on_toolbutton_domain_delete_clicked": self.on_toolbutton_domain_delete_clicked,
"on_button_save_clicked" : self.on_button_save_clicked,
"on_button_cancel_clicked" : self.on_button_cancel_clicked,
"on_entry_domain_changed" : self.on_entry_domain_changed,
"on_button_location_clicked" : self.on_button_location_clicked,
"on_entry_domain_focus_out_event" : self.on_entry_domain_focus_out_event,
"on_button_location_clear_clicked" : self.on_button_location_clear_clicked,
"on_button_restore_version_clicked" : self.on_button_restore_version_clicked,
"on_linkbutton_documentation_clicked" : self.on_linkbutton_documentation_clicked,
"on_notebook_switch_page" : self.on_notebook_switch_page,
"on_treeview_menu_cursor_changed" : self.on_treeview_menu_cursor_changed,
"on_button_error_close_clicked" : self.on_button_error_close_clicked
}
wtree.signal_autoconnect(signals)
# add on destroy to quit loop
self.window.connect("destroy", self.on_destroy)
self.combobox_vhost_backups.set_active(0)
self.text_view_vhost_source = GuiUtils.new_apache_sourceview()
wtree.get_widget( 'text_view_vhost_source_area' ).add( self.text_view_vhost_source )
self.text_view_vhost_source.show()
# Setup tree
column = gtk.TreeViewColumn(('Domains'))
column.set_spacing(4)
cell = gtk.CellRendererText()
column.pack_start(cell, True)
column.set_attributes(cell, markup=0)
self.treeview_domain.append_column(column)
self.treeview_domain_store = gtk.ListStore(str, object)
self.treeview_domain.set_model(self.treeview_domain_store)
GuiUtils.style_as_tooltip( self.error_area )
self.on_entry_domain_changed()
#Setup Menu Tree
column = gtk.TreeViewColumn(('Icon'))
column.set_spacing(4)
cell = gtk.CellRendererPixbuf()
column.pack_start(cell, expand=False)
column.set_attributes(cell, pixbuf=0)
self.treeview_menu.append_column(column)
column = gtk.TreeViewColumn(('Title'))
column.set_spacing(4)
cell = gtk.CellRendererText()
column.pack_start(cell, True)
column.set_attributes(cell, markup=1)
self.treeview_menu.append_column(column)
store = gtk.ListStore(gtk.gdk.Pixbuf, str, int)
self.treeview_menu.set_model(store)
icon_theme = gtk.icon_theme_get_default()
store.append((icon_theme.lookup_icon("applications-internet", 24, 0).load_icon(), "Domain", 0))
# init enabled plugins
for plugin in self.parent.plugin_manager.plugins:
try:
if plugin.is_enabled():
content, title, pixbuf = plugin.init_vhost_properties()
tab_count = self.notebook.get_n_pages() - 1
plugin._tab_number = self.notebook.insert_page(content, gtk.Label(title), tab_count)
store.append((pixbuf, title, tab_count))
content.show()
self.plugins.append(plugin)
except Exception:
traceback.print_exc(file=sys.stdout)
store.append((icon_theme.load_icon(gtk.STOCK_EDIT, 24, 0), "Definition File", self.notebook.get_n_pages() - 1))
select = self.treeview_menu.get_selection()
select.select_path(0)
self.__previous_active_tab = 0
self.accel_group = gtk.AccelGroup()
self.window.add_accel_group(self.accel_group)
self.button_save.add_accelerator("clicked", self.accel_group, 13, 0, 0)
self.vhost = VirtualHostModel( "")
def on_treeview_menu_cursor_changed(self, widget):
model, iter = self.treeview_menu.get_selection().get_selected()
if not iter: return
page_number = model.get_value(iter, 2)
# Save
result = True
error = ""
if self.__previous_active_tab == self.notebook.get_n_pages() - 1:
result, error = self.save_edit_tab()
elif self.__previous_active_tab == 0:
self.save_domain_tab()
result = True
else:
result, error = self.save_plugin_tab(self.__previous_active_tab)
# process
if not result:
self.show_error("Sorry can not change tabs, " + error)
select = self.treeview_menu.get_selection()
select.select_path((self.__previous_active_tab))
return
self.clear_error()
# Load
if page_number == self.notebook.get_n_pages() - 1:
self.load_edit_tab()
elif page_number == 0:
self.load_domain_tab()
else:
self.update_plugin_tab(page_number)
self.window.set_title("VirtualHost Editor - " + self.vhost.get_server_name() )
self.__previous_active_tab = page_number
self.notebook.set_current_page(page_number)
def on_notebook_switch_page(self, notebook, page, page_num):
return
def on_linkbutton_documentation_clicked(self, widget):
Desktop.open_url( widget.get_uri() )
def on_button_restore_version_clicked(self, widget):
buf = self.text_view_vhost_source.get_buffer()
if buf.get_modified():
md = gtk.MessageDialog(self.window, flags=0, type=gtk.MESSAGE_QUESTION, buttons=gtk.BUTTONS_OK_CANCEL, message_format="Are you sure, you will lose all your current changes")
result = md.run()
md.destroy()
if result != gtk.RESPONSE_OK:
return
selected = self.combobox_vhost_backups.get_active()
if selected == 0:
buf.set_text( self.vhost.get_source() )
else:
value = self.combobox_vhost_backups.get_active_text()[7:]
buf.set_text( self.vhost.get_source_version(value) )
buf.set_modified(False)
def run(self):
# Load UI Plugins
if self.vhost:
site = self.vhost
else:
#this should never happen since now we initialize an empty VirtualHostModel
#inside __init__
raise UnexpectedCase, "Internal error, existing VirtualHostModel expected"
pass
self.window.show()
gtk.main()
def load (self, vhost ):
if vhost:
self.vhost = vhost
#hosts tooggling not supported on editing
#self.checkbutton_hosts.hide()
#self.label_hosts.hide()
else:
self.checkbutton_hosts.set_active(True)
self.load_domain_tab()
for file in self.vhost.get_backup_files():
self.combobox_vhost_backups.append_text("Backup " + file[0][-21:-4])
self.label_path.set_text( self.vhost.get_source_filename() )
self.on_entry_domain_changed()
def save_edit_tab(self):
#print "Save edit tab"
buf = self.text_view_vhost_source.get_buffer()
content = buf.get_text(buf.get_start_iter(), buf.get_end_iter())
return self.vhost.load_from_string( content ), "your edited source does not seem to be valid syntax"
def load_edit_tab(self):
#print "load edit tab"
# open edit tab update content
buf = self.text_view_vhost_source.get_buffer()
text = self.vhost.get_source_generated()
buf.set_text( text )
buf.set_modified(False)
def load_domain_tab(self):
#print "Load domain tab"
vhost_name = self.vhost.get_server_name()
self.window.set_title("VirtualHost Editor - " + vhost_name )
self.window.set_icon_from_file(self.vhost.get_icon())
modal = self.treeview_menu.get_model()
iter = modal.get_iter(0)
modal.set_value(iter, 0, self.window.get_icon())
server_name = ''
if self.vhost.config and self.vhost.config.servername and self.vhost.config.servername.value:
server_name = self.vhost.config.servername.value
self.entry_domain.set_text( server_name )
""" ???
if not self.vhost.is_default():
self.entry_domain.set_text( server_name )
elif self.vhost.config.ServerName:
self.entry_domain.set_sensitive(False)
"""
document_root = self.vhost.get_document_root()
if ( document_root != None ):
self.entry_location.set_text( document_root )
server_alias = None
self.treeview_domain_store.clear()
server_alias = self.vhost.get_server_alias()
if server_alias:
for domain in server_alias:
self.treeview_domain_store.append((domain, None))
def save_domain_tab(self):
#print "Save domain tab"
if self.entry_location.get_text() == "" and self.vhost.is_new:
self.set_default_values_from_domain( True )
#if not self.vhost.is_default():
if self.entry_domain.get_text():
self.vhost.config.ServerName.value = self.entry_domain.get_text()
elif self.vhost.config.ServerName:
del self.vhost.config.ServerName
self.window.set_title("VirtualHost Editor - " + self.vhost.get_server_name() )
if self.vhost.config.DocumentRoot:
old_document_root = self.vhost.config.DocumentRoot.value
if old_document_root != self.entry_location.get_text():
ds = self.vhost.config.Directory.search( [old_document_root] )
if len(ds) > 0:
d = ds[0]
d.value = self.entry_location.get_text()
self.vhost.config.DocumentRoot.value = self.entry_location.get_text()
aliases = self.get_server_aliases_list()
if len(aliases) > 0:
self.vhost.config.ServerAlias.opts = self.get_server_aliases_list()
elif self.vhost.config.ServerAlias:
del self.vhost.config.ServerAlias
self.hack_hosts = self.checkbutton_hosts.get_active()
return
def update_plugin_tab(self, tab):
#print "Update plugin : ", tab
if self.plugins:
for plugin in self.plugins:
try:
if plugin.is_enabled() and plugin._tab_number == tab:
plugin.load_vhost_properties(self.vhost)
except Exception:
traceback.print_exc(file=sys.stdout)
def save_plugin_tab(self, tab):
result = True
error = ""
#print "Save plugin : ", tab
if self.plugins:
for plugin in self.plugins:
try:
if plugin.is_enabled() and plugin._tab_number == tab:
result, error = plugin.update_vhost_properties(self.vhost)
except Exception:
traceback.print_exc(file=sys.stdout)
return result, error
def get_domain (self):
return self.entry_domain.get_text().strip()
#url.lower().startswith('http://')
#url[7:]
def set_default_values_from_domain(self, force_domain=False):
domain = self.get_domain()
# auto set the location
if domain and (not self.entry_location.get_text() or force_domain):
self.entry_location.set_text( "/var/www/%s" % (domain +"/httpdocs" ))
if force_domain and not domain:
self.entry_location.set_text("")
def on_entry_domain_focus_out_event(self, widget, opt):
self.set_default_values_from_domain()
def on_entry_domain_changed(self, unused_widget = None):
widget = self.entry_domain
name = widget.get_text()
if valid_domain_name( name ) or (self.vhost and self.vhost.is_default()):
self.button_save.set_sensitive(True);
else:
self.button_save.set_sensitive(False);
def on_button_location_clear_clicked(self, widget):
self.set_default_values_from_domain(True)
def on_button_location_clicked(self, widget):
chooser = gtk.FileChooserDialog(
title=None,
action=gtk.FILE_CHOOSER_ACTION_CREATE_FOLDER,
buttons=(gtk.STOCK_CANCEL,
gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN,
gtk.RESPONSE_OK))
location = self.entry_location.get_text().strip()
while not Shell.command.exists(location):
location = os.path.abspath(os.path.join(location, os.path.pardir))
if not location:
location = "/var/www"
chooser.set_current_folder(location)
response = chooser.run()
if response == gtk.RESPONSE_OK:
self.entry_location.set_text( chooser.get_filename() )
chooser.destroy()
def on_destroy(self, widget, data=None):
gtk.main_quit()
def on_toolbutton_domain_add_clicked(self, widget):
edw = EditDomainNameWindow(self.entry_domain.get_text().strip())
domain = edw.run()
if domain:
self.treeview_domain_store.append((domain, None))
return
def get_server_aliases_list (self ):
aliases = []
for row in self.treeview_domain_store: aliases.append( row[0] )
return aliases
def on_toolbutton_domain_edit_clicked(self, widget):
model, iter = self.treeview_domain.get_selection().get_selected()
if not iter: return
domain = model.get_value(iter, 0)
edw = EditDomainNameWindow( domain )
result = edw.run()
if result:
self.treeview_domain_store.set_value(iter, 0, edw.return_value)
return
def on_toolbutton_domain_delete_clicked(self, widget):
model, iter = self.treeview_domain.get_selection().get_selected()
if not iter: return
self.treeview_domain_store.remove(iter)
return
def on_button_save_clicked(self, widget):
# Save
result, error = True, ""
if self.__previous_active_tab == self.notebook.get_n_pages() - 1:
result, error = self.save_edit_tab()
elif self.__previous_active_tab == 0:
self.save_domain_tab()
else:
result, error = self.save_plugin_tab(self.__previous_active_tab)
# if errors
if not result:
md = gtk.MessageDialog(self.window, flags=0, type=gtk.MESSAGE_ERROR, buttons=gtk.BUTTONS_OK, message_format=error)
result = md.run()
md.destroy()
return
# All plugins on save
if self.plugins:
for plugin in self.plugins:
try:
if plugin.is_enabled():
res, message = plugin.save_vhost_properties(self.vhost)
if not res:
result = False
if tab_number and plugin._tab_number == tab_number:
self.show_error ( message )
except Exception:
traceback.print_exc(file=sys.stdout)
is_new = self.vhost.is_new
self.vhost.hack_hosts = self.checkbutton_hosts.get_active()
# save over buffer content
self.vhost.save()
#update /etc/hosts only if it's a new vhost
if is_new:
if self.hack_hosts:
#update servername
if self.vhost.config.ServerName and self.vhost.config.ServerName.value:
Shell.command.sudo_execute ( [os.path.join(Configuration.APPPATH, "hosts-manager"), '-a', self.vhost.config.ServerName.value ] )
#add an entry for each host
if self.vhost.config.ServerAlias:
for alias in self.vhost.config.ServerAlias:
Shell.command.sudo_execute ( [os.path.join(Configuration.APPPATH, 'hosts-manager'), '-a', alias ])
# check apache config
returncode, error = self.parent.apache.test_config()
if not returncode:
error = error.strip()
md = gtk.MessageDialog(self.window, flags=0, type=gtk.MESSAGE_ERROR, buttons=gtk.BUTTONS_OK_CANCEL, message_format="Changes have been saved, but an error has been detected: \n\n"+error + "\n\nAre you sure you want to continue? Apache may not start until all errors are resolved.")
result = md.run()
md.destroy()
if result != gtk.RESPONSE_OK:
return
#self.parent.create_vhost_list()
self.parent.refresh_vhosts()
self.parent.please_restart()
self.window.destroy()
def on_button_cancel_clicked(self, widget):
self.window.destroy()
return
def on_button_error_close_clicked(self, widget):
self.clear_error()
def show_error ( self, message ):
self.message_text.set_label( '<b>'+message+'</b>' )
self.error_area.show()
def clear_error ( self):
self.error_area.hide()
|
gpl-3.0
| 3,130,645,934,634,689,000 | 37.911275 | 292 | 0.584485 | false |
bradallred/gemrb
|
gemrb/GUIScripts/bg2/GUICG3.py
|
1
|
3602
|
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
#character generation, alignment (GUICG3)
import GemRB
import GUICommon
import CommonTables
from ie_stats import *
from GUIDefines import *
import CharGenCommon
AlignmentWindow = 0
TextAreaControl = 0
DoneButton = 0
MyChar = 0
def OnLoad():
global AlignmentWindow, TextAreaControl, DoneButton
global MyChar
MyChar = GemRB.GetVar ("Slot")
Kit = GUICommon.GetKitIndex (MyChar)
if Kit == 0:
KitName = GUICommon.GetClassRowName (MyChar)
else:
#rowname is just a number, first value row what we need here
KitName = CommonTables.KitList.GetValue(Kit, 0)
AlignmentOk = GemRB.LoadTable("ALIGNMNT")
CommonTables.Aligns = CommonTables.Aligns
AlignmentWindow = GemRB.LoadWindow(3, "GUICG")
CharGenCommon.PositionCharGenWin(AlignmentWindow)
for i in range(9):
Button = AlignmentWindow.GetControl(i+2)
Button.SetFlags(IE_GUI_BUTTON_RADIOBUTTON,OP_OR)
Button.SetState(IE_GUI_BUTTON_DISABLED)
Button.SetText (CommonTables.Aligns.GetValue (i,0))
if AlignmentOk.GetValue(KitName, CommonTables.Aligns.GetValue (i, 4)) != 0:
Button.SetState(IE_GUI_BUTTON_ENABLED)
Button.SetEvent(IE_GUI_BUTTON_ON_PRESS, AlignmentPress)
Button.SetVarAssoc("Alignment", i)
BackButton = AlignmentWindow.GetControl(13)
BackButton.SetText(15416)
BackButton.MakeEscape()
DoneButton = AlignmentWindow.GetControl(0)
DoneButton.SetText(11973)
DoneButton.MakeDefault()
TextAreaControl = AlignmentWindow.GetControl(11)
TextAreaControl.SetText(9602)
DoneButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, NextPress)
BackButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, BackPress)
DoneButton.SetDisabled(True)
AlignmentWindow.Focus()
return
def AlignmentPress():
Alignment = GemRB.GetVar("Alignment")
TextAreaControl.SetText (CommonTables.Aligns.GetValue (Alignment, 1))
DoneButton.SetDisabled(False)
GemRB.SetVar ("Alignment", CommonTables.Aligns.GetValue (Alignment, 3))
return
def BackPress():
if AlignmentWindow:
AlignmentWindow.Unload()
GemRB.SetVar("Alignment",-1) #scrapping the alignment value
GemRB.SetNextScript("CharGen4")
return
def NextPress():
if AlignmentWindow:
AlignmentWindow.Unload()
# save previous stats:
# alignment
# reputation
# alignment abilities
Alignment = GemRB.GetVar ("Alignment")
GemRB.SetPlayerStat (MyChar, IE_ALIGNMENT, Alignment)
# use the alignment to apply starting reputation
RepTable = GemRB.LoadTable ("repstart")
AlignmentAbbrev = CommonTables.Aligns.FindValue (3, Alignment)
Rep = RepTable.GetValue (AlignmentAbbrev, 0) * 10
GemRB.SetPlayerStat (MyChar, IE_REPUTATION, Rep)
# set the party rep if this is the main char
if MyChar == 1:
GemRB.GameSetReputation (Rep)
# diagnostic output
print "CharGen5 output:"
print "\tAlignment: ",Alignment
print "\tReputation: ",Rep
GemRB.SetNextScript("CharGen5") #appearance
return
|
gpl-2.0
| -7,012,643,100,263,473,000 | 29.786325 | 81 | 0.764575 | false |
PaulMcInnis/JobPy
|
jobfunnel/resources/defaults.py
|
1
|
1253
|
"""Default arguments for both JobFunnelConfigManager and CLI arguments.
NOTE: Not all defaults here are used, as we rely on YAML for demo and not kwargs
"""
import os
from pathlib import Path
from jobfunnel.resources.enums import (Locale, DelayAlgorithm, Provider, Remoteness)
DEFAULT_LOG_LEVEL_NAME = 'INFO'
DEFAULT_LOCALE = Locale.CANADA_ENGLISH
DEFAULT_CITY = 'Waterloo'
DEFAULT_PROVINCE = 'ON'
DEFAULT_SEARCH_KEYWORDS = ['Python']
DEFAULT_COMPANY_BLOCK_LIST = []
DEFAULT_SEARCH_RADIUS = 25
DEFAULT_MAX_LISTING_DAYS = 60
DEFAULT_DELAY_MAX_DURATION = 5.0
DEFAULT_DELAY_MIN_DURATION = 1.0
DEFAULT_DELAY_ALGORITHM = DelayAlgorithm.LINEAR
# FIXME: re-enable glassdoor once we fix issue with it. (#87)
DEFAULT_PROVIDERS = [Provider.MONSTER, Provider.INDEED] #, Provider.GLASSDOOR]
DEFAULT_PROVIDER_NAMES = [p.name for p in DEFAULT_PROVIDERS]
DEFAULT_RETURN_SIMILAR_RESULTS = False
DEFAULT_RANDOM_DELAY = False
DEFAULT_RANDOM_CONVERGING_DELAY = False
DEFAULT_REMOTENESS = Remoteness.ANY
# Defaults we use from localization, the scraper can always override it.
DEFAULT_DOMAIN_FROM_LOCALE = {
Locale.CANADA_ENGLISH: 'ca',
Locale.CANADA_FRENCH: 'ca',
Locale.USA_ENGLISH: 'com',
Locale.UK_ENGLISH: 'co.uk',
Locale.FRANCE_FRENCH: 'fr',
}
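# --- Hedged usage sketch (not part of the original module) ---
# Shows how the constants above could seed a search configuration. The
# dictionary layout below is illustrative only; the real project wires these
# defaults through its YAML/CLI config classes rather than through this dict.
if __name__ == '__main__':
    example_search = {
        'locale': DEFAULT_LOCALE,
        'city': DEFAULT_CITY,
        'province_or_state': DEFAULT_PROVINCE,
        'radius': DEFAULT_SEARCH_RADIUS,
        'keywords': DEFAULT_SEARCH_KEYWORDS,
        'providers': DEFAULT_PROVIDER_NAMES,
        'domain': DEFAULT_DOMAIN_FROM_LOCALE[DEFAULT_LOCALE],
        'remoteness': DEFAULT_REMOTENESS,
    }
    print(example_search)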
|
mit
| 7,973,866,357,179,708,000 | 34.8 | 84 | 0.757382 | false |
770352/Dozer
|
dozer/cogs/general.py
|
1
|
11619
|
"""General, basic commands that are common for Discord bots"""
import inspect
import discord
from discord.ext.commands import BadArgument, cooldown, BucketType, Group, has_permissions
from ._utils import *
from .. import db
class General(Cog):
"""General commands common to all Discord bots."""
@command()
async def ping(self, ctx):
"""Check the bot is online, and calculate its response time."""
if ctx.guild is None:
location = 'DMs'
else:
location = 'the **%s** server' % ctx.guild.name
response = await ctx.send('Pong! We\'re in %s.' % location)
delay = response.created_at - ctx.message.created_at
await response.edit(
content=response.content + '\nTook %d ms to respond.' % (delay.seconds * 1000 + delay.microseconds // 1000))
ping.example_usage = """
`{prefix}ping` - Calculate and display the bot's response time
"""
@cooldown(1, 10, BucketType.channel)
@command(name='help', aliases=['about'])
@bot_has_permissions(add_reactions=True, embed_links=True,
read_message_history=True) # Message history is for internals of paginate()
async def base_help(self, ctx, *target):
"""Show this message."""
if not target: # No commands - general help
await self._help_all(ctx)
elif len(target) == 1: # Cog or command
target_name = target[0]
if target_name in ctx.bot.cogs:
await self._help_cog(ctx, ctx.bot.cogs[target_name])
else:
command = ctx.bot.get_command(target_name)
if command is None:
raise BadArgument('that command/cog does not exist!')
else:
await self._help_command(ctx, command)
else: # Command with subcommand
command = ctx.bot.get_command(' '.join(target))
if command is None:
raise BadArgument('that command does not exist!')
else:
await self._help_command(ctx, command)
base_help.example_usage = """
`{prefix}help` - General help message
`{prefix}help help` - Help about the help command
`{prefix}help General` - Help about the General category
"""
async def _help_all(self, ctx):
"""Gets the help message for all commands."""
info = discord.Embed(title='Dozer: Info', description='A guild management bot for FIRST Discord servers',
color=discord.Color.blue())
info.set_thumbnail(url=self.bot.user.avatar_url)
info.add_field(name='About',
value="Dozer: A collaborative bot for FIRST Discord servers, developed by the FRC Discord Server Development Team")
info.add_field(name='About `{}{}`'.format(ctx.prefix, ctx.invoked_with), value=inspect.cleandoc("""
This command can show info for all commands, a specific command, or a category of commands.
Use `{0}{1} {1}` for more information.
""".format(ctx.prefix, ctx.invoked_with)), inline=False)
info.add_field(name='Support',
value="Join our development server at https://discord.gg/bB8tcQ8 for support, to help with development, or if "
"you have any questions or comments!")
info.add_field(name="Open Source",
value="Dozer is open source! Feel free to view and contribute to our Python code "
"[on Github](https://github.com/FRCDiscord/Dozer)")
info.set_footer(text='Dozer Help | all commands | Info page')
await self._show_help(ctx, info, 'Dozer: Commands', '', 'all commands', ctx.bot.commands)
async def _help_command(self, ctx, command):
"""Gets the help message for one command."""
info = discord.Embed(title='Command: {}{}'.format(ctx.prefix, command.signature), description=command.help or (
None if command.example_usage else 'No information provided.'), color=discord.Color.blue())
usage = command.example_usage
if usage is not None:
info.add_field(name='Usage', value=usage.format(prefix=ctx.prefix, name=ctx.invoked_with), inline=False)
info.set_footer(text='Dozer Help | {!r} command | Info'.format(command.qualified_name))
await self._show_help(ctx, info, 'Subcommands: {prefix}{signature}', '', '{command.qualified_name!r} command',
command.commands if isinstance(command, Group) else set(), command=command, signature=command.signature)
async def _help_cog(self, ctx, cog):
"""Gets the help message for one cog."""
await self._show_help(ctx, None, 'Category: {cog_name}', inspect.cleandoc(cog.__doc__ or ''),
'{cog_name!r} category',
(command for command in ctx.bot.commands if command.instance is cog),
cog_name=type(cog).__name__)
async def _show_help(self, ctx, start_page, title, description, footer, commands, **format_args):
"""Creates and sends a template help message, with arguments filled in."""
format_args['prefix'] = ctx.prefix
footer = 'Dozer Help | {} | Page {}'.format(footer,
'{page_num} of {len_pages}')
# Page info is inserted as a parameter so page_num and len_pages aren't evaluated now
if commands:
command_chunks = list(chunk(sorted(commands, key=lambda cmd: cmd.name), 4))
format_args['len_pages'] = len(command_chunks)
pages = []
for page_num, page_commands in enumerate(command_chunks):
format_args['page_num'] = page_num + 1
page = discord.Embed(title=title.format(**format_args), description=description.format(**format_args), color=discord.Color.blue())
for command in page_commands:
if command.short_doc:
embed_value = command.short_doc
elif command.example_usage: # Usage provided - show the user the command to see it
embed_value = 'Use `{0.prefix}{0.invoked_with} {1.qualified_name}` for more information.'.format(
ctx, command)
else:
embed_value = 'No information provided.'
page.add_field(name=ctx.prefix + command.signature, value=embed_value, inline=False)
page.set_footer(text=footer.format(**format_args))
pages.append(page)
if start_page is not None:
pages.append({'info': start_page})
if len(pages) == 1:
await ctx.send(embed=pages[0])
elif start_page is not None:
info_emoji = '\N{INFORMATION SOURCE}'
p = Paginator(ctx, (info_emoji, ...), pages, start='info',
auto_remove=ctx.channel.permissions_for(ctx.me))
async for reaction in p:
if reaction == info_emoji:
p.go_to_page('info')
else:
await paginate(ctx, pages, auto_remove=ctx.channel.permissions_for(ctx.me))
elif start_page: # No commands - command without subcommands or empty cog - but a usable info page
await ctx.send(embed=start_page)
else: # No commands, and no info page
format_args['len_pages'] = 1
format_args['page_num'] = 1
embed = discord.Embed(title=title.format(**format_args), description=description.format(**format_args), color=discord.Color.blue())
embed.set_footer(text=footer.format(**format_args))
await ctx.send(embed=embed)
@has_permissions(change_nickname=True)
@command()
async def nick(self, ctx, *, nicktochangeto):
"""Allows a member to change their nickname."""
await discord.Member.edit(ctx.author, nick=nicktochangeto[:32])
await ctx.send("Nick successfully changed to " + nicktochangeto[:32])
if len(nicktochangeto) > 32:
await ctx.send("Warning: truncated nickname to 32 characters")
@command()
async def invite(self, ctx):
"""
Display the bot's invite link.
The generated link gives all permissions the bot requires. If permissions are removed, some commands will be unusable.
"""
perms = 0
for cmd in ctx.bot.walk_commands():
perms |= cmd.required_permissions.value
await ctx.send('<{}>'.format(discord.utils.oauth_url(ctx.me.id, discord.Permissions(perms))))
@has_permissions(create_instant_invite=True)
@bot_has_permissions(create_instant_invite=True)
@command()
async def invites(self, ctx, num, hours=24):
"""
Generates a set number of single use invites.
"""
with db.Session() as session:
settings = session.query(WelcomeChannel).filter_by(id=ctx.guild.id).one_or_none()
if settings is None:
await ctx.send(
"There is no welcome channel set. Please set one using `{0}welcomeconfig channel` and try again.".format(
ctx.prefix))
return
else:
invitechannel = ctx.bot.get_channel(settings.channel_id)
if invitechannel is None:
await ctx.send(
"There was an issue getting your welcome channel. Please set it again using `{0} welcomeconfig channel`.".format(
ctx.prefix))
return
text = ""
for i in range(int(num)):
invite = await invitechannel.create_invite(max_age=hours * 3600, max_uses=1, unique=True,
reason="Autogenerated by {}".format(ctx.author))
text += "Invite {0}: <{1}>\n".format(i + 1, invite.url)
await ctx.send(text)
invites.example_usage = """
`{prefix}invites 5` - Generates 5 single-use invites.
`{prefix}invites 2 12` - Generates 2 single-use invites that last for 12 hours.
"""
@command()
@has_permissions(administrator=True)
async def welcomeconfig(self, ctx, *, welcome_channel: discord.TextChannel):
"""
Sets the new member channel for this guild.
"""
if welcome_channel.guild != ctx.guild:
await ctx.send("That channel is not in this guild.")
return
with db.Session() as Session:
settings = Session.query(WelcomeChannel).filter_by(id=ctx.guild.id).one_or_none()
if settings is None:
settings = WelcomeChannel(id=ctx.guild.id, channel_id=welcome_channel.id)
Session.add(settings)
else:
settings.channel_id = welcome_channel.id
await ctx.send("Welcome channel set to {}".format(welcome_channel.mention))
welcomeconfig.example_usage = """
`{prefix}welcomeconfig #new-members` - Sets the invite channel to #new-members.
"""
def setup(bot):
"""Adds the general cog to the bot"""
bot.remove_command('help')
bot.add_cog(General(bot))
class WelcomeChannel(db.DatabaseObject):
"""Maintains a list of channels for welcome messages"""
__tablename__ = 'welcome_channel'
id = db.Column(db.BigInteger, primary_key=True)
channel_id = db.Column(db.BigInteger, nullable=True)
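# --- Hedged usage sketch (not part of the original module) ---
# Dozer normally loads this cog through discord.py's extension mechanism;
# the extension path below is illustrative and assumes a configured bot
# object elsewhere in the project.
#
# bot.load_extension('dozer.cogs.general')  # imports this module and calls setup(bot)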
|
gpl-3.0
| 4,882,674,967,214,114,000 | 48.866953 | 146 | 0.58542 | false |
schneegor/WeatherServer
|
weather_server/show_weather/models.py
|
1
|
2423
|
# -*- coding: utf-8 -*-
"""
WeatherServer
Copyright (C) 2015 Full Stack Embedded
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from django.db import models
# TODO: tostring methods, also for obs
class Station(models.Model):
"""Metadata for the observing station."""
#: Unique station identifier
station_id = models.IntegerField()
#: Station's longitude in WGS84
longitude = models.DecimalField(max_digits=7, decimal_places=4)
#: Station's latitude in WGS84
latitude = models.DecimalField(max_digits=6, decimal_places=4)
#: Station's elevation over mean sea level in WGS84
elevation = models.FloatField()
#: Station's informal name
name = models.CharField(max_length=80)
#: Date of station activation.
activated = models.DateTimeField('Station activated')
#: Station's deactivation date. A reactivated station is a new station.
deactivated = models.DateTimeField('Station deactivated',
blank=True,
null=True)
description = models.CharField(max_length=200)
class Observation(models.Model):
"""
Weather observation.
Observations are always in SI units.
"""
obs_date = models.DateTimeField('observation date')
#: Observing station
station = models.ForeignKey(Station)
temperature = models.DecimalField(max_digits=5, decimal_places=2)
#: In %
relative_humidity = models.DecimalField(max_digits=3, decimal_places=1)
#: In mm
precipitation = models.IntegerField()
#: In m/s
wind_speed = models.DecimalField(max_digits=5, decimal_places=2)
#: In degrees clockwise from cartographic north
wind_direction = models.IntegerField()
#: In hPa
pressure = models.IntegerField()
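# --- Hedged usage sketch (not part of the original module) ---
# Illustrates how the two models above fit together. Assumes a configured
# Django project with migrations applied; all values are made up.
#
# from datetime import datetime
# station = Station.objects.create(
#     station_id=1, longitude=8.4037, latitude=49.0069, elevation=115.0,
#     name='Demo station', activated=datetime(2015, 6, 1),
#     description='Rooftop test station')
# Observation.objects.create(
#     obs_date=datetime(2015, 6, 2, 12, 0), station=station,
#     temperature=21.50, relative_humidity=48.0, precipitation=0,
#     wind_speed=3.20, wind_direction=270, pressure=1013)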
|
gpl-2.0
| 7,606,419,842,446,554,000 | 35.712121 | 75 | 0.704911 | false |
metglobal/django-exchange
|
exchange/conversion.py
|
1
|
4152
|
from collections import namedtuple
from operator import itemgetter
from datetime import timedelta
from django.conf import settings
from exchange.adapters import BaseAdapter
from exchange.utils import import_class, memoize
from exchange.models import ExchangeRate
from exchange.cache import (update_rates_cached, get_rate_cached,
get_rates_cached, CACHE_ENABLED, set_cached_rate)
Price = namedtuple('Price', ('value', 'currency'))
EXCHANGE_ADAPTER_CLASS_KEY = 'EXCHANGE_ADAPTER_CLASS'
EXCHANGE_DEFAULT_ADAPTER_CLASS = \
'exchange.adapters.openexchangerates.OpenExchangeRatesAdapter'
def update_rates(adapter_class_name=None):
adapter_class_name = (adapter_class_name or
getattr(settings,
EXCHANGE_ADAPTER_CLASS_KEY,
EXCHANGE_DEFAULT_ADAPTER_CLASS))
adapter_class = import_class(adapter_class_name)
adapter = adapter_class()
if not isinstance(adapter, BaseAdapter):
raise TypeError("invalid adapter class: %s" % adapter_class_name)
adapter.update()
if CACHE_ENABLED:
update_rates_cached()
def convert_values(args_list):
"""convert_value in bulk.
:param args_list: list of value, source, target currency pairs
:return: map of converted values
"""
rate_map = get_rates(map(itemgetter(1, 2), args_list))
value_map = {}
for value, source, target in args_list:
args = (value, source, target)
if source == target:
value_map[args] = value
else:
value_map[args] = value * rate_map[(source, target)]
return value_map
def get_rates(currencies):
sources = []
targets = []
if CACHE_ENABLED:
rate_map = get_rates_cached(currencies)
for (source, target), rate in rate_map.items():
if not rate:
sources.append(source)
targets.append(target)
else:
rate_map = {c: None for c in currencies}
sources = map(itemgetter(0), currencies)
targets = map(itemgetter(1), currencies)
rates = ExchangeRate.objects.filter(
source__code__in=sources,
target__code__in=targets).values_list(
'source__code',
'target__code',
'rate')
for source, target, rate in rates:
key = (source, target)
# Some other combinations that are not in currencies originally
# may have been fetched from the query
if key in rate_map:
rate_map[key] = rate
return rate_map
@memoize(ttl=timedelta(minutes=1))
def get_rate(source_currency, target_currency):
rate = None
if CACHE_ENABLED:
rate = get_rate_cached(source_currency, target_currency)
if not rate:
rate = ExchangeRate.objects.get_rate(source_currency, target_currency)
if CACHE_ENABLED:
set_cached_rate(source_currency, target_currency, rate)
return rate
def convert_value(value, source_currency, target_currency):
"""Converts the price of a currency to another one using exchange rates
:param price: the price value
:param type: decimal
:param source_currency: source ISO-4217 currency code
:param type: str
:param target_currency: target ISO-4217 currency code
:param type: str
:returns: converted price instance
:rtype: ``Price``
"""
# If price currency and target currency is same
# return given currency as is
if source_currency == target_currency:
return value
rate = get_rate(source_currency, target_currency)
return value * rate
def convert(price, currency):
"""Shorthand function converts a price object instance of a source
currency to target currency
:param price: the price value
:param type: decimal
:param currency: target ISO-4217 currency code
:param type: str
:returns: converted price instance
:rtype: ``Price``
"""
# If price currency and target currency is same
# return given currency as is
value = convert_value(price.value, price.currency, currency)
return Price(value, currency)
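# --- Hedged usage sketch (not part of the original module) ---
# Shows the intended call pattern for the helpers above. Assumes Django
# settings, ExchangeRate rows and (optionally) the cache backend that
# get_rate() relies on are already in place; codes and amounts are made up.
#
# from decimal import Decimal
# price = Price(Decimal('10.00'), 'USD')
# converted = convert(price, 'EUR')                # -> Price(value=..., currency='EUR')
# value_only = convert_value(Decimal('10.00'), 'USD', 'EUR')
# bulk = convert_values([(Decimal('10.00'), 'USD', 'EUR'),
#                        (Decimal('5.00'), 'USD', 'GBP')])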
|
mit
| -6,385,014,705,637,827,000 | 28.034965 | 78 | 0.650048 | false |
mylokin/schematec
|
tests/test_schema_array.py
|
1
|
2059
|
from __future__ import absolute_import
import pytest
import schematec.schema
import schematec.converters as converters
import schematec.validators as validators
import schematec.exc as exc
def test_empty_schema_with_empty_value():
schema = schematec.schema.array()
assert schema([]) == []
def test_empty_schema_with_non_empty_value():
schema = schematec.schema.array()
assert schema([1]) == [1]
def test_schema_with_missed_keys():
schema = schematec.schema.array(converters.string)
assert schema([1]) == ['1']
def test_integer_to_string_converter():
schema = schematec.schema.array(converters.string)
assert schema([1]) == ['1']
def test_integer_to_integer_converter():
schema = schematec.schema.array(converters.integer)
assert schema([1]) == [1]
def test_bound_validator_skipped():
schema = schematec.schema.array(validators.length(3))
assert schema([1]) == [1]
def test_bound_validator():
schema = schematec.schema.array(validators.length(3))
assert schema(['1']) == ['1']
def test_bound_validator_error():
schema = schematec.schema.array(validators.length(3))
with pytest.raises(exc.ValidationError):
schema(['1234'])
def test_schema_with_converters_and_validators():
schema = schematec.schema.array(converters.string & validators.length(3))
assert schema([123]) == ['123']
def test_schema_with_converters_and_validators_fail_on_convertation():
schema = schematec.schema.array(converters.string & validators.length(3))
with pytest.raises(exc.ConvertationError):
schema([None])
def test_schema_with_converters_and_validators_fail_on_length():
schema = schematec.schema.array(converters.string & validators.length(3))
with pytest.raises(exc.ValidationError):
schema(['1234'])
def test_schema_with_converters_and_validators_fail_on_length_for_various_values():
schema = schematec.schema.array(converters.string & validators.length(3))
with pytest.raises(exc.ValidationError):
schema(['123', '1234'])
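def test_documented_usage_sketch():
    # Hedged illustrative example (not part of the original test suite):
    # converters run before validators and both are applied element-wise,
    # following the behaviour exercised by the tests above.
    schema = schematec.schema.array(converters.string & validators.length(3))
    assert schema([123, '42']) == ['123', '42']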
|
mit
| -4,460,862,772,345,532,400 | 26.092105 | 83 | 0.70374 | false |
sekikn/ambari
|
ambari-common/src/main/python/resource_management/libraries/functions/security_commons.py
|
2
|
11845
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime, timedelta
from resource_management import Execute, File
from tempfile import mkstemp
import os
import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
from resource_management.core.source import StaticFile
FILE_TYPE_XML = 'XML'
FILE_TYPE_PROPERTIES = 'PROPERTIES'
FILE_TYPE_JAAS_CONF = 'JAAS_CONF'
# The property name used by the hadoop credential provider
HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME = 'hadoop.security.credential.provider.path'
# Copy JCEKS provider to service specific location and update the ACL
def update_credential_provider_path(config, config_type, dest_provider_path, file_owner, file_group, use_local_jceks=False):
"""
Copies the JCEKS file for the specified config from the default location to the given location,
and sets the ACLs for the specified owner and group. Also updates the config type's configuration
hadoop credential store provider with the copied file name.
:param config: configurations['configurations'][config_type]
:param config_type: Like hive-site, oozie-site, etc.
:param dest_provider_path: The full path to the file where the JCEKS provider file is to be copied to.
:param file_owner: File owner
:param file_group: Group
:return: A copy of the config that was modified or the input config itself if nothing was modified.
"""
# Get the path to the provider <config_type>.jceks
if HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME in config:
provider_path = config[HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME]
src_provider_path = provider_path[len('jceks://file'):]
File(dest_provider_path,
owner = file_owner,
group = file_group,
mode = 0640,
content = StaticFile(src_provider_path)
)
# make a copy of the config dictionary since it is read-only
config_copy = config.copy()
# overwrite the provider path with the path specified
if use_local_jceks:
config_copy[HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME] = 'localjceks://file{0}'.format(dest_provider_path)
else:
config_copy[HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME] = 'jceks://file{0}'.format(dest_provider_path)
return config_copy
return config
def validate_security_config_properties(params, configuration_rules):
"""
Generic security configuration validation based on a set of rules and operations
:param params: The structure where the config parameters are held
:param configuration_rules: A structure containing rules and expectations,
Three types of checks are currently supported by this method:
1. value_checks - checks that a certain value must be set
2. empty_checks - checks that the property values must not be empty
3. read_checks - checks that the value represented by the property describes a readable file on the filesystem
:return: Issues found - should be empty if all is good
"""
issues = {}
for config_file, rule_sets in configuration_rules.iteritems():
# Each configuration rule set may have 0 or more of the following rule sets:
# - value_checks
# - empty_checks
# - read_checks
try:
# Each rule set has at least a list of relevant property names to check in some way
# The rule set for the operation of 'value_checks' is expected to be a dictionary of
# property names to expected values
actual_values = params[config_file] if config_file in params else {}
# Process Value Checks
# The rules are expected to be a dictionary of property names to expected values
rules = rule_sets['value_checks'] if 'value_checks' in rule_sets else None
if rules:
for property_name, expected_value in rules.iteritems():
actual_value = get_value(actual_values, property_name, '')
if actual_value != expected_value:
issues[config_file] = "Property %s contains an unexpected value. " \
"Expected/Actual: %s/%s" \
% (property_name, expected_value, actual_value)
# Process Empty Checks
# The rules are expected to be a list of property names that should not have empty values
rules = rule_sets['empty_checks'] if 'empty_checks' in rule_sets else None
if rules:
for property_name in rules:
actual_value = get_value(actual_values, property_name, '')
if not actual_value:
issues[config_file] = "Property %s must exist and must not be empty" % property_name
# Process Read Checks
# The rules are expected to be a list of property names that resolve to files names and must
# exist and be readable
rules = rule_sets['read_checks'] if 'read_checks' in rule_sets else None
if rules:
for property_name in rules:
actual_value = get_value(actual_values, property_name, None)
if not actual_value:
issues[config_file] = "Property %s does not exist" % property_name
elif not os.path.isfile(actual_value):
issues[config_file] = "Property %s points to an inaccessible file - %s" % (property_name, actual_value)
except Exception as e:
issues[config_file] = "Exception occurred while validating the config file\nCauses: %s" % str(e)
return issues
def build_expectations(config_file, value_checks, empty_checks, read_checks):
"""
Helper method used to build the check expectations dict
:return:
"""
configs_expectations = {}
configs_expectations[config_file] = {}
if value_checks:
configs_expectations[config_file]['value_checks'] = value_checks
if empty_checks:
configs_expectations[config_file]['empty_checks'] = empty_checks
if read_checks:
configs_expectations[config_file]['read_checks'] = read_checks
return configs_expectations
def get_params_from_filesystem(conf_dir, config_files):
"""
Used to retrieve properties from xml config files and build a dict
The dictionary of configuration files to file types should contain one of the following values"
'XML'
'PROPERTIES'
:param conf_dir: directory where the configuration files sit
:param config_files: dictionary of configuration file names to (supported) file types
:return: a dictionary of config-type to a dictionary of key/value pairs for
"""
result = {}
from xml.etree import ElementTree as ET
import ConfigParser, StringIO
import re
for config_file, file_type in config_files.iteritems():
file_name, file_ext = os.path.splitext(config_file)
config_filepath = conf_dir + os.sep + config_file
if not os.path.isfile(config_filepath):
continue
if file_type == FILE_TYPE_XML:
configuration = ET.parse(config_filepath)
props = configuration.getroot().getchildren()
config_file_id = file_name if file_name else config_file
result[config_file_id] = {}
for prop in props:
result[config_file_id].update({prop[0].text: prop[1].text})
elif file_type == FILE_TYPE_PROPERTIES:
with open(config_filepath, 'r') as f:
config_string = '[root]\n' + f.read()
ini_fp = StringIO.StringIO(re.sub(r'\\\s*\n', '\\\n ', config_string))
config = ConfigParser.RawConfigParser()
config.readfp(ini_fp)
props = config.items('root')
result[file_name] = {}
for key, value in props:
result[file_name].update({key : value})
elif file_type == FILE_TYPE_JAAS_CONF:
section_header = re.compile('^(\w+)\s+\{\s*$')
section_data = re.compile('^\s*([^ \s\=\}\{]+)\s*=?\s*"?([^ ";]+)"?;?\s*$')
section_footer = re.compile('^\}\s*;?\s*$')
section_name = "root"
result[file_name] = {}
with open(config_filepath, 'r') as f:
for line in f:
if line:
line = line.strip()
m = section_header.search(line)
if m:
section_name = m.group(1)
if section_name not in result[file_name]:
result[file_name][section_name] = {}
else:
m = section_footer.search(line)
if m:
section_name = "root"
else:
m = section_data.search(line)
if m:
result[file_name][section_name][m.group(1)] = m.group(2)
return result
def cached_kinit_executor(kinit_path, exec_user, keytab_file, principal, hostname, temp_dir,
expiration_time=5):
"""
Main cached kinit executor - Uses a temporary file on the FS to cache executions. Each command
will have its own file and only one entry (last successful execution) will be stored
"""
key = str(hash("%s|%s" % (principal, keytab_file)))
filename = key + "_tmp.txt"
file_path = temp_dir + os.sep + "kinit_executor_cache"
output = None
# First execution scenario dir file existence check
if not os.path.exists(file_path):
os.makedirs(file_path)
file_path += os.sep + filename
# If the file does not exist create before read
if not os.path.isfile(file_path):
with open(file_path, 'w+') as new_file:
new_file.write("{}")
try:
with open(file_path, 'r') as cache_file:
output = json.load(cache_file)
except:
# In the extraordinary case the temporary file gets corrupted the cache should be reset to avoid error loop
with open(file_path, 'w+') as cache_file:
cache_file.write("{}")
if (not output) or (key not in output) or ("last_successful_execution" not in output[key]):
new_cached_exec(key, file_path, kinit_path, temp_dir, exec_user, keytab_file, principal, hostname)
else:
last_run_time = output[key]["last_successful_execution"]
now = datetime.now()
if (now - datetime.strptime(last_run_time, "%Y-%m-%d %H:%M:%S.%f") > timedelta(minutes=expiration_time)):
new_cached_exec(key, file_path, kinit_path, temp_dir, exec_user, keytab_file, principal, hostname)
def new_cached_exec(key, file_path, kinit_path, temp_dir, exec_user, keytab_file, principal, hostname):
"""
Entry point of an actual execution - triggered when timeout on the cache expired or on fresh execution
"""
now = datetime.now()
temp_kinit_cache_fd, temp_kinit_cache_filename = mkstemp(dir=temp_dir)
command = "%s -c %s -kt %s %s" % \
(kinit_path, temp_kinit_cache_filename, keytab_file,
principal.replace("_HOST", hostname))
os.close(temp_kinit_cache_fd)
try:
# Ensure the proper user owns this file
File(temp_kinit_cache_filename, owner=exec_user, mode=0600)
# Execute the kinit
Execute(command, user=exec_user)
with open(file_path, 'w+') as cache_file:
result = {key: {"last_successful_execution": str(now)}}
json.dump(result, cache_file)
finally:
File(temp_kinit_cache_filename, action='delete')
def get_value(values, property_path, default_value):
names = property_path.split('/')
current_dict = values
for name in names:
if name in current_dict:
current_dict = current_dict[name]
else:
return default_value
return current_dict
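# --- Hedged usage sketch (not part of the original module) ---
# Typical combination of the helpers above, as called from service scripts.
# The directory, file names, property names and principals are illustrative.
#
# expectations = build_expectations(
#     'hdfs-site',
#     value_checks={'hadoop.security.authentication': 'kerberos'},
#     empty_checks=['dfs.namenode.keytab.file'],
#     read_checks=['dfs.namenode.keytab.file'])
# security_params = get_params_from_filesystem(
#     '/etc/hadoop/conf', {'hdfs-site.xml': FILE_TYPE_XML})
# issues = validate_security_config_properties(security_params, expectations)
# if not issues:
#     cached_kinit_executor('/usr/bin/kinit', 'hdfs',
#                           '/etc/security/keytabs/nn.service.keytab',
#                           'nn/_HOST@EXAMPLE.COM', 'namenode-host',
#                           '/var/lib/ambari-agent/tmp')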
|
apache-2.0
| -8,151,399,823,975,610,000 | 39.986159 | 128 | 0.678345 | false |
acigna/pywez
|
zsi/test/test_t2.py
|
1
|
3220
|
#!/usr/bin/env python
import sys
import unittest
from ZSI import TC, ParsedSoap, ParseException, EvaluateException, FaultFromZSIException, FaultFromException, FaultFromActor, FaultFromNotUnderstood, SoapWriter
class t2TestCase(unittest.TestCase):
"Test case wrapper for old ZSI t2 test case"
def checkt2(self):
try:
ps = ParsedSoap(IN)
except ParseException, e:
print >>OUT, FaultFromZSIException(e).AsSOAP()
self.fail()
except Exception, e:
# Faulted while processing; assume it's in the
# header.
print >>OUT, FaultFromException(e, 1).AsSOAP()
self.fail()
# We are not prepared to handle any actors or mustUnderstand elements.
# Arbitrary fault back with the first one found.
a = ps.WhatActorsArePresent()
if len(a):
print >>OUT, FaultFromActor(a[0]).AsSOAP()
self.fail()
mu = ps.WhatMustIUnderstand()
if len(mu):
uri, localname = mu[0]
print >>OUT, FaultFromNotUnderstood(uri, localname).AsSOAP()
self.fail()
try:
player = ps.Parse(Player)
except EvaluateException, e:
print >>OUT, FaultFromZSIException(e).AsSOAP()
self.fail()
try:
import operator
total = reduce(operator.add, player.Scores, 0)
result = Average(foo(total, len(player.Scores)))
sw = SoapWriter().serialize(result)
str(sw)
#print >>OUT, str(sw)
except Exception, e:
print >>OUT, FaultFromException(e, 0, sys.exc_info()[2]).AsSOAP()
self.fail()
def makeTestSuite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(t2TestCase, "check"))
return suite
class Player:
'''Input class.'''
def __init__(self, name=None):
pass
Player.typecode = TC.Struct(Player, [ TC.String('Name', optional=1),
TC.Array('xsd:integer', TC.Integer(),
'Scores'), ], 'GetAverage')
class Average:
'''Output class.'''
def __init__(self, average):
self.average = average
Average.typecode = TC.Struct(Average, [ TC.Integer('average'),
], 'GetAverageResponse', inline=1)
def bar(total, len):
return total / len
def foo(total, len):
return bar(total, len)
OUT = sys.stdout
IN='''<SOAP-ENV:Envelope
xmlns="http://www.example.com/schemas/TEST"
xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:ZSI="http://www.zolera.com/schemas/ZSI/">
<SOAP-ENV:Header>
<trans SOAP-ENV:mustUnderstand="0"/>
</SOAP-ENV:Header>
<SOAP-ENV:Body>
<GetAverage>
<Scores SOAP-ENC:arrayType="xsd:integer">
<i>84</i>
<xxi>101</xxi>
<foi>200</foi>
<izzz>4</izzz>
</Scores>
<Name>John Doe</Name>
</GetAverage>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>'''
def main():
unittest.main(defaultTest="makeTestSuite")
if __name__ == "__main__" : main()
|
mit
| 1,620,134,022,374,674,400 | 28.814815 | 101 | 0.557764 | false |
tensorflow/deepmath
|
deepmath/deephol/utilities/proof_checker_lib.py
|
1
|
10623
|
"""Exports proof logs to OCaml files to be loaded by HOL Light.
Processes multiple proof logs, but can generate at most one proof per theorem.
"""
from __future__ import absolute_import
from __future__ import division
# Import Type Annotations
from __future__ import print_function
import tensorflow as tf
from typing import Dict, Iterable, List, Text
from deepmath.deephol import deephol_pb2
from deepmath.deephol.utilities import proof_analysis
from deepmath.deephol import theorem_fingerprint
from deepmath.proof_assistant import proof_assistant_pb2
class ProofFailedError(Exception):
pass
def put_in_quotes(s: Text):
return '"%s"' % s
def _tactic_string_to_ocaml(tactic_string: Text) -> Text:
return 'Parse_tactic.parse ' + put_in_quotes(tactic_string)
def tactic_application_to_string(t_app: deephol_pb2.TacticApplication) -> Text:
"""Generate tactic strings.
Args:
t_app: TacticApplication proto
Returns:
tactic string; to be parsed by third_party/hol_light/parse_tactic.ml
Raises:
ProofFailedError: When invariants of the tactic application are not met.
"""
tactic_str = str(t_app.tactic)
for i, param in enumerate(t_app.parameters):
tactic_str += ' '
if param.parameter_type == deephol_pb2.Tactic.UNKNOWN:
if not param.unknown:
raise ProofFailedError(
'No (or empty) parameter UNKNOWN given for parameter '
'index %d of tactic %s' % (i, t_app.tactic))
tactic_str += str(param.unknown)
elif param.parameter_type == deephol_pb2.Tactic.TERM:
if not param.term:
raise ProofFailedError('Tactic %s expected term at parameter index %d' %
(t_app.tactic, i))
tactic_str += str(param.term)
elif param.parameter_type == deephol_pb2.Tactic.THEOREM:
if not param.theorems or len(param.theorems) != 1:
raise ProofFailedError(
'Tactic %s expected single theorem at parameter index %d' %
(t_app.tactic, i))
tactic_str += theorem_fingerprint.ToTacticArgument(param.theorems[0])
elif param.parameter_type == deephol_pb2.Tactic.THEOREM_LIST:
if not param.theorems:
tactic_str += '[ ]'
else:
tactic_str += str('[ %s ]' % ' ; '.join([
theorem_fingerprint.ToTacticArgument(thm) for thm in param.theorems
]))
else:
raise ProofFailedError('Unsupported param type: %s' %
str(param.parameter_type))
return tactic_str
def proof_log_as_dict(log: deephol_pb2.ProofLog
) -> Dict[int, deephol_pb2.ProofNode]:
"""Turns proof log into a dictionary."""
d = {}
for node in log.nodes:
fingerprint = theorem_fingerprint.Fingerprint(node.goal)
if fingerprint in d:
raise ValueError('Duplicate subgoal in fingerprint. Ignoring')
d[fingerprint] = node
return d
def proof_linearization(proof_log: deephol_pb2.ProofLog
) -> List[deephol_pb2.TacticApplication]:
"""Turns a proof into a list of tactic applications."""
if not proof_log.HasField('theorem_in_database'):
raise ValueError('Proof log requires field theorem_in_database')
node_dict = proof_log_as_dict(proof_log)
fingerprint = theorem_fingerprint.Fingerprint(proof_log.theorem_in_database)
if fingerprint not in node_dict:
raise ValueError(
'Fingerprint of proof_log.theorem_in_database missing in the proof log.'
)
# Compute a linearization of the tactic applications in left-first order.
tactics = []
open_goals = [proof_log.theorem_in_database]
visited = set()
while open_goals:
goal = open_goals.pop()
fingerprint = theorem_fingerprint.Fingerprint(goal)
if fingerprint in visited:
raise ProofFailedError('Cycle detected!')
visited.add(fingerprint)
try:
proofnode = node_dict[fingerprint]
except KeyError:
raise ProofFailedError('Subgoal not found in proof log: %s.' % str(goal))
if not proofnode.proofs:
raise ProofFailedError('No tactic app found for goal %s' % str(goal))
if len(proofnode.proofs) > 1:
tf.logging.warning('Multiple proofs detected for goal; ignoring all but '
'the first one.')
tactic_application = proofnode.proofs[0] # only checking the first one
tactics.append(tactic_application)
subgoals = list(tactic_application.subgoals) # create a copy
subgoals.reverse() # to enable getting next goal with subgoals.pop()
open_goals.extend(subgoals)
return tactics
def ocaml_proof(proof_log: deephol_pb2.ProofLog) -> List[Text]:
"""Turns a proof log into OCaml code.
Args:
proof_log: Must contain exactly one proof of the given theorem.
Returns:
OCaml code for the proof.
Raises:
ProofFailedError: If an error in the proof is detected.
ValueError: If an error in the checking logic is detected.
"""
if not proof_log.HasField('theorem_in_database'):
raise ValueError('Expected field proof_log.theorem_in_database to be set.')
theorem = proof_log.theorem_in_database
lines = ['']
if theorem.pretty_printed:
# Quotes around the expression are necessary to avoid
# interpretation of '(*' and '*)' as nested comments.
lines.append('(* "%s" *)' % theorem.pretty_printed)
lines.append('')
tactics = proof_linearization(proof_log)
ocaml_parsed_tactics = [
_tactic_string_to_ocaml(tactic_application_to_string(tactic))
for tactic in tactics
]
proof = ' THEN\n '.join(ocaml_parsed_tactics)
quoted_hypotheses = map(put_in_quotes, theorem.hypotheses)
wrapped_proof = 'fun () ->\n decode_goal [%s] "%s",\n %s' % (
'; '.join(quoted_hypotheses), theorem.conclusion, proof)
in_core = 'true' if 'core' in theorem.library_tag else 'false'
lines.append('register_proof %d (\n %s) %s;;' %
(theorem.goal_fingerprint, wrapped_proof, in_core))
return lines
def ocaml_proof_header():
"""Creates the prelude to the OCaml file; enabling the proofs to be loaded."""
return [
'set_jrh_lexer;;', 'open Lib;;', 'open Printer;;',
'open Theorem_fingerprint;;', 'open Import_proofs;;', 'open Tactics;;',
'', 'Printer.current_encoding := Printer.Sexp;;', ''
]
def verify(proof_logs: Iterable[deephol_pb2.ProofLog],
theorem_database: proof_assistant_pb2.TheoremDatabase) -> Text:
"""Generates an OCaml file of proofs for HOL Light to replay.
Args:
proof_logs: Proofs to be checked; assumes the top theorem is the first node
of each proof log, and that there is at most one proof log for each
theorem.
theorem_database: list of theorems and definitions
Returns:
An OCaml file as string.
Raises:
ValueError: If the proof logs could not be converted to OCaml.
"""
proof_logs_processed = 0
proof_logs_with_closed_proofs = 0
proof_logs_without_proof = 0
theorems_with_closed_proofs = 0
successful_proofs = 0
failed_proofs = 0
missing_proofs = 0
missing_in_database = 0
duplicate_proofs = 0
# Prepare theorem databse for efficient lookup
theorem_database_fingerprints = {
theorem_fingerprint.Fingerprint(t) for t in theorem_database.theorems
}
# Count closed proofs in proof log and index by fingerprint of theorems
proof_logs_dict = {}
for log in proof_logs:
proof_logs_processed += 1
if not log.nodes or log.nodes[0].status != deephol_pb2.ProofNode.PROVED:
proof_logs_without_proof += 1
continue
proof_logs_with_closed_proofs += 1
# Ensure consistency of log.nodes[0] and log.theorem_in_database
node0_is_thm = log.nodes[0].goal.tag == proof_assistant_pb2.Theorem.THEOREM
if not node0_is_thm and not log.HasField('theorem_in_database'):
raise ValueError('Not sure which theorem this log proves.')
if not log.HasField('theorem_in_database'):
log.theorem_in_database.CopyFrom(log.nodes[0].goal)
# Start the actual loop logic
fingerprint = theorem_fingerprint.Fingerprint(log.theorem_in_database)
if fingerprint in proof_logs_dict:
tf.logging.warning(
'Can generate at most one OCaml proof per theorem. '
'Detected an additional proof for fingerprint %d.\n\n%s',
fingerprint, str(log.nodes[0].goal))
duplicate_proofs += 1
continue
proof_logs_dict[fingerprint] = log
theorems_with_closed_proofs += 1
if fingerprint not in theorem_database_fingerprints:
missing_in_database += 1
# MAIN LOOP
lines = ocaml_proof_header()
for theorem in theorem_database.theorems:
# Find theorem and its proof in the proof logs
fingerprint = theorem_fingerprint.Fingerprint(theorem)
try:
proof_log = proof_logs_dict[fingerprint]
except KeyError:
continue
try:
# Extract a single proof from the proof log
extracted = proof_analysis.extract_proof(proof_log)
if not extracted:
raise ValueError('Proof log claims a closed proof for '
'fingerprint %d, but no proof could be '
'extracted' % fingerprint)
lines.extend(ocaml_proof(extracted))
successful_proofs += 1
except ProofFailedError as e:
tf.logging.error('Proof of %s failed: %s',
theorem_fingerprint.ToTacticArgument(theorem), str(e))
failed_proofs += 1
# Detailed stats
tf.logging.info('PROOF LOG STATS')
tf.logging.info('Proof logs processed: %d', proof_logs_processed)
tf.logging.info('Proof logs without proofs: %d', proof_logs_without_proof)
tf.logging.info('Proof logs with closed proofs: %d',
proof_logs_with_closed_proofs)
tf.logging.info('PROOF STATS')
tf.logging.info('Successful proofs: %d', successful_proofs)
tf.logging.info('Missing proofs: %d', missing_proofs)
tf.logging.info('Failed proofs: %d', failed_proofs)
tf.logging.info('Theorems with proofs in proof logs: %d',
theorems_with_closed_proofs)
if duplicate_proofs:
tf.logging.warning('Proofs in proof logs that were ignored: %d',
duplicate_proofs)
if missing_in_database:
tf.logging.warning(
'Found proofs for %d theorems that are not in the theorem database',
missing_in_database)
if successful_proofs + failed_proofs != theorems_with_closed_proofs:
raise ValueError('Internal error in the proof checker. Number of theorems '
'checked did not match the proof log.')
if successful_proofs < theorems_with_closed_proofs or failed_proofs > 0:
tf.logging.warning('Proof log could NOT be verified.')
return '\n'.join(lines)
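# --- Hedged usage sketch (not part of the original module) ---
# Expected call pattern for verify(); loading proof logs and the theorem
# database from disk happens elsewhere, so both inputs are placeholders here.
#
# proof_logs = [...]  # Iterable[deephol_pb2.ProofLog]
# theorem_database = proof_assistant_pb2.TheoremDatabase()
# ocaml_source = verify(proof_logs, theorem_database)
# with tf.gfile.Open('/tmp/replayed_proofs.ml', 'w') as f:
#   f.write(ocaml_source)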
|
apache-2.0
| 2,743,703,238,344,068,600 | 36.013937 | 80 | 0.674574 | false |
AuxinJeron/ACS-VRP
|
src/VRPCenter.py
|
1
|
2479
|
from AntColony import AntColony
from AntGraph import AntGraph
from TspPainter import tspPainter
import logging
logger = logging.getLogger("logger")
class VRPCenter:
def __init__(self, tspparser):
self.build_graph(tspparser)
def build_graph(self, tspparser):
self.antGraph = AntGraph(tspparser.cities_coord)
self.lockers = tspparser.lockers
self.lockers_dict = {}
self.delivers_dict = {}
for locker in self.lockers:
self.lockers_dict[locker.id] = locker
self.delivers = tspparser.delivers
for deliver in self.delivers:
self.delivers_dict[deliver.id] = deliver
self.demands = tspparser.demands
self.build_nearest_locker()
def build_nearest_locker(self):
for deliver in self.delivers:
deliver.locker_id = deliver.nearest_locker(self.lockers, self.antGraph.nodes_mat)
locker = self.lockers_dict[deliver.locker_id]
locker.delivers.append(deliver.id)
def start(self):
antColony = AntColony(self.antGraph, self.lockers, self.lockers_dict, self.delivers, self.delivers_dict, self.demands, 10, 250)
antColony.start()
best_path_routes = antColony.best_path_routes
best_path_cost = antColony.best_path_cost
logger.info("-------------------------------------------")
logger.info("Problem optimization result")
logger.info("-------------------------------------------")
if best_path_routes != None:
logger.info("Best path routes found is")
for key in best_path_routes.keys():
logger.info("Deliver {} {}".format(key, best_path_routes[key]))
logger.info("Locker scheme is")
for locker in self.lockers:
logger.info("Locker {} scheme: {}".format(locker.id, self.locker_scheme(locker, best_path_routes)))
logger.info("cost : {}".format(best_path_cost))
tspPainter.drawRoutes(best_path_routes)
else:
logger.info("Failed to path routes")
input("Press Enter to quit...")
def locker_scheme(self, locker, path_routes):
capacity = 0
for deliver_id in locker.delivers:
if deliver_id in path_routes.keys():
path = path_routes[deliver_id]
for pack in path:
capacity += pack.capacity
capacity += self.demands[locker.pos]
return capacity
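# --- Hedged usage sketch (not part of the original module) ---
# VRPCenter expects a parser that already exposes cities_coord, lockers,
# delivers and demands; building that parser is assumed to happen elsewhere.
#
# tspparser = ...               # parsed problem definition
# center = VRPCenter(tspparser)
# center.start()                # runs the ant colony and logs the best routes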
|
apache-2.0
| -708,314,445,237,468,300 | 37.75 | 135 | 0.595401 | false |
kantai/passe-framework-prototype
|
django/middleware/server.py
|
1
|
5329
|
import os, Pyro4, new
import django.htoken.serializer
from django.core import exceptions
from django.utils.importlib import import_module
from django.analysis.persisted import mw_socket
from django.http import get_changeset
PYRO_NAME = 'middleware'
def pre_req(self, request, delta):
request = self.cereal.deserialize(request)
self.cereal.apply_req_delta(request, delta)
request.reset_changeset()
return request
def spawn_middleware_server(mw_path):
pid = os.fork()
if pid == 0:
start_daemon(mw_path)
import sys
sys.exit(0)
else:
return pid
def get_middleware_methods(self):
names = ('process_request', 'process_view', 'process_template_response', 'process_response',
'process_exception')
return [ name for name in names if hasattr(self, name) ]
from traceback import format_exc
def proxied_response(self, request, response, delta):
try:
request = pre_req(self, request, delta)
response = self._process_response(request, response)
return response, request.get_changeset()
except:
print format_exc()
def proxied_template_response(self, request, response, delta):
try:
request = pre_req(self, request, delta)
response = self._process_template_response(request, response)
return response, request.get_changeset()
except:
print format_exc()
def proxied_request(self, request, delta):
try:
request = pre_req(self, request, delta)
response = self._process_request(request)
return response, request.get_changeset()
except:
print format_exc()
def proxied_view(self, request, callback_dummy, callback_args, callback_kwargs, delta):
try:
request = pre_req(self, request, delta)
response = self._process_view(request, callback_dummy, callback_args, callback_kwargs)
return response, request.get_changeset()
except:
print format_exc()
def proxied_exception(self, request, e, delta):
try:
request = pre_req(self, request, delta)
response = self._process_exception(request, e)
return response, request.get_changeset()
except:
print format_exc()
def start_daemon(middleware_path):
try:
mw_module, mw_classname = middleware_path.rsplit('.', 1)
except ValueError:
raise exceptions.ImproperlyConfigured('%s isn\'t a middleware module' % middleware_path)
try:
mod = import_module(mw_module)
except ImportError, e:
raise exceptions.ImproperlyConfigured('Error importing middleware %s: "%s"' % (mw_module, e))
try:
mw_class = getattr(mod, mw_classname)
except AttributeError:
raise exceptions.ImproperlyConfigured('Middleware module "%s" does not define a "%s" class'
% (mw_module, mw_classname))
try:
mw_instance = mw_class()
except exceptions.MiddlewareNotUsed:
return
mw_instance.get_middleware_methods = new.instancemethod(get_middleware_methods,
mw_instance, mw_instance.__class__) # fuh!
names = mw_instance.get_middleware_methods()
if 'process_response' in names:
mw_instance._process_response = mw_instance.process_response
mw_instance.process_response = new.instancemethod(proxied_response,
mw_instance,
mw_instance.__class__)
if 'process_exception' in names:
mw_instance._process_exception = mw_instance.process_exception
mw_instance.process_exception = new.instancemethod(proxied_exception,
mw_instance,
mw_instance.__class__)
if 'process_template_response' in names:
mw_instance._process_template_response = mw_instance.process_template_response
mw_instance.process_template_response = new.instancemethod(proxied_template_response,
mw_instance,
mw_instance.__class__)
if 'process_view' in names:
mw_instance._process_view = mw_instance.process_view
mw_instance.process_view = new.instancemethod(proxied_view,
mw_instance,
mw_instance.__class__)
if 'process_request' in names:
mw_instance._process_request = mw_instance.process_request
mw_instance.process_request = new.instancemethod(proxied_request,
mw_instance,
mw_instance.__class__)
daemon = False
try:
local = mw_instance
daemon = Pyro4.Daemon(unixsocket=mw_socket(middleware_path))
daemon.serializer = django.htoken.serializer.Serializer()
local.cereal = daemon.serializer
daemon.register(local, PYRO_NAME)
daemon.requestLoop()
finally:
if daemon:
daemon.close()
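# --- Hedged usage sketch (not part of the original module) ---
# The dotted path below is illustrative; spawn_middleware_server forks,
# starts the Pyro daemon for the middleware in the child process and
# returns the child's pid to the parent.
#
# pid = spawn_middleware_server('django.middleware.common.CommonMiddleware')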
|
bsd-3-clause
| 833,321,002,150,552,200 | 39.992308 | 102 | 0.579658 | false |
NicoVarg99/daf-recipes
|
ckan/ckan/ckan/ckanext/reclineview/tests/test_view.py
|
1
|
6249
|
# encoding: utf-8
import paste.fixture
from ckan.common import config
import ckan.model as model
import ckan.tests.legacy as tests
import ckan.plugins as p
import ckan.lib.helpers as h
import ckanext.reclineview.plugin as plugin
import ckan.lib.create_test_data as create_test_data
import ckan.config.middleware as middleware
from ckan.tests import helpers, factories
class BaseTestReclineViewBase(tests.WsgiAppCase):
@classmethod
def setup_class(cls):
cls.config_templates = config['ckan.legacy_templates']
config['ckan.legacy_templates'] = 'false'
wsgiapp = middleware.make_app(config['global_conf'], **config)
p.load(cls.view_type)
cls.app = paste.fixture.TestApp(wsgiapp)
cls.p = cls.view_class()
create_test_data.CreateTestData.create()
cls.resource_view, cls.package, cls.resource_id = \
_create_test_view(cls.view_type)
@classmethod
def teardown_class(cls):
config['ckan.legacy_templates'] = cls.config_templates
p.unload(cls.view_type)
model.repo.rebuild_db()
def test_can_view(self):
data_dict = {'resource': {'datastore_active': True}}
assert self.p.can_view(data_dict)
data_dict = {'resource': {'datastore_active': False}}
assert not self.p.can_view(data_dict)
def test_title_description_iframe_shown(self):
url = h.url_for(controller='package', action='resource_read',
id=self.package.name, resource_id=self.resource_id)
result = self.app.get(url)
assert self.resource_view['title'] in result
assert self.resource_view['description'] in result
assert 'data-module="data-viewer"' in result.body
class TestReclineView(BaseTestReclineViewBase):
view_type = 'recline_view'
view_class = plugin.ReclineView
def test_it_has_no_schema(self):
schema = self.p.info().get('schema')
assert schema is None, schema
def test_can_view_format_no_datastore(self):
'''
Test can_view with acceptable formats when datastore_active is False
(DataProxy in use).
'''
formats = ['CSV', 'XLS', 'TSV', 'csv', 'xls', 'tsv']
for resource_format in formats:
data_dict = {'resource': {'datastore_active': False,
'format': resource_format}}
assert self.p.can_view(data_dict)
def test_can_view_bad_format_no_datastore(self):
'''
Test can_view with incorrect formats when datastore_active is False.
'''
formats = ['TXT', 'txt', 'doc', 'JSON']
for resource_format in formats:
data_dict = {'resource': {'datastore_active': False,
'format': resource_format}}
assert not self.p.can_view(data_dict)
class TestReclineViewDatastoreOnly(helpers.FunctionalTestBase):
@classmethod
def setup_class(cls):
if not p.plugin_loaded('recline_view'):
p.load('recline_view')
if not p.plugin_loaded('datastore'):
p.load('datastore')
app_config = config.copy()
app_config['ckan.legacy_templates'] = 'false'
app_config['ckan.plugins'] = 'recline_view datastore'
app_config['ckan.views.default_views'] = 'recline_view'
wsgiapp = middleware.make_app(config['global_conf'], **app_config)
cls.app = paste.fixture.TestApp(wsgiapp)
@classmethod
def teardown_class(cls):
if p.plugin_loaded('recline_view'):
p.unload('recline_view')
if p.plugin_loaded('datastore'):
p.unload('datastore')
def test_create_datastore_only_view(self):
dataset = factories.Dataset()
data = {
'resource': {'package_id': dataset['id']},
'fields': [{'id': 'a'}, {'id': 'b'}],
'records': [{'a': 1, 'b': 'xyz'}, {'a': 2, 'b': 'zzz'}]
}
result = helpers.call_action('datastore_create', **data)
resource_id = result['resource_id']
url = h.url_for(controller='package', action='resource_read',
id=dataset['id'], resource_id=resource_id)
result = self.app.get(url)
assert 'data-module="data-viewer"' in result.body
class TestReclineGridView(BaseTestReclineViewBase):
view_type = 'recline_grid_view'
view_class = plugin.ReclineGridView
def test_it_has_no_schema(self):
schema = self.p.info().get('schema')
assert schema is None, schema
class TestReclineGraphView(BaseTestReclineViewBase):
view_type = 'recline_graph_view'
view_class = plugin.ReclineGraphView
def test_it_has_the_correct_schema_keys(self):
schema = self.p.info().get('schema')
expected_keys = ['offset', 'limit', 'graph_type', 'group', 'series']
_assert_schema_exists_and_has_keys(schema, expected_keys)
class TestReclineMapView(BaseTestReclineViewBase):
view_type = 'recline_map_view'
view_class = plugin.ReclineMapView
def test_it_has_the_correct_schema_keys(self):
schema = self.p.info().get('schema')
expected_keys = ['offset', 'limit', 'map_field_type',
'latitude_field', 'longitude_field', 'geojson_field',
'auto_zoom', 'cluster_markers']
_assert_schema_exists_and_has_keys(schema, expected_keys)
def _create_test_view(view_type):
context = {'model': model,
'session': model.Session,
'user': model.User.get('testsysadmin').name}
package = model.Package.get('annakarenina')
resource_id = package.resources[1].id
resource_view = {'resource_id': resource_id,
'view_type': view_type,
'title': u'Test View',
'description': u'A nice test view'}
resource_view = p.toolkit.get_action('resource_view_create')(
context, resource_view)
return resource_view, package, resource_id
def _assert_schema_exists_and_has_keys(schema, expected_keys):
assert schema is not None, schema
keys = schema.keys()
keys.sort()
expected_keys.sort()
assert keys == expected_keys, '%s != %s' % (keys, expected_keys)
|
gpl-3.0
| 3,710,239,937,665,848,300 | 33.716667 | 78 | 0.612738 | false |
taigaio/taiga-contrib-gitlab-auth
|
back/taiga_contrib_gitlab_auth/services.py
|
1
|
3705
|
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import transaction as tx
from django.db import IntegrityError
from django.utils.translation import ugettext as _
from django.apps import apps
from taiga.base.utils.slug import slugify_uniquely
from taiga.base import exceptions as exc
from taiga.auth.services import send_register_email
from taiga.auth.services import make_auth_response_data, get_membership_by_token
from taiga.auth.signals import user_registered as user_registered_signal
from . import connector
@tx.atomic
def gitlab_register(username:str, email:str, full_name:str, gitlab_id:int, bio:str, token:str=None):
"""
Register a new user from gitlab.
    This can raise `exc.IntegrityError` exceptions in
    case of conflicts.
:returns: User
"""
auth_data_model = apps.get_model("users", "AuthData")
user_model = apps.get_model("users", "User")
try:
        # Does a Gitlab user association already exist?
auth_data = auth_data_model.objects.get(key="gitlab", value=gitlab_id)
user = auth_data.user
except auth_data_model.DoesNotExist:
try:
            # Is there a user with the same email as the gitlab user?
user = user_model.objects.get(email=email)
auth_data_model.objects.create(user=user, key="gitlab", value=gitlab_id, extra={})
except user_model.DoesNotExist:
# Create a new user
username_unique = slugify_uniquely(username, user_model, slugfield="username")
user = user_model.objects.create(email=email,
username=username_unique,
full_name=full_name,
bio=bio)
auth_data_model.objects.create(user=user, key="gitlab", value=gitlab_id, extra={})
send_register_email(user)
user_registered_signal.send(sender=user.__class__, user=user)
if token:
membership = get_membership_by_token(token)
try:
membership.user = user
membership.save(update_fields=["user"])
except IntegrityError:
raise exc.IntegrityError(_("This user is already a member of the project."))
return user
def gitlab_login_func(request):
code = request.DATA.get('code', None)
token = request.DATA.get('token', None)
redirectUri = request.DATA.get('redirectUri', None)
email, user_info = connector.me(code, redirectUri)
user = gitlab_register(username=user_info.username,
email=email,
full_name=user_info.full_name,
gitlab_id=user_info.id,
bio=user_info.bio,
token=token)
data = make_auth_response_data(user)
return data
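# Illustrative sketch of the request payload gitlab_login_func() reads from
# request.DATA (field names mirror the .get() calls above; values are
# hypothetical examples):
#
#   {"code": "<OAuth authorization code returned by GitLab>",
#    "redirectUri": "https://taiga.example.com/login",
#    "token": "<optional project membership token>"}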
|
agpl-3.0
| -6,168,161,143,944,050,000 | 38.817204 | 100 | 0.652984 | false |
bxlab/bx-python
|
lib/bx_extras/stats.py
|
1
|
153779
|
# Copyright (c) 1999-2002 Gary Strangman; All Rights Reserved.
#
# This software is distributable under the terms of the GNU
# General Public License (GPL) v2, the text of which can be found at
# http://www.gnu.org/copyleft/gpl.html. Installing, importing or otherwise
# using this module constitutes acceptance of the terms of this License.
#
# Disclaimer
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application.  In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
#
# Comments and/or additions are welcome (send e-mail to:
# strang@nmr.mgh.harvard.edu).
#
"""
stats.py module
(Requires pstat.py module.)
#################################################
####### Written by: Gary Strangman ###########
####### Last modified: May 10, 2002 ###########
#################################################
A collection of basic statistical functions for python. The function
names appear below.
IMPORTANT: There are really *3* sets of functions. The first set has an 'l'
prefix, which can be used with list or tuple arguments. The second set has
an 'a' prefix, which can accept NumPy array arguments. These latter
functions are defined only when NumPy is available on the system. The third
type has NO prefix (i.e., has the name that appears below). Functions of
this set are members of a "Dispatch" class, c/o David Ascher. This class
allows different functions to be called depending on the type of the passed
arguments. Thus, stats.mean is a member of the Dispatch class and
stats.mean(range(20)) will call stats.lmean(range(20)) while
stats.mean(Numeric.arange(20)) will call stats.amean(Numeric.arange(20)).
This is a handy way to keep consistent function names when different
argument types require different functions to be called. Having
implemented the Dispatch class, however, means that to get info on
a given function, you must use the REAL function name ... that is
"print stats.lmean.__doc__" or "print stats.amean.__doc__" work fine,
while "print stats.mean.__doc__" will print the doc for the Dispatch
class. NUMPY FUNCTIONS ('a' prefix) generally have more argument options
but should otherwise be consistent with the corresponding list functions.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful.
CENTRAL TENDENCY: geometricmean
harmonicmean
mean
median
medianscore
mode
MOMENTS: moment
variation
skew
kurtosis
skewtest (for Numpy arrays only)
kurtosistest (for Numpy arrays only)
normaltest (for Numpy arrays only)
ALTERED VERSIONS: tmean (for Numpy arrays only)
tvar (for Numpy arrays only)
tmin (for Numpy arrays only)
tmax (for Numpy arrays only)
tstdev (for Numpy arrays only)
tsem (for Numpy arrays only)
describe
FREQUENCY STATS: itemfreq
scoreatpercentile
percentileofscore
histogram
cumfreq
relfreq
VARIABILITY: obrientransform
samplevar
samplestdev
signaltonoise (for Numpy arrays only)
var
stdev
sterr
sem
z
zs
zmap (for Numpy arrays only)
TRIMMING FCNS: threshold (for Numpy arrays only)
trimboth
trim1
round (round all vals to 'n' decimals; Numpy only)
CORRELATION FCNS: covariance (for Numpy arrays only)
correlation (for Numpy arrays only)
paired
pearsonr
spearmanr
pointbiserialr
kendalltau
linregress
INFERENTIAL STATS: ttest_1samp
ttest_ind
ttest_rel
chisquare
ks_2samp
mannwhitneyu
ranksums
wilcoxont
kruskalwallish
friedmanchisquare
PROBABILITY CALCS: chisqprob
erfcc
zprob
ksprob
fprob
betacf
gammln
betai
ANOVA FUNCTIONS: F_oneway
F_value
SUPPORT FUNCTIONS: writecc
incr
sign (for Numpy arrays only)
sum
cumsum
ss
summult
sumdiffsquared
square_of_sums
shellsort
rankdata
outputpairedstats
findwithin
"""
# CHANGE LOG:
# ===========
# 02-11-19 ... fixed attest_ind and attest_rel for div-by-zero Overflows
# 02-05-10 ... fixed lchisqprob indentation (failed when df=even)
# 00-12-28 ... removed aanova() to separate module, fixed licensing to
# match Python License, fixed doc string & imports
# 00-04-13 ... pulled all "global" statements, except from aanova()
# added/fixed lots of documentation, removed io.py dependency
# changed to version 0.5
# 99-11-13 ... added asign() function
# 99-11-01 ... changed version to 0.4 ... enough incremental changes now
# 99-10-25 ... added acovariance and acorrelation functions
# 99-10-10 ... fixed askew/akurtosis to avoid divide-by-zero errors
# added aglm function (crude, but will be improved)
# 99-10-04 ... upgraded acumsum, ass, asummult, asamplevar, avar, etc. to
# all handle lists of 'dimension's and keepdims
# REMOVED ar0, ar2, ar3, ar4 and replaced them with around
# reinserted fixes for abetai to avoid math overflows
# 99-09-05 ... rewrote achisqprob/aerfcc/aksprob/afprob/abetacf/abetai to
# handle multi-dimensional arrays (whew!)
# 99-08-30 ... fixed l/amoment, l/askew, l/akurtosis per D'Agostino (1990)
# added anormaltest per same reference
# re-wrote azprob to calc arrays of probs all at once
# 99-08-22 ... edited attest_ind printing section so arrays could be rounded
# 99-08-19 ... fixed amean and aharmonicmean for non-error(!) overflow on
# short/byte arrays (mean of #s btw 100-300 = -150??)
# 99-08-09 ... fixed asum so that the None case works for Byte arrays
# 99-08-08 ... fixed 7/3 'improvement' to handle t-calcs on N-D arrays
# 99-07-03 ... improved attest_ind, attest_rel (zero-division errortrap)
# 99-06-24 ... fixed bug(?) in attest_ind (n1=a.shape[0])
# 04/11/99 ... added asignaltonoise, athreshold functions, changed all
# max/min in array section to N.maximum/N.minimum,
# fixed square_of_sums to prevent integer overflow
# 04/10/99 ... !!! Changed function name ... sumsquared ==> square_of_sums
# 03/18/99 ... Added ar0, ar2, ar3 and ar4 rounding functions
# 02/28/99 ... Fixed aobrientransform to return an array rather than a list
# 01/15/99 ... Essentially ceased updating list-versions of functions (!!!)
# 01/13/99 ... CHANGED TO VERSION 0.3
# fixed bug in a/lmannwhitneyu p-value calculation
# 12/31/98 ... fixed variable-name bug in ldescribe
# 12/19/98 ... fixed bug in findwithin (fcns needed pstat. prefix)
# 12/16/98 ... changed amedianscore to return float (not array) for 1 score
# 12/14/98 ... added atmin and atmax functions
# removed umath from import line (not needed)
# l/ageometricmean modified to reduce chance of overflows (take
# nth root first, then multiply)
# 12/07/98 ... added __version__variable (now 0.2)
# removed all 'stats.' from anova() fcn
# 12/06/98 ... changed those functions (except shellsort) that altered
# arguments in-place ... cumsum, ranksort, ...
# updated (and fixed some) doc-strings
# 12/01/98 ... added anova() function (requires NumPy)
# incorporated Dispatch class
# 11/12/98 ... added functionality to amean, aharmonicmean, ageometricmean
# added 'asum' function (added functionality to N.add.reduce)
# fixed both moment and amoment (two errors)
# changed name of skewness and askewness to skew and askew
# fixed (a)histogram (which sometimes counted points <lowerlimit)
import copy
import math
import string
from . import pstat # required 3rd party module
__version__ = 0.6
# DISPATCH CODE
class Dispatch:
"""
The Dispatch class, care of David Ascher, allows different functions to
be called depending on the argument types. This way, there can be one
function name regardless of the argument type. To access function doc
in stats.py module, prefix the function with an 'l' or 'a' for list or
array arguments, respectively. That is, print stats.lmean.__doc__ or
print stats.amean.__doc__ or whatever.
"""
def __init__(self, *tuples):
self._dispatch = {}
for func, types in tuples:
for t in types:
if t in self._dispatch.keys():
raise ValueError("can't have two dispatches on "+str(t))
self._dispatch[t] = func
self._types = list(self._dispatch.keys())
def __call__(self, arg1, *args, **kw):
if type(arg1) not in self._types:
raise TypeError("don't know how to dispatch %s arguments" % type(arg1))
return self._dispatch[type(arg1)](*(arg1,) + args, **kw)
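# Illustrative sketch of how Dispatch is used by the module-level names defined
# near the end of this file (e.g. mean = Dispatch((lmean, (list, tuple)))):
#
#   mean([1, 2, 3])      # first arg is a list  -> lmean([1, 2, 3])  -> 2.0
#   mean((2, 4, 6))      # tuples are registered too                 -> 4.0
#   mean('abc')          # unregistered type -> TypeError from __call__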
# LIST-BASED FUNCTIONS
# Define these regardless
# CENTRAL TENDENCY
def lgeometricmean(inlist):
"""
Calculates the geometric mean of the values in the passed list.
That is: n-th root of (x1 * x2 * ... * xn). Assumes a '1D' list.
Usage: lgeometricmean(inlist)
"""
mult = 1.0
one_over_n = 1.0/len(inlist)
for item in inlist:
mult = mult * pow(item, one_over_n)
return mult
def lharmonicmean(inlist):
"""
Calculates the harmonic mean of the values in the passed list.
That is: n / (1/x1 + 1/x2 + ... + 1/xn). Assumes a '1D' list.
Usage: lharmonicmean(inlist)
"""
sum = 0
for item in inlist:
sum = sum + 1.0/item
return len(inlist) / sum
def lmean(inlist):
"""
    Returns the arithmetic mean of the values in the passed list.
Assumes a '1D' list, but will function on the 1st dim of an array(!).
Usage: lmean(inlist)
"""
sum = 0
for item in inlist:
sum = sum + item
return sum/float(len(inlist))
def lmedian(inlist, numbins=1000):
"""
Returns the computed median value of a list of numbers, given the
number of bins to use for the histogram (more bins brings the computed value
closer to the median score, default number of bins = 1000). See G.W.
Heiman's Basic Stats (1st Edition), or CRC Probability & Statistics.
Usage: lmedian (inlist, numbins=1000)
"""
    (hist, smallest, binsize, extras) = histogram(inlist, numbins)  # make histogram
cumhist = cumsum(hist) # make cumulative histogram
for i in range(len(cumhist)): # get 1st(!) index holding 50%ile score
if cumhist[i] >= len(inlist)/2.0:
cfbin = i
break
    LRL = smallest + binsize*cfbin  # get lower real limit of that bin
cfbelow = cumhist[cfbin-1]
freq = float(hist[cfbin]) # frequency IN the 50%ile bin
median = LRL + ((len(inlist)/2.0 - cfbelow)/float(freq))*binsize # median formula
return median
def lmedianscore(inlist):
"""
Returns the 'middle' score of the passed list. If there is an even
number of scores, the mean of the 2 middle scores is returned.
Usage: lmedianscore(inlist)
"""
newlist = sorted(copy.deepcopy(inlist))
    if len(newlist) % 2 == 0:   # if even number of scores, average middle 2
        index = len(newlist) // 2  # integer division correct
        median = float(newlist[index] + newlist[index-1]) / 2
    else:
        index = len(newlist) // 2  # integer division gives the mid value, counting from 0
        median = newlist[index]
return median
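# Quick sanity examples for lmedianscore (values follow directly from the code above):
#   lmedianscore([1, 3, 5])      -> 3       (odd count: middle score)
#   lmedianscore([1, 2, 3, 4])   -> 2.5     (even count: mean of the middle two)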
def lmode(inlist):
"""
Returns a list of the modal (most common) score(s) in the passed
list. If there is more than one such score, all are returned. The
bin-count for the mode(s) is also returned.
Usage: lmode(inlist)
Returns: bin-count for mode(s), a list of modal value(s)
"""
scores = sorted(pstat.unique(inlist))
freq = []
for item in scores:
freq.append(inlist.count(item))
maxfreq = max(freq)
mode = []
stillmore = 1
while stillmore:
try:
indx = freq.index(maxfreq)
mode.append(scores[indx])
del freq[indx]
del scores[indx]
except ValueError:
stillmore = 0
return maxfreq, mode
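# Example: lmode([1, 2, 2, 3, 3]) returns (2, [2, 3]) -- the shared bin-count
# followed by every value that attains it.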
# MOMENTS
def lmoment(inlist, moment=1):
"""
Calculates the nth moment about the mean for a sample (defaults to
the 1st moment). Used to calculate coefficients of skewness and kurtosis.
Usage: lmoment(inlist,moment=1)
Returns: appropriate moment (r) from ... 1/n * SUM((inlist(i)-mean)**r)
"""
if moment == 1:
return 0.0
else:
mn = mean(inlist)
n = len(inlist)
s = 0
for x in inlist:
s = s + (x-mn)**moment
return s/float(n)
def lvariation(inlist):
"""
Returns the coefficient of variation, as defined in CRC Standard
Probability and Statistics, p.6.
Usage: lvariation(inlist)
"""
return 100.0*samplestdev(inlist)/float(mean(inlist))
def lskew(inlist):
"""
Returns the skewness of a distribution, as defined in Numerical
    Recipes (alternate definition in CRC Standard Probability and Statistics, p.6.)
Usage: lskew(inlist)
"""
return moment(inlist, 3)/pow(moment(inlist, 2), 1.5)
def lkurtosis(inlist):
"""
Returns the kurtosis of a distribution, as defined in Numerical
    Recipes (alternate definition in CRC Standard Probability and Statistics, p.6.)
Usage: lkurtosis(inlist)
"""
return moment(inlist, 4)/pow(moment(inlist, 2), 2.0)
def ldescribe(inlist):
"""
Returns some descriptive statistics of the passed list (assumed to be 1D).
Usage: ldescribe(inlist)
    Returns: n, (min,max), mean, standard deviation, skew, kurtosis
"""
n = len(inlist)
mm = (min(inlist), max(inlist))
m = mean(inlist)
sd = stdev(inlist)
sk = skew(inlist)
kurt = kurtosis(inlist)
return n, mm, m, sd, sk, kurt
# FREQUENCY STATS
def litemfreq(inlist):
"""
Returns a list of pairs. Each pair consists of one of the scores in inlist
    and its frequency count.  Assumes a 1D list is passed.
Usage: litemfreq(inlist)
Returns: a 2D frequency table (col [0:n-1]=scores, col n=frequencies)
"""
scores = sorted(pstat.unique(inlist))
freq = []
for item in scores:
freq.append(inlist.count(item))
return pstat.abut(scores, freq)
def lscoreatpercentile(inlist, percent):
"""
Returns the score at a given percentile relative to the distribution
given by inlist.
Usage: lscoreatpercentile(inlist,percent)
"""
if percent > 1:
print("\nDividing percent>1 by 100 in lscoreatpercentile().\n")
percent = percent / 100.0
targetcf = percent*len(inlist)
h, lrl, binsize, extras = histogram(inlist)
cumhist = cumsum(copy.deepcopy(h))
for i in range(len(cumhist)):
if cumhist[i] >= targetcf:
break
score = binsize * ((targetcf - cumhist[i-1]) / float(h[i])) + (lrl+binsize*i)
return score
def lpercentileofscore(inlist, score, histbins=10, defaultlimits=None):
"""
Returns the percentile value of a score relative to the distribution
given by inlist. Formula depends on the values used to histogram the data(!).
Usage: lpercentileofscore(inlist,score,histbins=10,defaultlimits=None)
"""
h, lrl, binsize, extras = histogram(inlist, histbins, defaultlimits)
cumhist = cumsum(copy.deepcopy(h))
i = int((score - lrl)/float(binsize))
pct = (cumhist[i-1]+((score-(lrl+binsize*i))/float(binsize))*h[i])/float(len(inlist)) * 100
return pct
def lhistogram(inlist, numbins=10, defaultreallimits=None, printextras=0):
"""
Returns (i) a list of histogram bin counts, (ii) the smallest value
of the histogram binning, and (iii) the bin width (the last 2 are not
necessarily integers). Default number of bins is 10. If no sequence object
is given for defaultreallimits, the routine picks (usually non-pretty) bins
spanning all the numbers in the inlist.
    Usage:   lhistogram (inlist, numbins=10, defaultreallimits=None, printextras=0)
Returns: list of bin values, lowerreallimit, binsize, extrapoints
"""
if (defaultreallimits is not None):
if type(defaultreallimits) not in [list, tuple] or len(defaultreallimits) == 1: # only one limit given, assumed to be lower one & upper is calc'd
lowerreallimit = defaultreallimits
upperreallimit = 1.0001 * max(inlist)
else: # assume both limits given
lowerreallimit = defaultreallimits[0]
upperreallimit = defaultreallimits[1]
binsize = (upperreallimit-lowerreallimit)/float(numbins)
else: # no limits given for histogram, both must be calc'd
estbinwidth = (max(inlist)-min(inlist))/float(numbins) + 1 # 1=>cover all
binsize = (max(inlist)-min(inlist)+estbinwidth)/float(numbins)
lowerreallimit = min(inlist) - binsize/2 # lower real limit,1st bin
bins = [0]*(numbins)
extrapoints = 0
for num in inlist:
try:
if (num-lowerreallimit) < 0:
extrapoints = extrapoints + 1
else:
bintoincrement = int((num-lowerreallimit)/float(binsize))
bins[bintoincrement] = bins[bintoincrement] + 1
except Exception:
extrapoints = extrapoints + 1
if (extrapoints > 0 and printextras == 1):
print('\nPoints outside given histogram range =', extrapoints)
return (bins, lowerreallimit, binsize, extrapoints)
def lcumfreq(inlist, numbins=10, defaultreallimits=None):
"""
Returns a cumulative frequency histogram, using the histogram function.
Usage: lcumfreq(inlist,numbins=10,defaultreallimits=None)
Returns: list of cumfreq bin values, lowerreallimit, binsize, extrapoints
"""
h, l, b, e = histogram(inlist, numbins, defaultreallimits)
cumhist = cumsum(copy.deepcopy(h))
return cumhist, l, b, e
def lrelfreq(inlist, numbins=10, defaultreallimits=None):
"""
Returns a relative frequency histogram, using the histogram function.
Usage: lrelfreq(inlist,numbins=10,defaultreallimits=None)
Returns: list of cumfreq bin values, lowerreallimit, binsize, extrapoints
"""
h, l, b, e = histogram(inlist, numbins, defaultreallimits)
for i in range(len(h)):
h[i] = h[i]/float(len(inlist))
return h, l, b, e
# VARIABILITY FUNCTIONS
def lobrientransform(*args):
"""
Computes a transform on input data (any number of columns). Used to
test for homogeneity of variance prior to running one-way stats. From
Maxwell and Delaney, p.112.
Usage: lobrientransform(*args)
Returns: transformed data for use in an ANOVA
"""
TINY = 1e-10
k = len(args)
n = [0.0]*k
v = [0.0]*k
m = [0.0]*k
nargs = []
for i in range(k):
nargs.append(copy.deepcopy(args[i]))
n[i] = float(len(nargs[i]))
v[i] = var(nargs[i])
m[i] = mean(nargs[i])
    for j in range(k):
        for i in range(int(n[j])):  # n[j] was stored as a float above
t1 = (n[j]-1.5)*n[j]*(nargs[j][i]-m[j])**2
t2 = 0.5*v[j]*(n[j]-1.0)
t3 = (n[j]-1.0)*(n[j]-2.0)
nargs[j][i] = (t1-t2) / float(t3)
check = 1
for j in range(k):
if v[j] - mean(nargs[j]) > TINY:
check = 0
if check != 1:
raise ValueError('Problem in obrientransform.')
else:
return nargs
def lsamplevar(inlist):
"""
Returns the variance of the values in the passed list using
N for the denominator (i.e., DESCRIBES the sample variance only).
Usage: lsamplevar(inlist)
"""
n = len(inlist)
mn = mean(inlist)
deviations = []
for item in inlist:
deviations.append(item-mn)
return ss(deviations)/float(n)
def lsamplestdev(inlist):
"""
Returns the standard deviation of the values in the passed list using
N for the denominator (i.e., DESCRIBES the sample stdev only).
Usage: lsamplestdev(inlist)
"""
return math.sqrt(samplevar(inlist))
def lvar(inlist):
"""
Returns the variance of the values in the passed list using N-1
for the denominator (i.e., for estimating population variance).
Usage: lvar(inlist)
"""
n = len(inlist)
mn = mean(inlist)
deviations = [0]*len(inlist)
for i in range(len(inlist)):
deviations[i] = inlist[i] - mn
return ss(deviations)/float(n-1)
def lstdev(inlist):
"""
Returns the standard deviation of the values in the passed list
using N-1 in the denominator (i.e., to estimate population stdev).
Usage: lstdev(inlist)
"""
return math.sqrt(var(inlist))
def lsterr(inlist):
"""
Returns the standard error of the values in the passed list using N-1
in the denominator (i.e., to estimate population standard error).
Usage: lsterr(inlist)
"""
return stdev(inlist) / float(math.sqrt(len(inlist)))
def lsem(inlist):
"""
Returns the estimated standard error of the mean (sx-bar) of the
values in the passed list. sem = stdev / sqrt(n)
Usage: lsem(inlist)
"""
sd = stdev(inlist)
n = len(inlist)
return sd/math.sqrt(n)
def lz(inlist, score):
"""
Returns the z-score for a given input score, given that score and the
list from which that score came. Not appropriate for population calculations.
Usage: lz(inlist, score)
"""
z = (score-mean(inlist))/samplestdev(inlist)
return z
def lzs(inlist):
"""
Returns a list of z-scores, one for each score in the passed list.
Usage: lzs(inlist)
"""
zscores = []
for item in inlist:
zscores.append(z(inlist, item))
return zscores
# TRIMMING FUNCTIONS
def ltrimboth(l, proportiontocut):
"""
Slices off the passed proportion of items from BOTH ends of the passed
list (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND 'rightmost'
    10% of scores).  Assumes list is sorted by magnitude.  Slices off LESS if
proportion results in a non-integer slice index (i.e., conservatively
slices off proportiontocut).
Usage: ltrimboth (l,proportiontocut)
Returns: trimmed version of list l
"""
lowercut = int(proportiontocut*len(l))
uppercut = len(l) - lowercut
return l[lowercut:uppercut]
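# Example (list assumed sorted, as the docstring requires):
#   ltrimboth([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 0.10) -> [2, 3, 4, 5, 6, 7, 8, 9]
# i.e. one score (int(0.10*10)) is dropped from each end.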
def ltrim1(l, proportiontocut, tail='right'):
"""
Slices off the passed proportion of items from ONE end of the passed
list (i.e., if proportiontocut=0.1, slices off 'leftmost' or 'rightmost'
10% of scores). Slices off LESS if proportion results in a non-integer
slice index (i.e., conservatively slices off proportiontocut).
Usage: ltrim1 (l,proportiontocut,tail='right') or set tail='left'
Returns: trimmed version of list l
"""
if tail == 'right':
lowercut = 0
uppercut = len(l) - int(proportiontocut*len(l))
elif tail == 'left':
lowercut = int(proportiontocut*len(l))
uppercut = len(l)
return l[lowercut:uppercut]
# CORRELATION FUNCTIONS
def lpaired(x, y):
"""
Interactively determines the type of data and then runs the
    appropriate statistic for paired group data.
Usage: lpaired(x,y)
Returns: appropriate statistic name, value, and probability
"""
samples = ''
while samples not in ['i', 'r', 'I', 'R', 'c', 'C']:
print('\nIndependent or related samples, or correlation (i,r,c): ', end=' ')
samples = input()
if samples in ['i', 'I', 'r', 'R']:
print('\nComparing variances ...', end=' ')
# USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & delaney, p.112
r = obrientransform(x, y)
f, p = F_oneway(pstat.colex(r, 0), pstat.colex(r, 1))
if p < 0.05:
vartype = 'unequal, p='+str(round(p, 4))
else:
vartype = 'equal'
print(vartype)
if samples in ['i', 'I']:
if vartype[0] == 'e':
t, p = ttest_ind(x, y, 0)
print('\nIndependent samples t-test: ', round(t, 4), round(p, 4))
else:
if len(x) > 20 or len(y) > 20:
z, p = ranksums(x, y)
print('\nRank Sums test (NONparametric, n>20): ', round(z, 4), round(p, 4))
else:
u, p = mannwhitneyu(x, y)
print('\nMann-Whitney U-test (NONparametric, ns<20): ', round(u, 4), round(p, 4))
else: # RELATED SAMPLES
if vartype[0] == 'e':
t, p = ttest_rel(x, y, 0)
print('\nRelated samples t-test: ', round(t, 4), round(p, 4))
else:
t, p = ranksums(x, y)
print('\nWilcoxon T-test (NONparametric): ', round(t, 4), round(p, 4))
else: # CORRELATION ANALYSIS
corrtype = ''
while corrtype not in ['c', 'C', 'r', 'R', 'd', 'D']:
print('\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ', end=' ')
corrtype = input()
if corrtype in ['c', 'C']:
m, b, r, p, see = linregress(x, y)
print('\nLinear regression for continuous variables ...')
lol = [['Slope', 'Intercept', 'r', 'Prob', 'SEestimate'], [round(m, 4), round(b, 4), round(r, 4), round(p, 4), round(see, 4)]]
pstat.printcc(lol)
elif corrtype in ['r', 'R']:
r, p = spearmanr(x, y)
print('\nCorrelation for ranked variables ...')
print("Spearman's r: ", round(r, 4), round(p, 4))
else: # DICHOTOMOUS
r, p = pointbiserialr(x, y)
print('\nAssuming x contains a dichotomous variable ...')
print('Point Biserial r: ', round(r, 4), round(p, 4))
print('\n\n')
return None
def lpearsonr(x, y):
"""
Calculates a Pearson correlation coefficient and the associated
probability value. Taken from Heiman's Basic Statistics for the Behav.
Sci (2nd), p.195.
Usage: lpearsonr(x,y) where x and y are equal-length lists
Returns: Pearson's r value, two-tailed p-value
"""
TINY = 1.0e-30
if len(x) != len(y):
raise ValueError('Input values not paired in pearsonr. Aborting.')
n = len(x)
x = [float(_) for _ in x]
y = [float(_) for _ in y]
r_num = n*(summult(x, y)) - sum(x)*sum(y)
r_den = math.sqrt((n*ss(x) - square_of_sums(x))*(n*ss(y)-square_of_sums(y)))
r = (r_num / r_den) # denominator already a float
df = n-2
t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
prob = betai(0.5*df, 0.5, df/float(df+t*t))
return r, prob
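# Worked example: a perfectly linear relationship gives r == 1.0 and a
# two-tailed p-value of (essentially) zero:
#   lpearsonr([1, 2, 3, 4], [2, 4, 6, 8]) -> (1.0, ~0.0)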
def lspearmanr(x, y):
"""
Calculates a Spearman rank-order correlation coefficient. Taken
from Heiman's Basic Statistics for the Behav. Sci (1st), p.192.
Usage: lspearmanr(x,y) where x and y are equal-length lists
Returns: Spearman's r, two-tailed p-value
"""
if len(x) != len(y):
raise ValueError('Input values not paired in spearmanr. Aborting.')
n = len(x)
rankx = rankdata(x)
ranky = rankdata(y)
dsq = sumdiffsquared(rankx, ranky)
rs = 1 - 6*dsq / float(n*(n**2-1))
t = rs * math.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
df = n-2
probrs = betai(0.5*df, 0.5, df/(df+t*t)) # t already a float
# probability values for rs are from part 2 of the spearman function in
    # Numerical Recipes, p.510.  They are close to tables, but not exact. (?)
return rs, probrs
def lpointbiserialr(x, y):
"""
Calculates a point-biserial correlation coefficient and the associated
probability value. Taken from Heiman's Basic Statistics for the Behav.
Sci (1st), p.194.
Usage: lpointbiserialr(x,y) where x,y are equal-length lists
Returns: Point-biserial r, two-tailed p-value
"""
TINY = 1e-30
if len(x) != len(y):
raise ValueError('INPUT VALUES NOT PAIRED IN pointbiserialr. ABORTING.')
data = pstat.abut(x, y)
categories = pstat.unique(x)
if len(categories) != 2:
raise ValueError("Exactly 2 categories required for pointbiserialr().")
else: # there are 2 categories, continue
codemap = pstat.abut(categories, range(2))
pstat.recode(data, codemap, 0) # recoded
x = pstat.linexand(data, 0, categories[0])
y = pstat.linexand(data, 0, categories[1])
xmean = mean(pstat.colex(x, 1))
ymean = mean(pstat.colex(y, 1))
n = len(data)
adjust = math.sqrt((len(x)/float(n))*(len(y)/float(n)))
rpb = (ymean - xmean)/samplestdev(pstat.colex(data, 1))*adjust
df = n-2
t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY)))
prob = betai(0.5*df, 0.5, df/(df+t*t)) # t already a float
return rpb, prob
def lkendalltau(x, y):
"""
Calculates Kendall's tau ... correlation of ordinal data. Adapted
    from function kendl1 in Numerical Recipes.  Needs a good test routine. @@@
Usage: lkendalltau(x,y)
Returns: Kendall's tau, two-tailed p-value
"""
n1 = 0
n2 = 0
iss = 0
for j in range(len(x)-1):
        for k in range(j+1, len(y)):  # compare each distinct pair once (skip self-pairs)
a1 = x[j] - x[k]
a2 = y[j] - y[k]
aa = a1 * a2
if (aa): # neither list has a tie
n1 = n1 + 1
n2 = n2 + 1
if aa > 0:
iss = iss + 1
else:
iss = iss - 1
else:
if (a1):
n1 = n1 + 1
else:
n2 = n2 + 1
tau = iss / math.sqrt(n1*n2)
svar = (4.0*len(x)+10.0) / (9.0*len(x)*(len(x)-1))
z = tau / math.sqrt(svar)
prob = erfcc(abs(z)/1.4142136)
return tau, prob
def llinregress(x, y):
"""
Calculates a regression line on x,y pairs.
Usage: llinregress(x,y) x,y are equal-length lists of x-y coordinates
Returns: slope, intercept, r, two-tailed prob, sterr-of-estimate
"""
TINY = 1.0e-20
if len(x) != len(y):
raise ValueError('Input values not paired in linregress. Aborting.')
n = len(x)
x = [float(_) for _ in x]
y = [float(_) for _ in y]
xmean = mean(x)
ymean = mean(y)
r_num = float(n*(summult(x, y)) - sum(x)*sum(y))
r_den = math.sqrt((n*ss(x) - square_of_sums(x))*(n*ss(y)-square_of_sums(y)))
r = r_num / r_den
df = n-2
t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
prob = betai(0.5*df, 0.5, df/(df+t*t))
slope = r_num / float(n*ss(x) - square_of_sums(x))
intercept = ymean - slope*xmean
sterrest = math.sqrt(1-r*r)*samplestdev(y)
return slope, intercept, r, prob, sterrest
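# Worked example: points on the line y = 2x + 1 recover the slope and intercept
# exactly, with r == 1.0 and a standard error of estimate of 0.0:
#   llinregress([0, 1, 2, 3], [1, 3, 5, 7]) -> (2.0, 1.0, 1.0, ~0.0, 0.0)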
# INFERENTIAL STATISTICS
def lttest_1samp(a, popmean, printit=0, name='Sample', writemode='a'):
"""
Calculates the t-obtained for the independent samples T-test on ONE group
of scores a, given a population mean. If printit=1, results are printed
to the screen. If printit='filename', the results are output to 'filename'
using the given writemode (default=append). Returns t-value, and prob.
    Usage:   lttest_1samp(a,popmean,printit=0,name='Sample',writemode='a')
Returns: t-value, two-tailed prob
"""
x = mean(a)
v = var(a)
n = len(a)
df = n-1
svar = ((n-1)*v)/float(df)
t = (x-popmean)/math.sqrt(svar*(1.0/n))
prob = betai(0.5*df, 0.5, float(df)/(df+t*t))
if printit != 0:
statname = 'Single-sample T-test.'
outputpairedstats(printit, writemode,
'Population', '--', popmean, 0, 0, 0,
name, n, x, v, min(a), max(a),
statname, t, prob)
return t, prob
def lttest_ind(a, b, printit=0, name1='Samp1', name2='Samp2', writemode='a'):
"""
Calculates the t-obtained T-test on TWO INDEPENDENT samples of
    scores a, and b.  From Numerical Recipes, p.483.  If printit=1, results
are printed to the screen. If printit='filename', the results are output
to 'filename' using the given writemode (default=append). Returns t-value,
and prob.
Usage: lttest_ind(a,b,printit=0,name1='Samp1',name2='Samp2',writemode='a')
Returns: t-value, two-tailed prob
"""
x1 = mean(a)
x2 = mean(b)
v1 = stdev(a)**2
v2 = stdev(b)**2
n1 = len(a)
n2 = len(b)
df = n1+n2-2
svar = ((n1-1)*v1+(n2-1)*v2)/float(df)
t = (x1-x2)/math.sqrt(svar*(1.0/n1 + 1.0/n2))
prob = betai(0.5*df, 0.5, df/(df+t*t))
if printit != 0:
statname = 'Independent samples T-test.'
outputpairedstats(printit, writemode,
name1, n1, x1, v1, min(a), max(a),
name2, n2, x2, v2, min(b), max(b),
statname, t, prob)
return t, prob
def lttest_rel(a, b, printit=0, name1='Sample1', name2='Sample2', writemode='a'):
"""
Calculates the t-obtained T-test on TWO RELATED samples of scores,
    a and b.  From Numerical Recipes, p.483.  If printit=1, results are
printed to the screen. If printit='filename', the results are output to
'filename' using the given writemode (default=append). Returns t-value,
and prob.
Usage: lttest_rel(a,b,printit=0,name1='Sample1',name2='Sample2',writemode='a')
Returns: t-value, two-tailed prob
"""
if len(a) != len(b):
raise ValueError('Unequal length lists in ttest_rel.')
x1 = mean(a)
x2 = mean(b)
v1 = var(a)
v2 = var(b)
n = len(a)
cov = 0
for i in range(len(a)):
cov = cov + (a[i]-x1) * (b[i]-x2)
df = n-1
cov = cov / float(df)
sd = math.sqrt((v1+v2 - 2.0*cov)/float(n))
t = (x1-x2)/sd
prob = betai(0.5*df, 0.5, df/(df+t*t))
if printit != 0:
statname = 'Related samples T-test.'
outputpairedstats(printit, writemode,
name1, n, x1, v1, min(a), max(a),
name2, n, x2, v2, min(b), max(b),
statname, t, prob)
return t, prob
def lchisquare(f_obs, f_exp=None):
"""
Calculates a one-way chi square for list of observed frequencies and returns
the result. If no expected frequencies are given, the total N is assumed to
be equally distributed across all groups.
Usage: lchisquare(f_obs, f_exp=None) f_obs = list of observed cell freq.
Returns: chisquare-statistic, associated p-value
"""
k = len(f_obs) # number of groups
if f_exp is None:
f_exp = [sum(f_obs)/float(k)] * len(f_obs) # create k bins with = freq.
chisq = 0
for i in range(len(f_obs)):
chisq = chisq + (f_obs[i]-f_exp[i])**2 / float(f_exp[i])
return chisq, chisqprob(chisq, k-1)
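# Example: with a perfectly uniform observed distribution the statistic is 0
# and the associated p-value is 1:
#   lchisquare([10, 10, 10, 10]) -> (0.0, 1.0)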
def lks_2samp(data1, data2):
"""
    Computes the Kolmogorov-Smirnov statistic on 2 samples.  From
    Numerical Recipes in C, page 493.
Usage: lks_2samp(data1,data2) data1&2 are lists of values for 2 conditions
Returns: KS D-value, associated p-value
"""
j1 = 0
j2 = 0
fn1 = 0.0
fn2 = 0.0
n1 = len(data1)
n2 = len(data2)
en1 = n1
en2 = n2
d = 0.0
data1.sort()
data2.sort()
while j1 < n1 and j2 < n2:
d1 = data1[j1]
d2 = data2[j2]
if d1 <= d2:
fn1 = (j1)/float(en1)
j1 = j1 + 1
if d2 <= d1:
fn2 = (j2)/float(en2)
j2 = j2 + 1
dt = (fn2-fn1)
if math.fabs(dt) > math.fabs(d):
d = dt
try:
en = math.sqrt(en1*en2/float(en1+en2))
prob = ksprob((en+0.12+0.11/en)*abs(d))
except Exception:
prob = 1.0
return d, prob
def lmannwhitneyu(x, y):
"""
Calculates a Mann-Whitney U statistic on the provided scores and
returns the result. Use only when the n in each condition is < 20 and
you have 2 independent samples of ranks. NOTE: Mann-Whitney U is
significant if the u-obtained is LESS THAN or equal to the critical
value of U found in the tables. Equivalent to Kruskal-Wallis H with
just 2 groups.
    Usage:   lmannwhitneyu(x,y)     where x and y are lists of scores for the 2 conditions
Returns: u-statistic, one-tailed p-value (i.e., p(z(U)))
"""
n1 = len(x)
n2 = len(y)
ranked = rankdata(x+y)
rankx = ranked[0:n1] # get the x-ranks
u1 = n1*n2 + (n1*(n1+1))/2.0 - sum(rankx) # calc U for x
u2 = n1*n2 - u1 # remainder is U for y
bigu = max(u1, u2)
smallu = min(u1, u2)
T = math.sqrt(tiecorrect(ranked)) # correction factor for tied scores
if T == 0:
raise ValueError('All numbers are identical in lmannwhitneyu')
sd = math.sqrt(T*n1*n2*(n1+n2+1)/12.0)
z = abs((bigu-n1*n2/2.0) / sd) # normal approximation for prob calc
return smallu, 1.0 - zprob(z)
def ltiecorrect(rankvals):
"""
Corrects for ties in Mann Whitney U and Kruskal Wallis H tests. See
Siegel, S. (1956) Nonparametric Statistics for the Behavioral Sciences.
New York: McGraw-Hill. Code adapted from |Stat rankind.c code.
Usage: ltiecorrect(rankvals)
Returns: T correction factor for U or H
"""
sorted, posn = shellsort(rankvals)
n = len(sorted)
T = 0.0
i = 0
while (i < n-1):
if sorted[i] == sorted[i+1]:
nties = 1
while (i < n-1) and (sorted[i] == sorted[i+1]):
nties = nties + 1
i = i + 1
T = T + nties**3 - nties
i = i+1
T = T / float(n**3-n)
return 1.0 - T
def lranksums(x, y):
"""
Calculates the rank sums statistic on the provided scores and
returns the result. Use only when the n in each condition is > 20 and you
have 2 independent samples of ranks.
Usage: lranksums(x,y)
Returns: a z-statistic, two-tailed p-value
"""
n1 = len(x)
n2 = len(y)
alldata = x+y
ranked = rankdata(alldata)
x = ranked[:n1]
y = ranked[n1:]
s = sum(x)
expected = n1*(n1+n2+1) / 2.0
z = (s - expected) / math.sqrt(n1*n2*(n1+n2+1)/12.0)
prob = 2*(1.0 - zprob(abs(z)))
return z, prob
def lwilcoxont(x, y):
"""
Calculates the Wilcoxon T-test for related samples and returns the
result. A non-parametric T-test.
Usage: lwilcoxont(x,y)
Returns: a t-statistic, two-tail probability estimate
"""
if len(x) != len(y):
raise ValueError('Unequal N in wilcoxont. Aborting.')
d = []
for i in range(len(x)):
diff = x[i] - y[i]
if diff != 0:
d.append(diff)
count = len(d)
absd = [abs(_) for _ in d]
absranked = rankdata(absd)
r_plus = 0.0
r_minus = 0.0
for i in range(len(absd)):
if d[i] < 0:
r_minus = r_minus + absranked[i]
else:
r_plus = r_plus + absranked[i]
wt = min(r_plus, r_minus)
mn = count * (count+1) * 0.25
se = math.sqrt(count*(count+1)*(2.0*count+1.0)/24.0)
z = math.fabs(wt-mn) / se
prob = 2*(1.0 - zprob(abs(z)))
return wt, prob
def lkruskalwallish(*args):
"""
The Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more
groups, requiring at least 5 subjects in each group. This function
calculates the Kruskal-Wallis H-test for 3 or more independent samples
and returns the result.
Usage: lkruskalwallish(*args)
Returns: H-statistic (corrected for ties), associated p-value
"""
args = list(args)
n = [0]*len(args)
all = []
n = [len(_) for _ in args]
for i in range(len(args)):
all = all + args[i]
ranked = rankdata(all)
T = tiecorrect(ranked)
for i in range(len(args)):
args[i] = ranked[0:n[i]]
del ranked[0:n[i]]
rsums = []
for i in range(len(args)):
rsums.append(sum(args[i])**2)
rsums[i] = rsums[i] / float(n[i])
ssbn = sum(rsums)
totaln = sum(n)
h = 12.0 / (totaln*(totaln+1)) * ssbn - 3*(totaln+1)
df = len(args) - 1
if T == 0:
raise ValueError('All numbers are identical in lkruskalwallish')
h = h / float(T)
return h, chisqprob(h, df)
def lfriedmanchisquare(*args):
"""
Friedman Chi-Square is a non-parametric, one-way within-subjects
ANOVA. This function calculates the Friedman Chi-square test for repeated
measures and returns the result, along with the associated probability
value. It assumes 3 or more repeated measures. Only 3 levels requires a
minimum of 10 subjects in the study. Four levels requires 5 subjects per
level(??).
Usage: lfriedmanchisquare(*args)
Returns: chi-square statistic, associated p-value
"""
k = len(args)
if k < 3:
raise ValueError('Less than 3 levels. Friedman test not appropriate.')
n = len(args[0])
data = pstat.abut(*tuple(args))
for i in range(len(data)):
data[i] = rankdata(data[i])
    ssbn = 0
    for i in range(k):
        ssbn = ssbn + sum(pstat.colex(data, i))**2  # rank sum for condition i, from the within-subject ranks computed above
chisq = 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)
return chisq, chisqprob(chisq, k-1)
# PROBABILITY CALCULATIONS
def lchisqprob(chisq, df):
"""
Returns the (1-tailed) probability value associated with the provided
chi-square value and df. Adapted from chisq.c in Gary Perlman's |Stat.
Usage: lchisqprob(chisq,df)
"""
BIG = 20.0
def ex(x):
BIG = 20.0
if x < -BIG:
return 0.0
else:
return math.exp(x)
if chisq <= 0 or df < 1:
return 1.0
a = 0.5 * chisq
if df % 2 == 0:
even = 1
else:
even = 0
if df > 1:
y = ex(-a)
if even:
s = y
else:
s = 2.0 * zprob(-math.sqrt(chisq))
if (df > 2):
chisq = 0.5 * (df - 1.0)
if even:
z = 1.0
else:
z = 0.5
if a > BIG:
if even:
e = 0.0
else:
e = math.log(math.sqrt(math.pi))
c = math.log(a)
while (z <= chisq):
e = math.log(z) + e
s = s + ex(c*z-a-e)
z = z + 1.0
return s
else:
if even:
e = 1.0
else:
e = 1.0 / math.sqrt(math.pi) / math.sqrt(a)
c = 0.0
while (z <= chisq):
e = e * (a/float(z))
c = c + e
z = z + 1.0
return (c*y+s)
else:
return s
def lerfcc(x):
"""
Returns the complementary error function erfc(x) with fractional
    error everywhere less than 1.2e-7.  Adapted from Numerical Recipes.
Usage: lerfcc(x)
"""
z = abs(x)
t = 1.0 / (1.0+0.5*z)
ans = t * math.exp(-z*z-1.26551223 + t*(1.00002368+t*(0.37409196+t*(0.09678418+t*(-0.18628806+t*(0.27886807+t*(-1.13520398+t*(1.48851587+t*(-0.82215223+t*0.17087277)))))))))
if x >= 0:
return ans
else:
return 2.0 - ans
def lzprob(z):
"""
Returns the area under the normal curve 'to the left of' the given z value.
Thus,
for z<0, zprob(z) = 1-tail probability
for z>0, 1.0-zprob(z) = 1-tail probability
for any z, 2.0*(1.0-zprob(abs(z))) = 2-tail probability
Adapted from z.c in Gary Perlman's |Stat.
Usage: lzprob(z)
"""
Z_MAX = 6.0 # maximum meaningful z-value
if z == 0.0:
x = 0.0
else:
y = 0.5 * math.fabs(z)
if y >= (Z_MAX*0.5):
x = 1.0
elif (y < 1.0):
w = y*y
x = ((((((((0.000124818987 * w
- 0.001075204047) * w + 0.005198775019) * w
- 0.019198292004) * w + 0.059054035642) * w
- 0.151968751364) * w + 0.319152932694) * w
- 0.531923007300) * w + 0.797884560593) * y * 2.0
else:
y = y - 2.0
x = (((((((((((((-0.000045255659 * y
+ 0.000152529290) * y - 0.000019538132) * y
- 0.000676904986) * y + 0.001390604284) * y
- 0.000794620820) * y - 0.002034254874) * y
+ 0.006549791214) * y - 0.010557625006) * y
+ 0.011630447319) * y - 0.009279453341) * y
+ 0.005353579108) * y - 0.002141268741) * y
+ 0.000535310849) * y + 0.999936657524
if z > 0.0:
prob = ((x+1.0)*0.5)
else:
prob = ((1.0-x)*0.5)
return prob
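# Quick checks of the left-tail semantics described above:
#   lzprob(0.0)   -> 0.5
#   lzprob(1.96)  -> ~0.975   (the approximation agrees with standard normal tables)
#   lzprob(-1.96) -> ~0.025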
def lksprob(alam):
"""
    Computes a Kolmogorov-Smirnov test significance level.  Adapted from
    Numerical Recipes.
Usage: lksprob(alam)
"""
fac = 2.0
sum = 0.0
termbf = 0.0
a2 = -2.0*alam*alam
for j in range(1, 201):
term = fac*math.exp(a2*j*j)
sum = sum + term
if math.fabs(term) <= (0.001*termbf) or math.fabs(term) < (1.0e-8*sum):
return sum
fac = -fac
termbf = math.fabs(term)
return 1.0 # Get here only if fails to converge; was 0.0!!
def lfprob(dfnum, dfden, F):
"""
Returns the (1-tailed) significance level (p-value) of an F
statistic given the degrees of freedom for the numerator (dfR-dfF) and
the degrees of freedom for the denominator (dfF).
Usage: lfprob(dfnum, dfden, F) where usually dfnum=dfbn, dfden=dfwn
"""
p = betai(0.5*dfden, 0.5*dfnum, dfden/float(dfden+dfnum*F))
return p
def lbetacf(a, b, x):
"""
This function evaluates the continued fraction form of the incomplete
    Beta function, betai.  (Adapted from: Numerical Recipes in C.)
Usage: lbetacf(a,b,x)
"""
ITMAX = 200
EPS = 3.0e-7
bm = az = am = 1.0
qab = a+b
qap = a+1.0
qam = a-1.0
bz = 1.0-qab*x/qap
for i in range(ITMAX+1):
em = float(i+1)
tem = em + em
d = em*(b-em)*x/((qam+tem)*(a+tem))
ap = az + d*am
bp = bz+d*bm
d = -(a+em)*(qab+em)*x/((qap+tem)*(a+tem))
app = ap+d*az
bpp = bp+d*bz
aold = az
am = ap/bpp
bm = bp/bpp
az = app/bpp
bz = 1.0
if (abs(az-aold) < (EPS*abs(az))):
return az
print('a or b too big, or ITMAX too small in Betacf.')
def lgammln(xx):
"""
Returns the gamma function of xx.
Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.
    (Adapted from: Numerical Recipes in C.)
Usage: lgammln(xx)
"""
coeff = [76.18009173, -86.50532033, 24.01409822, -1.231739516,
0.120858003e-2, -0.536382e-5]
x = xx - 1.0
tmp = x + 5.5
tmp = tmp - (x+0.5)*math.log(tmp)
ser = 1.0
for j in range(len(coeff)):
x = x + 1
ser = ser + coeff[j]/x
return -tmp + math.log(2.50662827465*ser)
def lbetai(a, b, x):
"""
Returns the incomplete beta function:
I-sub-x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)
where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
function of a. The continued fraction formulation is implemented here,
    using the betacf function.  (Adapted from: Numerical Recipes in C.)
Usage: lbetai(a,b,x)
"""
if (x < 0.0 or x > 1.0):
raise ValueError('Bad x in lbetai')
if (x == 0.0 or x == 1.0):
bt = 0.0
else:
bt = math.exp(gammln(a+b)-gammln(a)-gammln(b)+a*math.log(x)+b
* math.log(1.0-x))
if (x < (a+1.0)/(a+b+2.0)):
return bt*betacf(a, b, x)/float(a)
else:
return 1.0-bt*betacf(b, a, 1.0-x)/float(b)
# ANOVA CALCULATIONS
def lF_oneway(*lists):
    """
    Performs a 1-way ANOVA, returning an F-value and probability given
    any number of groups.  From Heiman, pp.394-7.
    Usage:   F_oneway(*lists)    where *lists is any number of lists, one per
                                      treatment group
    Returns: F value, one-tailed p-value
    """
    # Uses the list-based helpers (ss, square_of_sums, fprob) so that this
    # list version does not depend on NumPy being available.
    a = len(lists)           # ANOVA on 'a' groups, each in its own list
    alldata = []
    for i in range(len(lists)):
        alldata = alldata + lists[i]
    bign = len(alldata)
    sstot = ss(alldata) - (square_of_sums(alldata)/float(bign))
    ssbn = 0
    for grouplist in lists:
        ssbn = ssbn + square_of_sums(grouplist)/float(len(grouplist))
    ssbn = ssbn - (square_of_sums(alldata)/float(bign))
    sswn = sstot - ssbn
    dfbn = a - 1
    dfwn = bign - a
    msb = ssbn/float(dfbn)
    msw = sswn/float(dfwn)
    f = msb/msw
    prob = fprob(dfbn, dfwn, f)
    return f, prob
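# Worked example (hand-checkable): groups [1,2,3] and [2,3,4] have
# SS-between = 1.5 and SS-within = 4.0, so with df = (1, 4):
#   F_oneway([1, 2, 3], [2, 3, 4]) -> F = 1.5, p = fprob(1, 4, 1.5)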
def lF_value(ER, EF, dfnum, dfden):
"""
Returns an F-statistic given the following:
ER = error associated with the null hypothesis (the Restricted model)
EF = error associated with the alternate hypothesis (the Full model)
dfR-dfF = degrees of freedom of the numerator
dfF = degrees of freedom associated with the denominator/Full model
Usage: lF_value(ER,EF,dfnum,dfden)
"""
return ((ER-EF)/float(dfnum) / (EF/float(dfden)))
# SUPPORT FUNCTIONS
def writecc(listoflists, file, writetype='w', extra=2):
"""
Writes a list of lists to a file in columns, customized by the max
size of items within the columns (max size of items in col, +2 characters)
to specified file. File-overwrite is the default.
Usage: writecc (listoflists,file,writetype='w',extra=2)
Returns: None
"""
if type(listoflists[0]) not in [list, tuple]:
listoflists = [listoflists]
outfile = open(file, writetype)
rowstokill = []
list2print = copy.deepcopy(listoflists)
for i in range(len(listoflists)):
if listoflists[i] == ['\n'] or listoflists[i] == '\n' or listoflists[i] == 'dashes':
rowstokill = rowstokill + [i]
rowstokill.reverse()
for row in rowstokill:
del list2print[row]
maxsize = [0]*len(list2print[0])
for col in range(len(list2print[0])):
items = pstat.colex(list2print, col)
items = [pstat.makestr(_) for _ in items]
maxsize[col] = max(map(len, items)) + extra
for row in listoflists:
if row == ['\n'] or row == '\n':
outfile.write('\n')
elif row == ['dashes'] or row == 'dashes':
dashes = [0]*len(maxsize)
for j in range(len(maxsize)):
dashes[j] = '-'*(maxsize[j]-2)
outfile.write(pstat.lineincustcols(dashes, maxsize))
else:
outfile.write(pstat.lineincustcols(row, maxsize))
outfile.write('\n')
outfile.close()
return None
def lincr(l, cap): # to increment a list up to a max-list of 'cap'
"""
Simulate a counting system from an n-dimensional list.
Usage: lincr(l,cap) l=list to increment, cap=max values for each list pos'n
Returns: next set of values for list l, OR -1 (if overflow)
"""
l[0] = l[0] + 1 # e.g., [0,0,0] --> [2,4,3] (=cap)
for i in range(len(l)):
if l[i] > cap[i] and i < len(l)-1: # if carryover AND not done
l[i] = 0
l[i+1] = l[i+1] + 1
elif l[i] > cap[i] and i == len(l)-1: # overflow past last column, must be finished
l = -1
return l
def lsum(inlist):
"""
Returns the sum of the items in the passed list.
Usage: lsum(inlist)
"""
s = 0
for item in inlist:
s = s + item
return s
def lcumsum(inlist):
"""
Returns a list consisting of the cumulative sum of the items in the
passed list.
Usage: lcumsum(inlist)
"""
newlist = copy.deepcopy(inlist)
for i in range(1, len(newlist)):
newlist[i] = newlist[i] + newlist[i-1]
return newlist
def lss(inlist):
"""
Squares each value in the passed list, adds up these squares and
returns the result.
Usage: lss(inlist)
"""
ss = 0
for item in inlist:
ss = ss + item*item
return ss
def lsummult(list1, list2):
"""
Multiplies elements in list1 and list2, element by element, and
returns the sum of all resulting multiplications. Must provide equal
length lists.
Usage: lsummult(list1,list2)
"""
if len(list1) != len(list2):
raise ValueError("Lists not equal length in summult.")
s = 0
for item1, item2 in pstat.abut(list1, list2):
s = s + item1*item2
return s
def lsumdiffsquared(x, y):
"""
Takes pairwise differences of the values in lists x and y, squares
these differences, and returns the sum of these squares.
Usage: lsumdiffsquared(x,y)
Returns: sum[(x[i]-y[i])**2]
"""
sds = 0
for i in range(len(x)):
sds = sds + (x[i]-y[i])**2
return sds
def lsquare_of_sums(inlist):
"""
Adds the values in the passed list, squares the sum, and returns
the result.
Usage: lsquare_of_sums(inlist)
Returns: sum(inlist[i])**2
"""
s = sum(inlist)
return float(s)*s
def lshellsort(inlist):
"""
Shellsort algorithm. Sorts a 1D-list.
Usage: lshellsort(inlist)
Returns: sorted-inlist, sorting-index-vector (for original list)
"""
n = len(inlist)
svec = copy.deepcopy(inlist)
ivec = list(range(n))
    gap = n // 2  # integer division needed
while gap > 0:
for i in range(gap, n):
for j in range(i-gap, -1, -gap):
while j >= 0 and svec[j] > svec[j+gap]:
temp = svec[j]
svec[j] = svec[j+gap]
svec[j+gap] = temp
itemp = ivec[j]
ivec[j] = ivec[j+gap]
ivec[j+gap] = itemp
        gap = gap // 2  # integer division needed
# svec is now sorted inlist, and ivec has the order svec[i] = vec[ivec[i]]
return svec, ivec
def lrankdata(inlist):
"""
    Ranks the data in inlist, dealing with ties appropriately.  Assumes
a 1D inlist. Adapted from Gary Perlman's |Stat ranksort.
Usage: lrankdata(inlist)
Returns: a list of length equal to inlist, containing rank scores
"""
n = len(inlist)
svec, ivec = shellsort(inlist)
sumranks = 0
dupcount = 0
newlist = [0]*n
for i in range(n):
sumranks = sumranks + i
dupcount = dupcount + 1
if i == n-1 or svec[i] != svec[i+1]:
averank = sumranks / float(dupcount) + 1
for j in range(i-dupcount+1, i+1):
newlist[ivec[j]] = averank
sumranks = 0
dupcount = 0
return newlist
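# Example showing the average-rank treatment of ties:
#   lrankdata([1, 2, 2, 3]) -> [1.0, 2.5, 2.5, 4.0]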
def outputpairedstats(fname, writemode, name1, n1, m1, se1, min1, max1, name2, n2, m2, se2, min2, max2, statname, stat, prob):
"""
    Prints, or writes to a file, stats for two groups, using the name, n,
mean, sterr, min and max for each group, as well as the statistic name,
its value, and the associated p-value.
Usage: outputpairedstats(fname,writemode,
name1,n1,mean1,stderr1,min1,max1,
name2,n2,mean2,stderr2,min2,max2,
statname,stat,prob)
Returns: None
"""
suffix = '' # for *s after the p-value
try:
prob.shape
prob = prob[0]
except Exception:
pass
if prob < 0.001:
suffix = ' ***'
elif prob < 0.01:
suffix = ' **'
elif prob < 0.05:
suffix = ' *'
title = [['Name', 'N', 'Mean', 'SD', 'Min', 'Max']]
lofl = title+[[name1, n1, round(m1, 3), round(math.sqrt(se1), 3), min1, max1],
[name2, n2, round(m2, 3), round(math.sqrt(se2), 3), min2, max2]]
if not isinstance(fname, str) or len(fname) == 0:
print()
print(statname)
print()
pstat.printcc(lofl)
print()
try:
if stat.shape == ():
stat = stat[0]
if prob.shape == ():
prob = prob[0]
except Exception:
pass
print('Test statistic = ', round(stat, 3), ' p = ', round(prob, 3), suffix)
print()
else:
file = open(fname, writemode)
file.write('\n'+statname+'\n\n')
file.close()
writecc(lofl, fname, 'a')
file = open(fname, 'a')
try:
if stat.shape == ():
stat = stat[0]
if prob.shape == ():
prob = prob[0]
except Exception:
pass
file.write(pstat.list2string(['\nTest statistic = ', round(stat, 4), ' p = ', round(prob, 4), suffix, '\n\n']))
file.close()
return None
def lfindwithin(data):
"""
Returns an integer representing a binary vector, where 1=within-
subject factor, 0=between. Input equals the entire data 2D list (i.e.,
column 0=random factor, column -1=measured values (those two are skipped).
Note: input data is in |Stat format ... a list of lists ("2D list") with
one row per measured value, first column=subject identifier, last column=
score, one in-between column per factor (these columns contain level
designations on each factor). See also stats.anova.__doc__.
Usage: lfindwithin(data) data in |Stat format
"""
numfact = len(data[0])-1
withinvec = 0
for col in range(1, numfact):
examplelevel = pstat.unique(pstat.colex(data, col))[0]
rows = pstat.linexand(data, col, examplelevel) # get 1 level of this factor
factsubjs = pstat.unique(pstat.colex(rows, 0))
allsubjs = pstat.unique(pstat.colex(data, 0))
if len(factsubjs) == len(allsubjs): # fewer Ss than scores on this factor?
withinvec = withinvec + (1 << col)
return withinvec
# DISPATCH LISTS AND TUPLES TO ABOVE FCNS
# CENTRAL TENDENCY:
geometricmean = Dispatch((lgeometricmean, (list, tuple)), )
harmonicmean = Dispatch((lharmonicmean, (list, tuple)), )
mean = Dispatch((lmean, (list, tuple)), )
median = Dispatch((lmedian, (list, tuple)), )
medianscore = Dispatch((lmedianscore, (list, tuple)), )
mode = Dispatch((lmode, (list, tuple)), )
# MOMENTS:
moment = Dispatch((lmoment, (list, tuple)), )
variation = Dispatch((lvariation, (list, tuple)), )
skew = Dispatch((lskew, (list, tuple)), )
kurtosis = Dispatch((lkurtosis, (list, tuple)), )
describe = Dispatch((ldescribe, (list, tuple)), )
# FREQUENCY STATISTICS:
itemfreq = Dispatch((litemfreq, (list, tuple)), )
scoreatpercentile = Dispatch((lscoreatpercentile, (list, tuple)), )
percentileofscore = Dispatch((lpercentileofscore, (list, tuple)), )
histogram = Dispatch((lhistogram, (list, tuple)), )
cumfreq = Dispatch((lcumfreq, (list, tuple)), )
relfreq = Dispatch((lrelfreq, (list, tuple)), )
# VARIABILITY:
obrientransform = Dispatch((lobrientransform, (list, tuple)), )
samplevar = Dispatch((lsamplevar, (list, tuple)), )
samplestdev = Dispatch((lsamplestdev, (list, tuple)), )
var = Dispatch((lvar, (list, tuple)), )
stdev = Dispatch((lstdev, (list, tuple)), )
sterr = Dispatch((lsterr, (list, tuple)), )
sem = Dispatch((lsem, (list, tuple)), )
z = Dispatch((lz, (list, tuple)), )
zs = Dispatch((lzs, (list, tuple)), )
# TRIMMING FCNS:
trimboth = Dispatch((ltrimboth, (list, tuple)), )
trim1 = Dispatch((ltrim1, (list, tuple)), )
# CORRELATION FCNS:
paired = Dispatch((lpaired, (list, tuple)), )
pearsonr = Dispatch((lpearsonr, (list, tuple)), )
spearmanr = Dispatch((lspearmanr, (list, tuple)), )
pointbiserialr = Dispatch((lpointbiserialr, (list, tuple)), )
kendalltau = Dispatch((lkendalltau, (list, tuple)), )
linregress = Dispatch((llinregress, (list, tuple)), )
# INFERENTIAL STATS:
ttest_1samp = Dispatch((lttest_1samp, (list, tuple)), )
ttest_ind = Dispatch((lttest_ind, (list, tuple)), )
ttest_rel = Dispatch((lttest_rel, (list, tuple)), )
chisquare = Dispatch((lchisquare, (list, tuple)), )
ks_2samp = Dispatch((lks_2samp, (list, tuple)), )
mannwhitneyu = Dispatch((lmannwhitneyu, (list, tuple)), )
ranksums = Dispatch((lranksums, (list, tuple)), )
tiecorrect = Dispatch((ltiecorrect, (list, tuple)), )
wilcoxont = Dispatch((lwilcoxont, (list, tuple)), )
kruskalwallish = Dispatch((lkruskalwallish, (list, tuple)), )
friedmanchisquare = Dispatch((lfriedmanchisquare, (list, tuple)), )
# PROBABILITY CALCS:
chisqprob = Dispatch((lchisqprob, (int, float)), )
zprob = Dispatch((lzprob, (int, float)), )
ksprob = Dispatch((lksprob, (int, float)), )
fprob = Dispatch((lfprob, (int, float)), )
betacf = Dispatch((lbetacf, (int, float)), )
betai = Dispatch((lbetai, (int, float)), )
erfcc = Dispatch((lerfcc, (int, float)), )
gammln = Dispatch((lgammln, (int, float)), )
# ANOVA FUNCTIONS:
F_oneway = Dispatch((lF_oneway, (list, tuple)), )
F_value = Dispatch((lF_value, (list, tuple)), )
# SUPPORT FUNCTIONS:
incr = Dispatch((lincr, (list, tuple)), )
sum = Dispatch((lsum, (list, tuple)), )
cumsum = Dispatch((lcumsum, (list, tuple)), )
ss = Dispatch((lss, (list, tuple)), )
summult = Dispatch((lsummult, (list, tuple)), )
square_of_sums = Dispatch((lsquare_of_sums, (list, tuple)), )
sumdiffsquared = Dispatch((lsumdiffsquared, (list, tuple)), )
shellsort = Dispatch((lshellsort, (list, tuple)), )
rankdata = Dispatch((lrankdata, (list, tuple)), )
findwithin = Dispatch((lfindwithin, (list, tuple)), )
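# Illustrative example (added; not part of the original |Stat port): the
# Dispatch wrappers above route on the type of the first argument, so plain
# Python lists and tuples reach the l*-prefixed implementations; when Numeric
# is available, a second dispatch pass (at the end of the module, not shown in
# this excerpt) is expected to register the a*-prefixed array versions.
#
#   >>> mean([1, 2, 3, 4])        # list input -> lmean
#   2.5
#   >>> rankdata([10, 20, 20])    # list input -> lrankdata
#   [1.0, 2.5, 2.5]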
# ============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
try: # DEFINE THESE *ONLY* IF NUMERIC IS AVAILABLE
import Numeric
N = Numeric
import LinearAlgebra
LA = LinearAlgebra
# ACENTRAL TENDENCY
def ageometricmean(inarray, dimension=None, keepdims=0):
"""
Calculates the geometric mean of the values in the passed array.
That is: n-th root of (x1 * x2 * ... * xn). Defaults to ALL values in
the passed array. Use dimension=None to flatten array first. REMEMBER: if
dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and
if dimension is a sequence, it collapses over all specified dimensions. If
keepdims is set to 1, the resulting array will have as many dimensions as
inarray, with only 1 'level' per dim that was collapsed over.
Usage: ageometricmean(inarray,dimension=None,keepdims=0)
Returns: geometric mean computed over dim(s) listed in dimension
"""
inarray = N.array(inarray, N.Float)
if dimension is None:
inarray = N.ravel(inarray)
size = len(inarray)
mult = N.power(inarray, 1.0/size)
mult = N.multiply.reduce(mult)
elif type(dimension) in [int, float]:
size = inarray.shape[dimension]
mult = N.power(inarray, 1.0/size)
mult = N.multiply.reduce(mult, dimension)
if keepdims == 1:
shp = list(inarray.shape)
shp[dimension] = 1
mult = N.reshape(mult, shp)
else: # must be a SEQUENCE of dims to average over
dims = sorted(dimension)
dims.reverse()
size = N.array(N.multiply.reduce(N.take(inarray.shape, dims)), N.Float)
mult = N.power(inarray, 1.0/size)
for dim in dims:
mult = N.multiply.reduce(mult, dim)
if keepdims == 1:
shp = list(inarray.shape)
for dim in dims:
shp[dim] = 1
mult = N.reshape(mult, shp)
return mult
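# Illustrative example (added; not part of the original |Stat port), assuming
# Numeric imported successfully as N above:
#
#   >>> ageometricmean(N.array([1.0, 3.0, 9.0]))   # cube root of 1*3*9 = 27
#   -> approximately 3.0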
def aharmonicmean(inarray, dimension=None, keepdims=0):
"""
Calculates the harmonic mean of the values in the passed array.
That is: n / (1/x1 + 1/x2 + ... + 1/xn). Defaults to ALL values in
the passed array. Use dimension=None to flatten array first. REMEMBER: if
dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and
if dimension is a sequence, it collapses over all specified dimensions. If
keepdims is set to 1, the resulting array will have as many dimensions as
inarray, with only 1 'level' per dim that was collapsed over.
Usage: aharmonicmean(inarray,dimension=None,keepdims=0)
Returns: harmonic mean computed over dim(s) in dimension
"""
inarray = inarray.astype(N.Float)
if dimension is None:
inarray = N.ravel(inarray)
size = len(inarray)
s = N.add.reduce(1.0 / inarray)
elif type(dimension) in [int, float]:
size = float(inarray.shape[dimension])
s = N.add.reduce(1.0/inarray, dimension)
if keepdims == 1:
shp = list(inarray.shape)
shp[dimension] = 1
s = N.reshape(s, shp)
else: # must be a SEQUENCE of dims to average over
dims = sorted(dimension)
nondims = []
for i in range(len(inarray.shape)):
if i not in dims:
nondims.append(i)
tinarray = N.transpose(inarray, nondims+dims) # put keep-dims first
idx = [0] * len(nondims)
if idx == []:
size = len(N.ravel(inarray))
s = asum(1.0 / inarray)
if keepdims == 1:
s = N.reshape([s], N.ones(len(inarray.shape)))
else:
idx[0] = -1
loopcap = N.array(tinarray.shape[0:len(nondims)]) - 1
s = N.zeros(loopcap+1, N.Float)
while incr(idx, loopcap) != -1:
s[idx] = asum(1.0/tinarray[idx])
size = N.multiply.reduce(N.take(inarray.shape, dims))
if keepdims == 1:
shp = list(inarray.shape)
for dim in dims:
shp[dim] = 1
s = N.reshape(s, shp)
return size / s
def amean(inarray, dimension=None, keepdims=0):
"""
Calculates the arithmetic mean of the values in the passed array.
That is: 1/n * (x1 + x2 + ... + xn). Defaults to ALL values in the
passed array. Use dimension=None to flatten array first. REMEMBER: if
dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and
if dimension is a sequence, it collapses over all specified dimensions. If
keepdims is set to 1, the resulting array will have as many dimensions as
inarray, with only 1 'level' per dim that was collapsed over.
Usage: amean(inarray,dimension=None,keepdims=0)
Returns: arithmetic mean calculated over dim(s) in dimension
"""
if inarray.typecode() in ['l', 's', 'b']:
inarray = inarray.astype(N.Float)
if dimension is None:
inarray = N.ravel(inarray)
sum = N.add.reduce(inarray)
denom = float(len(inarray))
elif type(dimension) in [int, float]:
sum = asum(inarray, dimension)
denom = float(inarray.shape[dimension])
if keepdims == 1:
shp = list(inarray.shape)
shp[dimension] = 1
sum = N.reshape(sum, shp)
else: # must be a TUPLE of dims to average over
dims = sorted(dimension)
dims.reverse()
sum = inarray * 1.0
for dim in dims:
sum = N.add.reduce(sum, dim)
denom = N.array(N.multiply.reduce(N.take(inarray.shape, dims)), N.Float)
if keepdims == 1:
shp = list(inarray.shape)
for dim in dims:
shp[dim] = 1
sum = N.reshape(sum, shp)
return sum/denom
def amedian(inarray, numbins=1000):
"""
Calculates the COMPUTED median value of an array of numbers, given the
number of bins to use for the histogram (more bins approaches finding the
precise median value of the array; default number of bins = 1000). From
G.W. Heiman's Basic Stats, or CRC Probability & Statistics.
NOTE: THIS ROUTINE ALWAYS uses the entire passed array (flattens it first).
Usage: amedian(inarray,numbins=1000)
Returns: median calculated over ALL values in inarray
"""
inarray = N.ravel(inarray)
(hist, smallest, binsize, extras) = ahistogram(inarray, numbins)
cumhist = N.cumsum(hist) # make cumulative histogram
otherbins = N.greater_equal(cumhist, len(inarray)/2.0)
otherbins = list(otherbins) # list of 0/1s, 1s start at median bin
cfbin = otherbins.index(1) # get 1st(!) index holding 50%ile score
LRL = smallest + binsize*cfbin # get lower read limit of that bin
cfbelow = N.add.reduce(hist[0:cfbin]) # cum. freq. below bin
freq = hist[cfbin] # frequency IN the 50%ile bin
median = LRL + ((len(inarray)/2.0-cfbelow)/float(freq))*binsize # MEDIAN
return median
def amedianscore(inarray, dimension=None):
"""
Returns the 'middle' score of the passed array. If there is an even
number of scores, the mean of the 2 middle scores is returned. Can function
with 1D arrays, or on the FIRST dimension of 2D arrays (i.e., dimension can
be None, to pre-flatten the array, or else dimension must equal 0).
Usage: amedianscore(inarray,dimension=None)
Returns: 'middle' score of the array, or the mean of the 2 middle scores
"""
if dimension is None:
inarray = N.ravel(inarray)
dimension = 0
inarray = N.sort(inarray, dimension)
if inarray.shape[dimension] % 2 == 0: # if even number of elements
indx = inarray.shape[dimension] // 2 # integer division
median = N.asarray(inarray[indx]+inarray[indx-1]) / 2.0
else:
indx = inarray.shape[dimension] // 2 # integer division
median = N.take(inarray, [indx], dimension)
if median.shape == (1,):
median = median[0]
return median
def amode(a, dimension=None):
"""
Returns an array of the modal (most common) score in the passed array.
If there is more than one such score, ONLY THE FIRST is returned.
The bin-count for the modal values is also returned. Operates on whole
array (dimension=None), or on a given dimension.
Usage: amode(a, dimension=None)
Returns: array of bin-counts for mode(s), array of corresponding modal values
"""
if dimension is None:
a = N.ravel(a)
dimension = 0
scores = pstat.aunique(N.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[dimension] = 1
oldmostfreq = N.zeros(testshape)
oldcounts = N.zeros(testshape)
for score in scores:
template = N.equal(a, score)
counts = asum(template, dimension, 1)
mostfrequent = N.where(N.greater(counts, oldcounts), score, oldmostfreq)
oldcounts = N.where(N.greater(counts, oldcounts), counts, oldcounts)
oldmostfreq = mostfrequent
return oldcounts, mostfrequent
def atmean(a, limits=None, inclusive=(1, 1)):
"""
Returns the arithmetic mean of all values in an array, ignoring values
strictly outside the sequence passed to 'limits'. Note: either limit
in the sequence, or the value of limits itself, can be set to None. The
inclusive list/tuple determines whether the lower and upper limiting bounds
(respectively) are open/exclusive (0) or closed/inclusive (1).
Usage: atmean(a,limits=None,inclusive=(1,1))
"""
if a.typecode() in ['l', 's', 'b']:
a = a.astype(N.Float)
if limits is None:
return mean(a)
assert type(limits) in [list, tuple, N.ArrayType], "Wrong type for limits in atmean"
if inclusive[0]:
lowerfcn = N.greater_equal
else:
lowerfcn = N.greater
if inclusive[1]:
upperfcn = N.less_equal
else:
upperfcn = N.less
if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)):
raise ValueError("No array values within given limits (atmean).")
elif limits[0] is None and limits[1] is not None:
mask = upperfcn(a, limits[1])
elif limits[0] is not None and limits[1] is None:
mask = lowerfcn(a, limits[0])
elif limits[0] is not None and limits[1] is not None:
mask = lowerfcn(a, limits[0])*upperfcn(a, limits[1])
s = float(N.add.reduce(N.ravel(a*mask)))
n = float(N.add.reduce(N.ravel(mask)))
return s/n
def atvar(a, limits=None, inclusive=(1, 1)):
"""
Returns the sample variance of values in an array, (i.e., using N-1),
ignoring values strictly outside the sequence passed to 'limits'.
Note: either limit in the sequence, or the value of limits itself,
can be set to None. The inclusive list/tuple determines whether the lower
and upper limiting bounds (respectively) are open/exclusive (0) or
closed/inclusive (1).
Usage: atvar(a,limits=None,inclusive=(1,1))
"""
a = a.astype(N.Float)
if limits is None or limits == [None, None]:
term1 = N.add.reduce(N.ravel(a*a))
n = float(len(N.ravel(a))) - 1
term2 = N.add.reduce(N.ravel(a))**2 / n
print(term1, term2, n)
return (term1 - term2) / n
assert type(limits) in [list, tuple, N.ArrayType], "Wrong type for limits in atvar"
if inclusive[0]:
lowerfcn = N.greater_equal
else:
lowerfcn = N.greater
if inclusive[1]:
upperfcn = N.less_equal
else:
upperfcn = N.less
if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)):
raise ValueError("No array values within given limits (atvar).")
elif limits[0] is None and limits[1] is not None:
mask = upperfcn(a, limits[1])
elif limits[0] is not None and limits[1] is None:
mask = lowerfcn(a, limits[0])
elif limits[0] is not None and limits[1] is not None:
mask = lowerfcn(a, limits[0])*upperfcn(a, limits[1])
term1 = N.add.reduce(N.ravel(a*a*mask))
n = float(N.add.reduce(N.ravel(mask))) - 1
term2 = N.add.reduce(N.ravel(a*mask))**2 / n
print(term1, term2, n)
return (term1 - term2) / n
def atmin(a, lowerlimit=None, dimension=None, inclusive=1):
"""
Returns the minimum value of a, along dimension, considering only values
above lowerlimit (the inclusive flag controls whether the limit value itself
is kept). If the limit is set to None,
all values in the array are used.
Usage: atmin(a,lowerlimit=None,dimension=None,inclusive=1)
"""
if inclusive:
lowerfcn = N.greater
else:
lowerfcn = N.greater_equal
if dimension is None:
a = N.ravel(a)
dimension = 0
if lowerlimit is None:
lowerlimit = N.minimum.reduce(N.ravel(a))-11
biggest = N.maximum.reduce(N.ravel(a))
ta = N.where(lowerfcn(a, lowerlimit), a, biggest)
return N.minimum.reduce(ta, dimension)
def atmax(a, upperlimit, dimension=None, inclusive=1):
"""
Returns the maximum value of a, along dimension, considering only values
below upperlimit (the inclusive flag controls whether the limit value itself
is kept). If the limit is set to None,
a limit larger than the max value in the array is used.
Usage: atmax(a,upperlimit,dimension=None,inclusive=1)
"""
if inclusive:
upperfcn = N.less
else:
upperfcn = N.less_equal
if dimension is None:
a = N.ravel(a)
dimension = 0
if upperlimit is None:
upperlimit = N.maximum.reduce(N.ravel(a))+1
smallest = N.minimum.reduce(N.ravel(a))
ta = N.where(upperfcn(a, upperlimit), a, smallest)
return N.maximum.reduce(ta, dimension)
def atstdev(a, limits=None, inclusive=(1, 1)):
"""
Returns the standard deviation of all values in an array, ignoring values
strictly outside the sequence passed to 'limits'. Note: either limit
in the sequence, or the value of limits itself, can be set to None. The
inclusive list/tuple determines whether the lower and upper limiting bounds
(respectively) are open/exclusive (0) or closed/inclusive (1).
Usage: atstdev(a,limits=None,inclusive=(1,1))
"""
return N.sqrt(tvar(a, limits, inclusive))
def atsem(a, limits=None, inclusive=(1, 1)):
"""
Returns the standard error of the mean for the values in an array,
(i.e., using N for the denominator), ignoring values strictly outside
the sequence passed to 'limits'. Note: either limit in the sequence,
or the value of limits itself, can be set to None. The inclusive list/tuple
determines whether the lower and upper limiting bounds (respectively) are
open/exclusive (0) or closed/inclusive (1).
Usage: atsem(a,limits=None,inclusive=(1,1))
"""
sd = tstdev(a, limits, inclusive)
if limits is None or limits == [None, None]:
n = float(len(N.ravel(a)))
return sd/math.sqrt(n) # no limits given: plain sd/sqrt(N)
assert type(limits) in [list, tuple, N.ArrayType], "Wrong type for limits in atsem"
if inclusive[0]:
lowerfcn = N.greater_equal
else:
lowerfcn = N.greater
if inclusive[1]:
upperfcn = N.less_equal
else:
upperfcn = N.less
if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)):
raise ValueError("No array values within given limits (atsem).")
elif limits[0] is None and limits[1] is not None:
mask = upperfcn(a, limits[1])
elif limits[0] is not None and limits[1] is None:
mask = lowerfcn(a, limits[0])
elif limits[0] is not None and limits[1] is not None:
mask = lowerfcn(a, limits[0])*upperfcn(a, limits[1])
N.add.reduce(N.ravel(a*a*mask))
n = float(N.add.reduce(N.ravel(mask)))
return sd/math.sqrt(n)
# AMOMENTS
def amoment(a, moment=1, dimension=None):
"""
Calculates the nth moment about the mean for a sample (defaults to the
1st moment). Generally used to calculate coefficients of skewness and
kurtosis. Dimension can equal None (ravel array first), an integer
(the dimension over which to operate), or a sequence (operate over
multiple dimensions).
Usage: amoment(a,moment=1,dimension=None)
Returns: appropriate moment along given dimension
"""
if dimension is None:
a = N.ravel(a)
dimension = 0
if moment == 1:
return 0.0
else:
mn = amean(a, dimension, 1) # 1=keepdims
s = N.power((a-mn), moment)
return amean(s, dimension)
def avariation(a, dimension=None):
"""
Returns the coefficient of variation, as defined in CRC Standard
Probability and Statistics, p.6. Dimension can equal None (ravel array
first), an integer (the dimension over which to operate), or a
sequence (operate over multiple dimensions).
Usage: avariation(a,dimension=None)
"""
return 100.0*asamplestdev(a, dimension)/amean(a, dimension)
def askew(a, dimension=None):
"""
Returns the skewness of a distribution (normal ==> 0.0; >0 means extra
weight in left tail). Use askewtest() to see if it's close enough.
Dimension can equal None (ravel array first), an integer (the
dimension over which to operate), or a sequence (operate over multiple
dimensions).
Usage: askew(a, dimension=None)
Returns: skew of vals in a along dimension, returning ZERO where all vals equal
"""
denom = N.power(amoment(a, 2, dimension), 1.5)
zero = N.equal(denom, 0)
if isinstance(denom, N.ArrayType) and asum(zero) != 0:
print("Number of zeros in askew: ", asum(zero))
denom = denom + zero # prevent divide-by-zero
return N.where(zero, 0, amoment(a, 3, dimension)/denom)
def akurtosis(a, dimension=None):
"""
Returns the kurtosis of a distribution (normal ==> 3.0; >3 means
heavier in the tails, and usually more peaked). Use akurtosistest()
to see if it's close enough. Dimension can equal None (ravel array
first), an integer (the dimension over which to operate), or a
sequence (operate over multiple dimensions).
Usage: akurtosis(a,dimension=None)
Returns: kurtosis of values in a along dimension, and ZERO where all vals equal
"""
denom = N.power(amoment(a, 2, dimension), 2)
zero = N.equal(denom, 0)
if isinstance(denom, N.ArrayType) and asum(zero) != 0:
print("Number of zeros in akurtosis: ", asum(zero))
denom = denom + zero # prevent divide-by-zero
return N.where(zero, 0, amoment(a, 4, dimension)/denom)
def adescribe(inarray, dimension=None):
"""
Returns several descriptive statistics of the passed array. Dimension
can equal None (ravel array first), an integer (the dimension over
which to operate), or a sequence (operate over multiple dimensions).
Usage: adescribe(inarray,dimension=None)
Returns: n, (min,max), mean, standard deviation, skew, kurtosis
"""
if dimension is None:
inarray = N.ravel(inarray)
dimension = 0
n = inarray.shape[dimension]
mm = (N.minimum.reduce(inarray), N.maximum.reduce(inarray))
m = amean(inarray, dimension)
sd = astdev(inarray, dimension)
skew = askew(inarray, dimension)
kurt = akurtosis(inarray, dimension)
return n, mm, m, sd, skew, kurt
# NORMALITY TESTS
def askewtest(a, dimension=None):
"""
Tests whether the skew is significantly different from a normal
distribution. Dimension can equal None (ravel array first), an
integer (the dimension over which to operate), or a sequence (operate
over multiple dimensions).
Usage: askewtest(a,dimension=None)
Returns: z-score and 2-tail z-probability
"""
if dimension is None:
a = N.ravel(a)
dimension = 0
b2 = askew(a, dimension)
n = float(a.shape[dimension])
y = b2 * N.sqrt(((n+1)*(n+3)) / (6.0*(n-2)))
beta2 = (3.0*(n*n+27*n-70)*(n+1)*(n+3)) / ((n-2.0)*(n+5)*(n+7)*(n+9))
W2 = -1 + N.sqrt(2*(beta2-1))
delta = 1/N.sqrt(N.log(N.sqrt(W2)))
alpha = N.sqrt(2/(W2-1))
y = N.where(N.equal(y, 0), 1, y)
Z = delta*N.log(y/alpha + N.sqrt((y/alpha)**2+1))
return Z, (1.0-zprob(Z))*2
def akurtosistest(a, dimension=None):
"""
Tests whether a dataset has normal kurtosis (i.e.,
kurtosis=3(n-1)/(n+1)). Valid only for n>=20. Dimension can equal None
(ravel array first), an integer (the dimension over which to operate),
or a sequence (operate over multiple dimensions).
Usage: akurtosistest(a,dimension=None)
Returns: z-score and 2-tail z-probability, returns 0 for bad pixels
"""
if dimension is None:
a = N.ravel(a)
dimension = 0
n = float(a.shape[dimension])
if n < 20:
print("akurtosistest only valid for n>=20 ... continuing anyway, n=", n)
b2 = akurtosis(a, dimension)
E = 3.0*(n-1) / (n+1)
varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1)*(n+3)*(n+5))
x = (b2-E)/N.sqrt(varb2)
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * N.sqrt((6.0*(n+3)*(n+5))
/ (n*(n-2)*(n-3)))
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + N.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2/(9.0*A)
denom = 1 + x*N.sqrt(2/(A-4.0))
denom = N.where(N.less(denom, 0), 99, denom)
term2 = N.where(N.equal(denom, 0), term1, N.power((1-2.0/A)/denom, 1/3.0))
Z = (term1 - term2) / N.sqrt(2/(9.0*A))
Z = N.where(N.equal(denom, 99), 0, Z)
return Z, (1.0-zprob(Z))*2
def anormaltest(a, dimension=None):
"""
Tests whether skew and/OR kurtosis of dataset differs from normal
curve. Can operate over multiple dimensions. Dimension can equal
None (ravel array first), an integer (the dimension over which to
operate), or a sequence (operate over multiple dimensions).
Usage: anormaltest(a,dimension=None)
Returns: z-score and 2-tail probability
"""
if dimension is None:
a = N.ravel(a)
dimension = 0
s, p = askewtest(a, dimension)
k, p = akurtosistest(a, dimension)
k2 = N.power(s, 2) + N.power(k, 2)
return k2, achisqprob(k2, 2)
# AFREQUENCY FUNCTIONS
def aitemfreq(a):
"""
Returns a 2D array of item frequencies. Column 1 contains item values,
column 2 contains their respective counts. Assumes a 1D array is passed.
Usage: aitemfreq(a)
Returns: a 2D frequency table (col [0:n-1]=scores, col n=frequencies)
"""
scores = pstat.aunique(a)
scores = N.sort(scores)
freq = N.zeros(len(scores))
for i in range(len(scores)):
freq[i] = N.add.reduce(N.equal(a, scores[i]))
return N.array(pstat.aabut(scores, freq))
def ascoreatpercentile(inarray, percent):
"""
Usage: ascoreatpercentile(inarray,percent) 0<percent<100
Returns: score at given percentile, relative to inarray distribution
"""
percent = percent / 100.0
targetcf = percent*len(inarray)
h, lrl, binsize, extras = histogram(inarray)
cumhist = cumsum(h*1)
for i in range(len(cumhist)):
if cumhist[i] >= targetcf:
break
score = binsize * ((targetcf - cumhist[i-1]) / float(h[i])) + (lrl+binsize*i)
return score
def apercentileofscore(inarray, score, histbins=10, defaultlimits=None):
"""
Note: result of this function depends on the values used to histogram
the data(!).
Usage: apercentileofscore(inarray,score,histbins=10,defaultlimits=None)
Returns: percentile-position of score (0-100) relative to inarray
"""
h, lrl, binsize, extras = histogram(inarray, histbins, defaultlimits)
cumhist = cumsum(h*1)
i = int((score - lrl)/float(binsize))
pct = (cumhist[i-1]+((score-(lrl+binsize*i))/float(binsize))*h[i])/float(len(inarray)) * 100
return pct
def ahistogram(inarray, numbins=10, defaultlimits=None, printextras=1):
"""
Returns (i) an array of histogram bin counts, (ii) the smallest value
of the histogram binning, and (iii) the bin width (the last 2 are not
necessarily integers). Default number of bins is 10. Defaultlimits
can be None (the routine picks bins spanning all the numbers in the
inarray) or a 2-sequence (lowerlimit, upperlimit). Returns all of the
following: array of bin values, lowerreallimit, binsize, extrapoints.
Usage: ahistogram(inarray,numbins=10,defaultlimits=None,printextras=1)
Returns: (array of bin counts, bin-minimum, min-width, #-points-outside-range)
"""
inarray = N.ravel(inarray) # flatten any >1D arrays
if (defaultlimits is not None):
lowerreallimit = defaultlimits[0]
upperreallimit = defaultlimits[1]
binsize = (upperreallimit-lowerreallimit) / float(numbins)
else:
Min = N.minimum.reduce(inarray)
Max = N.maximum.reduce(inarray)
estbinwidth = float(Max - Min)/float(numbins) + 1
binsize = (Max-Min+estbinwidth)/float(numbins)
lowerreallimit = Min - binsize/2.0 # lower real limit,1st bin
bins = N.zeros(numbins)
extrapoints = 0
for num in inarray:
try:
if (num-lowerreallimit) < 0:
extrapoints = extrapoints + 1
else:
bintoincrement = int((num-lowerreallimit) / float(binsize))
bins[bintoincrement] = bins[bintoincrement] + 1
except Exception: # point outside lower/upper limits
extrapoints = extrapoints + 1
if (extrapoints > 0 and printextras == 1):
print('\nPoints outside given histogram range =', extrapoints)
return (bins, lowerreallimit, binsize, extrapoints)
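# Illustrative example (added; not part of the original |Stat port): with
# explicit limits the bins are equal-width slices of [lower, upper). Assuming
# Numeric:
#
#   >>> ahistogram(N.array([0.5, 1.5, 2.5, 3.5, 1.2]), numbins=4,
#   ...            defaultlimits=(0, 4))
#   -> (counts [1, 2, 1, 1], lower limit 0, bin width 1.0, 0 extra points)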
def acumfreq(a, numbins=10, defaultreallimits=None):
"""
Returns a cumulative frequency histogram, using the histogram function.
Defaultreallimits can be None (use all data), or a 2-sequence containing
lower and upper limits on values to include.
Usage: acumfreq(a,numbins=10,defaultreallimits=None)
Returns: array of cumfreq bin values, lowerreallimit, binsize, extrapoints
"""
h, l, b, e = histogram(a, numbins, defaultreallimits)
cumhist = cumsum(h*1)
return cumhist, l, b, e
def arelfreq(a, numbins=10, defaultreallimits=None):
"""
Returns a relative frequency histogram, using the histogram function.
Defaultreallimits can be None (use all data), or a 2-sequence containing
lower and upper limits on values to include.
Usage: arelfreq(a,numbins=10,defaultreallimits=None)
Returns: array of cumfreq bin values, lowerreallimit, binsize, extrapoints
"""
h, l, b, e = histogram(a, numbins, defaultreallimits)
h = N.array(h/float(a.shape[0]))
return h, l, b, e
# AVARIABILITY FUNCTIONS
def aobrientransform(*args):
"""
Computes a transform on input data (any number of columns). Used to
test for homogeneity of variance prior to running one-way stats. Each
array in *args is one level of a factor. If an F_oneway() run on the
transformed data and found significant, variances are unequal. From
Maxwell and Delaney, p.112.
Usage: aobrientransform(*args) *args = 1D arrays, one per level of factor
Returns: transformed data for use in an ANOVA
"""
TINY = 1e-10
k = len(args)
n = N.zeros(k, N.Float)
v = N.zeros(k, N.Float)
m = N.zeros(k, N.Float)
nargs = []
for i in range(k):
nargs.append(args[i].astype(N.Float))
n[i] = float(len(nargs[i]))
v[i] = var(nargs[i])
m[i] = mean(nargs[i])
for j in range(k):
for i in range(int(n[j])): # n[j] is a float array element; range needs an int
t1 = (n[j]-1.5)*n[j]*(nargs[j][i]-m[j])**2
t2 = 0.5*v[j]*(n[j]-1.0)
t3 = (n[j]-1.0)*(n[j]-2.0)
nargs[j][i] = (t1-t2) / float(t3)
check = 1
for j in range(k):
if v[j] - mean(nargs[j]) > TINY:
check = 0
if check != 1:
raise ValueError('Lack of convergence in obrientransform.')
else:
return N.array(nargs)
def asamplevar(inarray, dimension=None, keepdims=0):
"""
Returns the sample variance of the values in the passed
array (i.e., using N). Dimension can equal None (ravel array first),
an integer (the dimension over which to operate), or a sequence
(operate over multiple dimensions). Set keepdims=1 to return an array
with the same number of dimensions as inarray.
Usage: asamplevar(inarray,dimension=None,keepdims=0)
"""
if dimension is None:
inarray = N.ravel(inarray)
dimension = 0
if dimension == 1:
mn = amean(inarray, dimension)[:, N.NewAxis]
else:
mn = amean(inarray, dimension, keepdims=1)
deviations = inarray - mn
if isinstance(dimension, list):
n = 1
for d in dimension:
n = n*inarray.shape[d]
else:
n = inarray.shape[dimension]
svar = ass(deviations, dimension, keepdims) / float(n)
return svar
def asamplestdev(inarray, dimension=None, keepdims=0):
"""
Returns the sample standard deviation of the values in the passed
array (i.e., using N). Dimension can equal None (ravel array first),
an integer (the dimension over which to operate), or a sequence
(operate over multiple dimensions). Set keepdims=1 to return an array
with the same number of dimensions as inarray.
Usage: asamplestdev(inarray,dimension=None,keepdims=0)
"""
return N.sqrt(asamplevar(inarray, dimension, keepdims))
def asignaltonoise(instack, dimension=0):
"""
Calculates signal-to-noise. Dimension can equal None (ravel array
first), an integer (the dimension over which to operate), or a
sequence (operate over multiple dimensions).
Usage: asignaltonoise(instack,dimension=0):
Returns: array containing the value of (mean/stdev) along dimension,
or 0 when stdev=0
"""
m = mean(instack, dimension)
sd = stdev(instack, dimension)
return N.where(N.equal(sd, 0), 0, m/sd)
def avar(inarray, dimension=None, keepdims=0):
"""
Returns the estimated population variance of the values in the passed
array (i.e., N-1). Dimension can equal None (ravel array first), an
integer (the dimension over which to operate), or a sequence (operate
over multiple dimensions). Set keepdims=1 to return an array with the
same number of dimensions as inarray.
Usage: avar(inarray,dimension=None,keepdims=0)
"""
if dimension is None:
inarray = N.ravel(inarray)
dimension = 0
mn = amean(inarray, dimension, 1)
deviations = inarray - mn
if isinstance(dimension, list):
n = 1
for d in dimension:
n = n*inarray.shape[d]
else:
n = inarray.shape[dimension]
var = ass(deviations, dimension, keepdims)/float(n-1)
return var
def astdev(inarray, dimension=None, keepdims=0):
"""
Returns the estimated population standard deviation of the values in
the passed array (i.e., N-1). Dimension can equal None (ravel array
first), an integer (the dimension over which to operate), or a
sequence (operate over multiple dimensions). Set keepdims=1 to return
an array with the same number of dimensions as inarray.
Usage: astdev(inarray,dimension=None,keepdims=0)
"""
return N.sqrt(avar(inarray, dimension, keepdims))
def asterr(inarray, dimension=None, keepdims=0):
"""
Returns the estimated population standard error of the values in the
passed array (i.e., N-1). Dimension can equal None (ravel array
first), an integer (the dimension over which to operate), or a
sequence (operate over multiple dimensions). Set keepdims=1 to return
an array with the same number of dimensions as inarray.
Usage: asterr(inarray,dimension=None,keepdims=0)
"""
if dimension is None:
inarray = N.ravel(inarray)
dimension = 0
return astdev(inarray, dimension, keepdims) / float(N.sqrt(inarray.shape[dimension]))
def asem(inarray, dimension=None, keepdims=0):
"""
Returns the standard error of the mean (i.e., using N) of the values
in the passed array. Dimension can equal None (ravel array first), an
integer (the dimension over which to operate), or a sequence (operate
over multiple dimensions). Set keepdims=1 to return an array with the
same number of dimensions as inarray.
Usage: asem(inarray,dimension=None, keepdims=0)
"""
if dimension is None:
inarray = N.ravel(inarray)
dimension = 0
if isinstance(dimension, list):
n = 1
for d in dimension:
n = n*inarray.shape[d]
else:
n = inarray.shape[dimension]
s = asamplestdev(inarray, dimension, keepdims) / N.sqrt(n-1)
return s
def az(a, score):
"""
Returns the z-score of a given input score, given the array from which
that score came. Not appropriate for population calculations, nor for
arrays > 1D.
Usage: az(a, score)
"""
z = (score-amean(a)) / asamplestdev(a)
return z
def azs(a):
"""
Returns a 1D array of z-scores, one for each score in the passed array,
computed relative to the passed array.
Usage: azs(a)
"""
zscores = []
for item in a:
zscores.append(z(a, item))
return N.array(zscores)
def azmap(scores, compare, dimension=0):
"""
Returns an array of z-scores the shape of scores (e.g., [x,y]), compared to
array passed to compare (e.g., [time,x,y]). Assumes collapsing over dim 0
of the compare array.
Usage: azmap(scores, compare, dimension=0)
"""
mns = amean(compare, dimension)
sstd = asamplestdev(compare, 0)
return (scores - mns) / sstd
# ATRIMMING FUNCTIONS
def around(a, digits=1):
"""
Rounds all values in array a to 'digits' decimal places.
Usage: around(a,digits)
Returns: a, where each value is rounded to 'digits' decimals
"""
def ar(x, d=digits):
return round(x, d)
if not isinstance(a, N.ArrayType):
try:
a = N.array(a)
except Exception:
a = N.array(a, 'O')
shp = a.shape
if a.typecode() in ['f', 'F', 'd', 'D']:
b = N.ravel(a)
b = N.array([ar(_) for _ in b])
b.shape = shp
elif a.typecode() in ['o', 'O']:
b = N.ravel(a)*1
for i in range(len(b)):
if isinstance(b[i], float):
b[i] = round(b[i], digits)
b.shape = shp
else: # not a float, double or Object array
b = a*1
return b
def athreshold(a, threshmin=None, threshmax=None, newval=0):
"""
Like Numeric.clip() except that values <threshmin or >threshmax are replaced
by newval instead of by threshmin/threshmax (respectively).
Usage: athreshold(a,threshmin=None,threshmax=None,newval=0)
Returns: a, with values <threshmin or >threshmax replaced with newval
"""
mask = N.zeros(a.shape)
if threshmin is not None:
mask = mask + N.where(N.less(a, threshmin), 1, 0)
if threshmax is not None:
mask = mask + N.where(N.greater(a, threshmax), 1, 0)
mask = N.clip(mask, 0, 1)
return N.where(mask, newval, a)
def atrimboth(a, proportiontocut):
"""
Slices off the passed proportion of items from BOTH ends of the passed
array (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND
'rightmost' 10% of scores). You must pre-sort the array if you want
"proper" trimming. Slices off LESS if proportion results in a
non-integer slice index (i.e., conservatively slices off
proportiontocut).
Usage: atrimboth (a,proportiontocut)
Returns: trimmed version of array a
"""
lowercut = int(proportiontocut*len(a))
uppercut = len(a) - lowercut
return a[lowercut:uppercut]
def atrim1(a, proportiontocut, tail='right'):
"""
Slices off the passed proportion of items from ONE end of the passed
array (i.e., if proportiontocut=0.1, slices off 'leftmost' or 'rightmost'
10% of scores). Slices off LESS if proportion results in a non-integer
slice index (i.e., conservatively slices off proportiontocut).
Usage: atrim1(a,proportiontocut,tail='right') or set tail='left'
Returns: trimmed version of array a
"""
if tail.lower() == 'right':
lowercut = 0
uppercut = len(a) - int(proportiontocut*len(a))
elif tail.lower() == 'left':
lowercut = int(proportiontocut*len(a))
uppercut = len(a)
return a[lowercut:uppercut]
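# Illustrative example (added; not part of the original |Stat port): trimming
# slices a pre-sorted array, truncating the cut proportion toward zero.
# Assuming Numeric:
#
#   >>> a = N.array(list(range(10)))
#   >>> atrimboth(a, 0.2)   # drops 2 values from each end -> elements 2..7
#   >>> atrim1(a, 0.2)      # default tail='right', drops the top 2 -> 0..7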
# ACORRELATION FUNCTIONS
def acovariance(X):
"""
Computes the covariance matrix of a matrix X. Requires a 2D matrix input.
Usage: acovariance(X)
Returns: covariance matrix of X
"""
if len(X.shape) != 2:
raise TypeError("acovariance requires 2D matrices")
n = X.shape[0]
mX = amean(X, 0)
return N.dot(N.transpose(X), X) / float(n) - N.multiply.outer(mX, mX)
def acorrelation(X):
"""
Computes the correlation matrix of a matrix X. Requires a 2D matrix input.
Usage: acorrelation(X)
Returns: correlation matrix of X
"""
C = acovariance(X)
V = N.diagonal(C)
return C / N.sqrt(N.multiply.outer(V, V))
def apaired(x, y):
"""
Interactively determines the type of data in x and y, and then runs the
appropriate statistic for paired group data.
Usage: apaired(x,y) x,y = the two arrays of values to be compared
Returns: appropriate statistic name, value, and probability
"""
samples = ''
while samples not in ['i', 'r', 'I', 'R', 'c', 'C']:
print('\nIndependent or related samples, or correlation (i,r,c): ', end=' ')
samples = input()
if samples in ['i', 'I', 'r', 'R']:
print('\nComparing variances ...', end=' ')
# USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & delaney, p.112
r = obrientransform(x, y)
f, p = F_oneway(pstat.colex(r, 0), pstat.colex(r, 1))
if p < 0.05:
vartype = 'unequal, p='+str(round(p, 4))
else:
vartype = 'equal'
print(vartype)
if samples in ['i', 'I']:
if vartype[0] == 'e':
t, p = ttest_ind(x, y, None, 0)
print('\nIndependent samples t-test: ', round(t, 4), round(p, 4))
else:
if len(x) > 20 or len(y) > 20:
z, p = ranksums(x, y)
print('\nRank Sums test (NONparametric, n>20): ', round(z, 4), round(p, 4))
else:
u, p = mannwhitneyu(x, y)
print('\nMann-Whitney U-test (NONparametric, ns<20): ', round(u, 4), round(p, 4))
else: # RELATED SAMPLES
if vartype[0] == 'e':
t, p = ttest_rel(x, y, 0)
print('\nRelated samples t-test: ', round(t, 4), round(p, 4))
else:
t, p = ranksums(x, y)
print('\nWilcoxon T-test (NONparametric): ', round(t, 4), round(p, 4))
else: # CORRELATION ANALYSIS
corrtype = ''
while corrtype not in ['c', 'C', 'r', 'R', 'd', 'D']:
print('\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ', end=' ')
corrtype = input()
if corrtype in ['c', 'C']:
m, b, r, p, see = linregress(x, y)
print('\nLinear regression for continuous variables ...')
lol = [['Slope', 'Intercept', 'r', 'Prob', 'SEestimate'], [round(m, 4), round(b, 4), round(r, 4), round(p, 4), round(see, 4)]]
pstat.printcc(lol)
elif corrtype in ['r', 'R']:
r, p = spearmanr(x, y)
print('\nCorrelation for ranked variables ...')
print("Spearman's r: ", round(r, 4), round(p, 4))
else: # DICHOTOMOUS
r, p = pointbiserialr(x, y)
print('\nAssuming x contains a dichotomous variable ...')
print('Point Biserial r: ', round(r, 4), round(p, 4))
print('\n\n')
return None
def apearsonr(x, y, verbose=1):
"""
Calculates a Pearson correlation coefficient and the associated p-value. Taken
from Heiman's Basic Statistics for the Behav. Sci (2nd), p.195.
Usage: apearsonr(x,y,verbose=1) where x,y are equal length arrays
Returns: Pearson's r, two-tailed p-value
"""
TINY = 1.0e-20
n = len(x)
r_num = n*(N.add.reduce(x*y)) - N.add.reduce(x)*N.add.reduce(y)
r_den = math.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y)-asquare_of_sums(y)))
r = (r_num / r_den)
df = n-2
t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
prob = abetai(0.5*df, 0.5, df/(df+t*t), verbose)
return r, prob
def aspearmanr(x, y):
"""
Calculates a Spearman rank-order correlation coefficient. Taken
from Heiman's Basic Statistics for the Behav. Sci (1st), p.192.
Usage: aspearmanr(x,y) where x,y are equal-length arrays
Returns: Spearman's r, two-tailed p-value
"""
n = len(x)
rankx = rankdata(x)
ranky = rankdata(y)
dsq = N.add.reduce((rankx-ranky)**2)
rs = 1 - 6*dsq / float(n*(n**2-1))
t = rs * math.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
df = n-2
probrs = abetai(0.5*df, 0.5, df/(df+t*t))
# probability values for rs are from part 2 of the spearman function in
# Numerical Recipes, p.510. They are close to tabled values, but not exact. (?)
return rs, probrs
def apointbiserialr(x, y):
"""
Calculates a point-biserial correlation coefficient and the associated
probability value. Taken from Heiman's Basic Statistics for the Behav.
Sci (1st), p.194.
Usage: apointbiserialr(x,y) where x,y are equal length arrays
Returns: Point-biserial r, two-tailed p-value
"""
TINY = 1e-30
categories = pstat.aunique(x)
data = pstat.aabut(x, y)
if len(categories) != 2:
raise ValueError("Exactly 2 categories required (in x) for pointbiserialr().")
else: # there are 2 categories, continue
codemap = pstat.aabut(categories, N.arange(2))
pstat.arecode(data, codemap, 0) # recoded
x = pstat.alinexand(data, 0, categories[0])
y = pstat.alinexand(data, 0, categories[1])
xmean = amean(pstat.acolex(x, 1))
ymean = amean(pstat.acolex(y, 1))
n = len(data)
adjust = math.sqrt((len(x)/float(n))*(len(y)/float(n)))
rpb = (ymean - xmean)/asamplestdev(pstat.acolex(data, 1))*adjust
df = n-2
t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY)))
prob = abetai(0.5*df, 0.5, df/(df+t*t))
return rpb, prob
def akendalltau(x, y):
"""
Calculates Kendall's tau ... correlation of ordinal data. Adapted
from function kendl1 in Numerical Recipes. Needs good test cases. @@@
Usage: akendalltau(x,y)
Returns: Kendall's tau, two-tailed p-value
"""
n1 = 0
n2 = 0
iss = 0
for j in range(len(x)-1):
for k in range(j, len(y)):
a1 = x[j] - x[k]
a2 = y[j] - y[k]
aa = a1 * a2
if (aa): # neither array has a tie
n1 = n1 + 1
n2 = n2 + 1
if aa > 0:
iss = iss + 1
else:
iss = iss - 1
else:
if (a1):
n1 = n1 + 1
else:
n2 = n2 + 1
tau = iss / math.sqrt(n1*n2)
svar = (4.0*len(x)+10.0) / (9.0*len(x)*(len(x)-1))
z = tau / math.sqrt(svar)
prob = erfcc(abs(z)/1.4142136)
return tau, prob
def alinregress(*args):
"""
Calculates a regression line on two arrays, x and y, corresponding to x,y
pairs. If a single 2D array is passed, alinregress finds dim with 2 levels
and splits data into x,y pairs along that dim.
Usage: alinregress(*args) args=2 equal-length arrays, or one 2D array
Returns: slope, intercept, r, two-tailed prob, sterr-of-the-estimate
"""
TINY = 1.0e-20
if len(args) == 1: # more than 1D array?
args = args[0]
if len(args) == 2:
x = args[0]
y = args[1]
else:
x = args[:, 0]
y = args[:, 1]
else:
x = args[0]
y = args[1]
n = len(x)
xmean = amean(x)
ymean = amean(y)
r_num = n*(N.add.reduce(x*y)) - N.add.reduce(x)*N.add.reduce(y)
r_den = math.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y)-asquare_of_sums(y)))
r = r_num / r_den
df = n-2
t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
prob = abetai(0.5*df, 0.5, df/(df+t*t))
slope = r_num / (float(n)*ass(x) - asquare_of_sums(x))
intercept = ymean - slope*xmean
sterrest = math.sqrt(1-r*r)*asamplestdev(y)
return slope, intercept, r, prob, sterrest
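# Illustrative example (added; not part of the original |Stat port): on
# exactly linear data the fit is recovered directly. Assuming Numeric:
#
#   >>> x = N.array([1.0, 2.0, 3.0, 4.0])
#   >>> slope, intercept, r, prob, sterr = alinregress(x, 2*x + 1)
#   -> slope ~ 2.0, intercept ~ 1.0, r ~ 1.0, sterr ~ 0.0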
# AINFERENTIAL STATISTICS
def attest_1samp(a, popmean, printit=0, name='Sample', writemode='a'):
"""
Calculates the t-obtained for the independent samples T-test on ONE group
of scores a, given a population mean. If printit=1, results are printed
to the screen. If printit='filename', the results are output to 'filename'
using the given writemode (default=append). Returns t-value, and prob.
Usage: attest_1samp(a,popmean,Name='Sample',printit=0,writemode='a')
Returns: t-value, two-tailed prob
"""
if not isinstance(a, N.ArrayType):
a = N.array(a)
x = amean(a)
v = avar(a)
n = len(a)
df = n-1
svar = ((n-1)*v) / float(df)
t = (x-popmean)/math.sqrt(svar*(1.0/n))
prob = abetai(0.5*df, 0.5, df/(df+t*t))
if printit != 0:
statname = 'Single-sample T-test.'
outputpairedstats(printit, writemode,
'Population', '--', popmean, 0, 0, 0,
name, n, x, v, N.minimum.reduce(N.ravel(a)),
N.maximum.reduce(N.ravel(a)),
statname, t, prob)
return t, prob
def attest_ind(a, b, dimension=None, printit=0, name1='Samp1', name2='Samp2', writemode='a'):
"""
Calculates the t-obtained T-test on TWO INDEPENDENT samples of scores
a, and b. From Numerical Recipes, p.483. If printit=1, results are
printed to the screen. If printit='filename', the results are output
to 'filename' using the given writemode (default=append). Dimension
can equal None (ravel array first), or an integer (the dimension over
which to operate on a and b).
Usage: attest_ind (a,b,dimension=None,printit=0,
Name1='Samp1',Name2='Samp2',writemode='a')
Returns: t-value, two-tailed p-value
"""
if dimension is None:
a = N.ravel(a)
b = N.ravel(b)
dimension = 0
x1 = amean(a, dimension)
x2 = amean(b, dimension)
v1 = avar(a, dimension)
v2 = avar(b, dimension)
n1 = a.shape[dimension]
n2 = b.shape[dimension]
df = n1+n2-2
svar = ((n1-1)*v1+(n2-1)*v2) / float(df)
zerodivproblem = N.equal(svar, 0)
svar = N.where(zerodivproblem, 1, svar) # avoid zero-division in 1st place
t = (x1-x2)/N.sqrt(svar*(1.0/n1 + 1.0/n2)) # N-D COMPUTATION HERE!!!!!!
t = N.where(zerodivproblem, 1.0, t) # replace NaN/wrong t-values with 1.0
probs = abetai(0.5*df, 0.5, float(df)/(df+t*t))
if isinstance(t, N.ArrayType):
probs = N.reshape(probs, t.shape)
if len(probs) == 1:
probs = probs[0]
if printit != 0:
if isinstance(t, N.ArrayType):
t = t[0]
if isinstance(probs, N.ArrayType):
probs = probs[0]
statname = 'Independent samples T-test.'
outputpairedstats(printit, writemode,
name1, n1, x1, v1, N.minimum.reduce(N.ravel(a)),
N.maximum.reduce(N.ravel(a)),
name2, n2, x2, v2, N.minimum.reduce(N.ravel(b)),
N.maximum.reduce(N.ravel(b)),
statname, t, probs)
return
return t, probs
def attest_rel(a, b, dimension=None, printit=0, name1='Samp1', name2='Samp2', writemode='a'):
"""
Calculates the t-obtained T-test on TWO RELATED samples of scores, a
and b. From Numerical Recipes, p.483. If printit=1, results are
printed to the screen. If printit='filename', the results are output
to 'filename' using the given writemode (default=append). Dimension
can equal None (ravel array first), or an integer (the dimension over
which to operate on a and b).
Usage: attest_rel(a,b,dimension=None,printit=0,
name1='Samp1',name2='Samp2',writemode='a')
Returns: t-value, two-tailed p-value
"""
if dimension is None:
a = N.ravel(a)
b = N.ravel(b)
dimension = 0
if len(a) != len(b):
raise ValueError('Unequal length arrays.')
x1 = amean(a, dimension)
x2 = amean(b, dimension)
v1 = avar(a, dimension)
v2 = avar(b, dimension)
n = a.shape[dimension]
df = float(n-1)
d = (a-b).astype('d')
denom = N.sqrt((n*N.add.reduce(d*d, dimension) - N.add.reduce(d, dimension)**2) / df)
zerodivproblem = N.equal(denom, 0)
denom = N.where(zerodivproblem, 1, denom) # avoid zero-division in 1st place
t = N.add.reduce(d, dimension) / denom # N-D COMPUTATION HERE!!!!!!
t = N.where(zerodivproblem, 1.0, t) # replace NaN/wrong t-values with 1.0
probs = abetai(0.5*df, 0.5, float(df)/(df+t*t))
if isinstance(t, N.ArrayType):
probs = N.reshape(probs, t.shape)
if len(probs) == 1:
probs = probs[0]
if printit != 0:
statname = 'Related samples T-test.'
outputpairedstats(printit, writemode,
name1, n, x1, v1, N.minimum.reduce(N.ravel(a)),
N.maximum.reduce(N.ravel(a)),
name2, n, x2, v2, N.minimum.reduce(N.ravel(b)),
N.maximum.reduce(N.ravel(b)),
statname, t, probs)
return
return t, probs
def achisquare(f_obs, f_exp=None):
"""
Calculates a one-way chi square for array of observed frequencies and returns
the result. If no expected frequencies are given, the total N is assumed to
be equally distributed across all groups.
Usage: achisquare(f_obs, f_exp=None) f_obs = array of observed cell freq.
Returns: chisquare-statistic, associated p-value
"""
k = len(f_obs)
if f_exp is None:
f_exp = N.array([sum(f_obs)/float(k)] * len(f_obs), N.Float)
f_exp = f_exp.astype(N.Float)
chisq = N.add.reduce((f_obs-f_exp)**2 / f_exp)
return chisq, chisqprob(chisq, k-1)
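# Illustrative example (added; not part of the original |Stat port): a worked
# case with the default (uniform) expected frequencies. For f_obs = [10, 20, 30],
# f_exp defaults to [20, 20, 20], so
#   chisq = (10-20)**2/20 + (20-20)**2/20 + (30-20)**2/20 = 10.0
# with k-1 = 2 degrees of freedom; chisqprob(10.0, 2) is roughly 0.0067.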
def aks_2samp(data1, data2):
"""
Computes the Kolmogorov-Smirnov statistic on 2 samples. Modified from
Numerical Recipes in C, page 493. Returns KS D-value, prob. Not ufunc-
like.
Usage: aks_2samp(data1,data2) where data1 and data2 are 1D arrays
Returns: KS D-value, p-value
"""
j1 = 0 # N.zeros(data1.shape[1:]) TRIED TO MAKE THIS UFUNC-LIKE
j2 = 0 # N.zeros(data2.shape[1:])
fn1 = 0.0 # N.zeros(data1.shape[1:],N.Float)
fn2 = 0.0 # N.zeros(data2.shape[1:],N.Float)
n1 = data1.shape[0]
n2 = data2.shape[0]
en1 = n1*1
en2 = n2*1
d = N.zeros(data1.shape[1:], N.Float)
data1 = N.sort(data1, 0)
data2 = N.sort(data2, 0)
while j1 < n1 and j2 < n2:
d1 = data1[j1]
d2 = data2[j2]
if d1 <= d2:
fn1 = (j1)/float(en1)
j1 = j1 + 1
if d2 <= d1:
fn2 = (j2)/float(en2)
j2 = j2 + 1
dt = (fn2-fn1)
if abs(dt) > abs(d):
d = dt
try:
en = math.sqrt(en1*en2/float(en1+en2))
prob = aksprob((en+0.12+0.11/en)*N.fabs(d))
except Exception:
prob = 1.0
return d, prob
def amannwhitneyu(x, y):
"""
Calculates a Mann-Whitney U statistic on the provided scores and
returns the result. Use only when the n in each condition is < 20 and
you have 2 independent samples of ranks. REMEMBER: Mann-Whitney U is
significant if the u-obtained is LESS THAN or equal to the critical
value of U.
Usage: amannwhitneyu(x,y) where x,y are arrays of values for 2 conditions
Returns: u-statistic, one-tailed p-value (i.e., p(z(U)))
"""
n1 = len(x)
n2 = len(y)
ranked = rankdata(N.concatenate((x, y)))
rankx = ranked[0:n1] # get the x-ranks
u1 = n1*n2 + (n1*(n1+1))/2.0 - sum(rankx) # calc U for x
u2 = n1*n2 - u1 # remainder is U for y
bigu = max(u1, u2)
smallu = min(u1, u2)
T = math.sqrt(tiecorrect(ranked)) # correction factor for tied scores
if T == 0:
raise ValueError('All numbers are identical in amannwhitneyu')
sd = math.sqrt(T*n1*n2*(n1+n2+1)/12.0)
z = abs((bigu-n1*n2/2.0) / sd) # normal approximation for prob calc
return smallu, 1.0 - zprob(z)
def atiecorrect(rankvals):
"""
Tie-corrector for ties in Mann Whitney U and Kruskal Wallis H tests.
See Siegel, S. (1956) Nonparametric Statistics for the Behavioral
Sciences. New York: McGraw-Hill. Code adapted from |Stat rankind.c
code.
Usage: atiecorrect(rankvals)
Returns: T correction factor for U or H
"""
sorted, posn = ashellsort(N.array(rankvals))
n = len(sorted)
T = 0.0
i = 0
while (i < n-1):
if sorted[i] == sorted[i+1]:
nties = 1
while (i < n-1) and (sorted[i] == sorted[i+1]):
nties = nties + 1
i = i + 1
T = T + nties**3 - nties
i = i+1
T = T / float(n**3-n)
return 1.0 - T
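# Illustrative example (added; not part of the original |Stat port): the
# correction is
#   T = 1 - sum(t**3 - t) / (n**3 - n), summed over tie groups of size t.
# For ranks [1, 2.5, 2.5, 4] there is one tie group of size 2, so
#   T = 1 - (8 - 2) / (64 - 4) = 0.9.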
def aranksums(x, y):
"""
Calculates the rank sums statistic on the provided scores and returns
the result.
Usage: aranksums(x,y) where x,y are arrays of values for 2 conditions
Returns: z-statistic, two-tailed p-value
"""
n1 = len(x)
n2 = len(y)
alldata = N.concatenate((x, y))
ranked = arankdata(alldata)
x = ranked[:n1]
y = ranked[n1:]
s = sum(x)
expected = n1*(n1+n2+1) / 2.0
z = (s - expected) / math.sqrt(n1*n2*(n1+n2+1)/12.0)
prob = 2*(1.0 - zprob(abs(z)))
return z, prob
def awilcoxont(x, y):
"""
Calculates the Wilcoxon T-test for related samples and returns the
result. A non-parametric T-test.
Usage: awilcoxont(x,y) where x,y are equal-length arrays for 2 conditions
Returns: t-statistic, two-tailed p-value
"""
if len(x) != len(y):
raise ValueError('Unequal N in awilcoxont. Aborting.')
d = x-y
d = N.compress(N.not_equal(d, 0), d) # Keep all non-zero differences
count = len(d)
absd = abs(d)
absranked = arankdata(absd)
r_plus = 0.0
r_minus = 0.0
for i in range(len(absd)):
if d[i] < 0:
r_minus = r_minus + absranked[i]
else:
r_plus = r_plus + absranked[i]
wt = min(r_plus, r_minus)
mn = count * (count+1) * 0.25
se = math.sqrt(count*(count+1)*(2.0*count+1.0)/24.0)
z = math.fabs(wt-mn) / se
prob = 2*(1.0 - zprob(abs(z)))
return wt, prob
def akruskalwallish(*args):
"""
The Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more
groups, requiring at least 5 subjects in each group. This function
calculates the Kruskal-Wallis H and associated p-value for 3 or more
independent samples.
Usage: akruskalwallish(*args) args are separate arrays for 3+ conditions
Returns: H-statistic (corrected for ties), associated p-value
"""
assert len(args) >= 3, "Need at least 3 groups in stats.akruskalwallish()"
args = list(args)
n = [0]*len(args)
n = [len(_) for _ in args]
all = []
for i in range(len(args)):
all = all + args[i].tolist()
ranked = rankdata(all)
T = tiecorrect(ranked)
for i in range(len(args)):
args[i] = ranked[0:n[i]]
del ranked[0:n[i]]
rsums = []
for i in range(len(args)):
rsums.append(sum(args[i])**2)
rsums[i] = rsums[i] / float(n[i])
ssbn = sum(rsums)
totaln = sum(n)
h = 12.0 / (totaln*(totaln+1)) * ssbn - 3*(totaln+1)
df = len(args) - 1
if T == 0:
raise ValueError('All numbers are identical in akruskalwallish')
h = h / float(T)
return h, chisqprob(h, df)
def afriedmanchisquare(*args):
"""
Friedman Chi-Square is a non-parametric, one-way within-subjects
ANOVA. This function calculates the Friedman Chi-square test for
repeated measures and returns the result, along with the associated
probability value. It assumes 3 or more repeated measures. Only 3
levels requires a minimum of 10 subjects in the study. Four levels
requires 5 subjects per level(??).
Usage: afriedmanchisquare(*args) args are separate arrays for 2+ conditions
Returns: chi-square statistic, associated p-value
"""
k = len(args)
if k < 3:
raise ValueError('\nLess than 3 levels. Friedman test not appropriate.\n')
n = len(args[0])
data = pstat.aabut(*args)
data = data.astype(N.Float)
for i in range(len(data)):
data[i] = arankdata(data[i])
ssbn = asum(asum(args, 1)**2)
chisq = 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)
return chisq, chisqprob(chisq, k-1)
# APROBABILITY CALCULATIONS
def achisqprob(chisq, df):
"""
Returns the (1-tail) probability value associated with the provided chi-square
value and df. Heavily modified from chisq.c in Gary Perlman's |Stat. Can
handle multiple dimensions.
Usage: achisqprob(chisq,df) chisq=chisquare stat., df=degrees of freedom
"""
BIG = 200.0
def ex(x):
BIG = 200.0
exponents = N.where(N.less(x, -BIG), -BIG, x)
return N.exp(exponents)
if not isinstance(chisq, N.ArrayType):
chisq = N.array([chisq])
if df < 1:
return N.ones(chisq.shape, N.Float)
probs = N.zeros(chisq.shape, N.Float)
probs = N.where(N.less_equal(chisq, 0), 1.0, probs) # set prob=1 for chisq<0
a = 0.5 * chisq
if df > 1:
y = ex(-a)
if df % 2 == 0:
even = 1
s = y*1
s2 = s*1
else:
even = 0
s = 2.0 * azprob(-N.sqrt(chisq))
s2 = s*1
if (df > 2):
chisq = 0.5 * (df - 1.0)
if even:
z = N.ones(probs.shape, N.Float)
else:
z = 0.5 * N.ones(probs.shape, N.Float)
if even:
e = N.zeros(probs.shape, N.Float)
else:
e = N.log(N.sqrt(N.pi)) * N.ones(probs.shape, N.Float)
c = N.log(a)
mask = N.zeros(probs.shape)
a_big = N.greater(a, BIG)
a_big_frozen = -1 * N.ones(probs.shape, N.Float)
totalelements = N.multiply.reduce(N.array(probs.shape))
while asum(mask) != totalelements:
e = N.log(z) + e
s = s + ex(c*z-a-e)
z = z + 1.0
# print z, e, s
newmask = N.greater(z, chisq)
a_big_frozen = N.where(newmask*N.equal(mask, 0)*a_big, s, a_big_frozen)
mask = N.clip(newmask+mask, 0, 1)
if even:
z = N.ones(probs.shape, N.Float)
e = N.ones(probs.shape, N.Float)
else:
z = 0.5 * N.ones(probs.shape, N.Float)
e = 1.0 / N.sqrt(N.pi) / N.sqrt(a) * N.ones(probs.shape, N.Float)
c = 0.0
mask = N.zeros(probs.shape)
a_notbig_frozen = -1 * N.ones(probs.shape, N.Float)
while asum(mask) != totalelements:
e = e * (a/z.astype(N.Float))
c = c + e
z = z + 1.0
# print '#2', z, e, c, s, c*y+s2
newmask = N.greater(z, chisq)
a_notbig_frozen = N.where(newmask*N.equal(mask, 0)*(1-a_big),
c*y+s2, a_notbig_frozen)
mask = N.clip(newmask+mask, 0, 1)
probs = N.where(N.equal(probs, 1), 1,
N.where(N.greater(a, BIG), a_big_frozen, a_notbig_frozen))
return probs
else:
return s
def aerfcc(x):
"""
Returns the complementary error function erfc(x) with fractional error
everywhere less than 1.2e-7. Adapted from Numerical Recipes. Can
handle multiple dimensions.
Usage: aerfcc(x)
"""
z = abs(x)
t = 1.0 / (1.0+0.5*z)
ans = t * N.exp(-z*z-1.26551223 + t*(1.00002368+t*(0.37409196+t*(0.09678418+t*(-0.18628806+t*(0.27886807+t*(-1.13520398+t*(1.48851587+t*(-0.82215223+t*0.17087277)))))))))
return N.where(N.greater_equal(x, 0), ans, 2.0-ans)
def azprob(z):
"""
Returns the area under the normal curve 'to the left of' the given z value.
Thus,
for z<0, zprob(z) = 1-tail probability
for z>0, 1.0-zprob(z) = 1-tail probability
for any z, 2.0*(1.0-zprob(abs(z))) = 2-tail probability
Adapted from z.c in Gary Perlman's |Stat. Can handle multiple dimensions.
Usage: azprob(z) where z is a z-value
"""
def yfunc(y):
x = (((((((((((((-0.000045255659 * y
+ 0.000152529290) * y - 0.000019538132) * y
- 0.000676904986) * y + 0.001390604284) * y
- 0.000794620820) * y - 0.002034254874) * y
+ 0.006549791214) * y - 0.010557625006) * y
+ 0.011630447319) * y - 0.009279453341) * y
+ 0.005353579108) * y - 0.002141268741) * y
+ 0.000535310849) * y + 0.999936657524
return x
def wfunc(w):
x = ((((((((0.000124818987 * w
- 0.001075204047) * w + 0.005198775019) * w
- 0.019198292004) * w + 0.059054035642) * w
- 0.151968751364) * w + 0.319152932694) * w
- 0.531923007300) * w + 0.797884560593) * N.sqrt(w) * 2.0
return x
Z_MAX = 6.0 # maximum meaningful z-value
x = N.zeros(z.shape, N.Float) # initialize
y = 0.5 * N.fabs(z)
x = N.where(N.less(y, 1.0), wfunc(y*y), yfunc(y-2.0)) # get x's
x = N.where(N.greater(y, Z_MAX*0.5), 1.0, x) # kill those with big Z
prob = N.where(N.greater(z, 0), (x+1)*0.5, (1-x)*0.5)
return prob
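# A rough sketch of azprob usage -- the implementation indexes z.shape, so
# pass a Numeric array rather than a bare float; values are approximate:
#     azprob(N.array([0.0]))    # -> about 0.5   (half the area lies left of 0)
#     azprob(N.array([1.96]))   # -> about 0.975
#     azprob(N.array([-1.96]))  # -> about 0.025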
def aksprob(alam):
"""
Returns the probability value for a K-S statistic computed via ks_2samp.
Adapted from Numerical Recipes. Can handle multiple dimensions.
Usage: aksprob(alam)
"""
if isinstance(alam, N.ArrayType):
frozen = -1 * N.ones(alam.shape, N.Float64)
alam = alam.astype(N.Float64)
arrayflag = 1
else:
frozen = N.array(-1.)
alam = N.array(alam, N.Float64)
arrayflag = 0
mask = N.zeros(alam.shape)
fac = 2.0 * N.ones(alam.shape, N.Float)
sum = N.zeros(alam.shape, N.Float)
termbf = N.zeros(alam.shape, N.Float)
a2 = N.array(-2.0*alam*alam, N.Float64)
totalelements = N.multiply.reduce(N.array(mask.shape))
for j in range(1, 201):
if asum(mask) == totalelements:
break
exponents = (a2*j*j)
overflowmask = N.less(exponents, -746)
frozen = N.where(overflowmask, 0, frozen)
mask = mask+overflowmask
term = fac*N.exp(exponents)
sum = sum + term
newmask = N.where(N.less_equal(abs(term), (0.001*termbf))
+ N.less(abs(term), 1.0e-8*sum), 1, 0)
frozen = N.where(newmask*N.equal(mask, 0), sum, frozen)
mask = N.clip(mask+newmask, 0, 1)
fac = -fac
termbf = abs(term)
if arrayflag:
return N.where(N.equal(frozen, -1), 1.0, frozen) # 1.0 if doesn't converge
else:
return N.where(N.equal(frozen, -1), 1.0, frozen)[0] # 1.0 if doesn't converge
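# Orientation for aksprob: small K-S statistics give probabilities near 1
# (no evidence against the null), large ones give probabilities near 0.
# Approximate spot checks, assuming array input as passed in by aks_2samp:
#     aksprob(N.array([0.5]))   # -> about 0.96
#     aksprob(N.array([2.0]))   # -> about 0.0007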
def afprob(dfnum, dfden, F):
"""
Returns the 1-tailed significance level (p-value) of an F statistic
given the degrees of freedom for the numerator (dfR-dfF) and the degrees
of freedom for the denominator (dfF). Can handle multiple dims for F.
Usage: afprob(dfnum, dfden, F) where usually dfnum=dfbn, dfden=dfwn
"""
if isinstance(F, N.ArrayType):
return abetai(0.5*dfden, 0.5*dfnum, dfden/(1.0*dfden+dfnum*F))
else:
return abetai(0.5*dfden, 0.5*dfnum, dfden/float(dfden+dfnum*F))
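# A rough spot check for afprob (values approximate): with 1 and 10 degrees
# of freedom the 5% critical F is about 4.96, so
#     afprob(1, 10, 4.96)   # -> about 0.05
# and larger F values give smaller 1-tailed p-values.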
def abetacf(a, b, x, verbose=1):
"""
Evaluates the continued fraction form of the incomplete Beta function,
betai. (Adapted from: Numerical Recipes in C.) Can handle multiple
dimensions for x.
Usage: abetacf(a,b,x,verbose=1)
"""
ITMAX = 200
EPS = 3.0e-7
arrayflag = 1
if isinstance(x, N.ArrayType):
frozen = N.ones(x.shape, N.Float) * -1 # start out w/ -1s, should replace all
else:
arrayflag = 0
frozen = N.array([-1])
x = N.array([x])
mask = N.zeros(x.shape)
bm = az = am = 1.0
qab = a+b
qap = a+1.0
qam = a-1.0
bz = 1.0-qab*x/qap
for i in range(ITMAX+1):
if N.sum(N.ravel(N.equal(frozen, -1))) == 0:
break
em = float(i+1)
tem = em + em
d = em*(b-em)*x/((qam+tem)*(a+tem))
ap = az + d*am
bp = bz+d*bm
d = -(a+em)*(qab+em)*x/((qap+tem)*(a+tem))
app = ap+d*az
bpp = bp+d*bz
aold = az*1
am = ap/bpp
bm = bp/bpp
az = app/bpp
bz = 1.0
newmask = N.less(abs(az-aold), EPS*abs(az))
frozen = N.where(newmask*N.equal(mask, 0), az, frozen)
mask = N.clip(mask+newmask, 0, 1)
noconverge = asum(N.equal(frozen, -1))
if noconverge != 0 and verbose:
print('a or b too big, or ITMAX too small in Betacf for ', noconverge, ' elements')
if arrayflag:
return frozen
else:
return frozen[0]
def agammln(xx):
"""
Returns the gamma function of xx.
Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.
Adapted from: Numerical Recipes in C. Can handle multiple dims ... but
probably doesn't normally have to.
Usage: agammln(xx)
"""
coeff = [76.18009173, -86.50532033, 24.01409822, -1.231739516,
0.120858003e-2, -0.536382e-5]
x = xx - 1.0
tmp = x + 5.5
tmp = tmp - (x+0.5)*N.log(tmp)
ser = 1.0
for j in range(len(coeff)):
x = x + 1
ser = ser + coeff[j]/x
return -tmp + N.log(2.50662827465*ser)
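# Quick checks for agammln (log-gamma), approximate values:
#     agammln(1.0)   # -> about 0.0    (gamma(1) = 1)
#     agammln(6.0)   # -> about 4.787  (log(5!) = log(120))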
def abetai(a, b, x, verbose=1):
"""
Returns the incomplete beta function:
I-sub-x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)
where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
function of a. The continued fraction formulation is implemented
here, using the betacf function. (Adapted from: Numerical Recipes in
C.) Can handle multiple dimensions.
Usage: abetai(a,b,x,verbose=1)
"""
TINY = 1e-15
if isinstance(a, N.ArrayType):
if asum(N.less(x, 0)+N.greater(x, 1)) != 0:
raise ValueError('Bad x in abetai')
x = N.where(N.equal(x, 0), TINY, x)
x = N.where(N.equal(x, 1.0), 1-TINY, x)
bt = N.where(N.equal(x, 0)+N.equal(x, 1), 0, -1)
exponents = (gammln(a+b)-gammln(a)-gammln(b)+a*N.log(x)+b * N.log(1.0-x))
# 746 (below) is the MAX POSSIBLE BEFORE OVERFLOW
exponents = N.where(N.less(exponents, -740), -740, exponents)
bt = N.exp(exponents)
if isinstance(x, N.ArrayType):
ans = N.where(N.less(x, (a+1)/(a+b+2.0)),
bt*abetacf(a, b, x, verbose)/float(a),
1.0-bt*abetacf(b, a, 1.0-x, verbose)/float(b))
else:
if x < (a+1)/(a+b+2.0):
ans = bt*abetacf(a, b, x, verbose)/float(a)
else:
ans = 1.0-bt*abetacf(b, a, 1.0-x, verbose)/float(b)
return ans
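# Orientation for abetai: the incomplete beta I_x(a,b) runs from 0 at x=0 up
# to 1 at x=1, and is symmetric about x=0.5 when a == b, so a spot check like
#     abetai(2.0, 2.0, 0.5)   # -> about 0.5
# should hold (the exact return type may be a 0-d array, depending on how
# Numeric's where() treats scalar inputs).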
# AANOVA CALCULATIONS
import LinearAlgebra
LA = LinearAlgebra
def aglm(data, para):
"""
Calculates a linear model fit ... anova/ancova/lin-regress/t-test/etc. Taken
from:
Peterson et al. Statistical limitations in functional neuroimaging
I. Non-inferential methods and statistical models. Phil Trans Royal Soc
Lond B 354: 1239-1260.
Usage: aglm(data,para)
Returns: statistic, p-value ???
"""
if len(para) != len(data):
print("data and para must be same length in aglm")
return
n = len(para)
p = pstat.aunique(para)
x = N.zeros((n, len(p))) # design matrix
for l in range(len(p)):
x[:, l] = N.equal(para, p[l])
b = N.dot(N.dot(LA.inverse(N.dot(N.transpose(x), x)), # i.e., b=inv(X'X)X'Y
N.transpose(x)),
data)
diffs = (data - N.dot(x, b))
s_sq = 1./(n-len(p)) * N.dot(N.transpose(diffs), diffs)
if len(p) == 2: # ttest_ind
c = N.array([1, -1])
df = n-2
fact = asum(1.0/asum(x, 0)) # i.e., 1/n1 + 1/n2 + 1/n3 ...
t = N.dot(c, b) / N.sqrt(s_sq*fact)
probs = abetai(0.5*df, 0.5, float(df)/(df+t*t))
return t, probs
def aF_oneway(*args):
"""
Performs a 1-way ANOVA, returning an F-value and probability given
any number of groups. From Heiman, pp.394-7.
Usage: aF_oneway (*args) where *args is 2 or more arrays, one per
treatment group
Returns: f-value, probability
"""
na = len(args) # ANOVA on 'na' groups, each in its own array
alldata = []
alldata = N.concatenate(args)
bign = len(alldata)
sstot = ass(alldata)-(asquare_of_sums(alldata)/float(bign))
ssbn = 0
for a in args:
ssbn = ssbn + asquare_of_sums(N.array(a))/float(len(a))
ssbn = ssbn - (asquare_of_sums(alldata)/float(bign))
sswn = sstot-ssbn
dfbn = na-1
dfwn = bign - na
msb = ssbn/float(dfbn)
msw = sswn/float(dfwn)
f = msb/msw
prob = fprob(dfbn, dfwn, f)
return f, prob
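# A minimal sketch of aF_oneway: identical groups have no between-group
# variance, so the F statistic collapses to zero and the p-value to one:
#     aF_oneway(N.array([1., 2., 3.]), N.array([1., 2., 3.]))   # -> (0.0, 1.0)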
def aF_value(ER, EF, dfR, dfF):
"""
Returns an F-statistic given the following:
ER = error associated with the null hypothesis (the Restricted model)
EF = error associated with the alternate hypothesis (the Full model)
dfR = degrees of freedom associated with the Restricted model
dfF = degrees of freedom associated with the Full model
"""
return ((ER-EF)/float(dfR-dfF) / (EF/float(dfF)))
def outputfstats(Enum, Eden, dfnum, dfden, f, prob):
Enum = round(Enum, 3)
Eden = round(Eden, 3)
dfnum = round(dfnum, 3)
dfden = round(dfden, 3)
f = round(f, 3)
prob = round(prob, 3)
suffix = '' # for *s after the p-value
if prob < 0.001:
suffix = ' ***'
elif prob < 0.01:
suffix = ' **'
elif prob < 0.05:
suffix = ' *'
title = [['EF/ER', 'DF', 'Mean Square', 'F-value', 'prob', '']]
lofl = title+[[Enum, dfnum, round(Enum/float(dfnum), 3), f, prob, suffix],
[Eden, dfden, round(Eden/float(dfden), 3), '', '', '']]
pstat.printcc(lofl)
return
def F_value_multivariate(ER, EF, dfnum, dfden):
"""
Returns an F-statistic given the following:
ER = error associated with the null hypothesis (the Restricted model)
EF = error associated with the alternate hypothesis (the Full model)
dfR = degrees of freedom associated with the Restricted model
dfF = degrees of freedom associated with the Full model
where ER and EF are matrices from a multivariate F calculation.
"""
if type(ER) in [int, float]:
ER = N.array([[ER]])
if type(EF) in [int, float]:
EF = N.array([[EF]])
n_um = (LA.determinant(ER) - LA.determinant(EF)) / float(dfnum)
d_en = LA.determinant(EF) / float(dfden)
return n_um / d_en
# ASUPPORT FUNCTIONS
def asign(a):
"""
Usage: asign(a)
Returns: array shape of a, with -1 where a<0 and +1 where a>=0
"""
a = N.asarray(a)
if ((isinstance(a, type(1.4))) or (isinstance(a, type(1)))):
return a-a-N.less(a, 0)+N.greater(a, 0)
else:
return N.zeros(N.shape(a))-N.less(a, 0)+N.greater(a, 0)
def asum(a, dimension=None, keepdims=0):
"""
An alternative to the Numeric.add.reduce function, which allows one to
(1) collapse over multiple dimensions at once, and/or (2) to retain
all dimensions in the original array (squashing collapsed dimensions down to size 1).
Dimension can equal None (ravel array first), an integer (the
dimension over which to operate), or a sequence (operate over multiple
dimensions). If keepdims=1, the resulting array will have as many
dimensions as the input array.
Usage: asum(a, dimension=None, keepdims=0)
Returns: array summed along 'dimension'(s), same _number_ of dims if keepdims=1
"""
if isinstance(a, N.ArrayType) and a.typecode() in ['l', 's', 'b']:
a = a.astype(N.Float)
if dimension is None:
s = N.sum(N.ravel(a))
elif type(dimension) in [int, float]:
s = N.add.reduce(a, dimension)
if keepdims == 1:
shp = list(a.shape)
shp[dimension] = 1
s = N.reshape(s, shp)
else: # must be a SEQUENCE of dims to sum over
dims = sorted(dimension)
dims.reverse()
s = a * 1.0
for dim in dims:
s = N.add.reduce(s, dim)
if keepdims == 1:
shp = list(a.shape)
for dim in dims:
shp[dim] = 1
s = N.reshape(s, shp)
return s
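# Usage sketch for asum on a small 2x2 Numeric array (values exact here):
#     a = N.array([[1, 2], [3, 4]])
#     asum(a)          # -> 10        (ravel, then grand total)
#     asum(a, 0)       # -> [4 6]     (collapse over rows)
#     asum(a, 0, 1)    # -> [[4 6]]   (keepdims retains the collapsed axis)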
def acumsum(a, dimension=None):
"""
Returns an array consisting of the cumulative sum of the items in the
passed array. Dimension can equal None (ravel array first), an
integer (the dimension over which to operate), or a sequence (operate
over multiple dimensions, but this last one just barely makes sense).
Usage: acumsum(a,dimension=None)
"""
if dimension is None:
a = N.ravel(a)
dimension = 0
if type(dimension) in [list, tuple, N.ArrayType]:
dimension = sorted(dimension)
dimension.reverse()
for d in dimension:
a = N.add.accumulate(a, d)
return a
else:
return N.add.accumulate(a, dimension)
def ass(inarray, dimension=None, keepdims=0):
"""
Squares each value in the passed array, adds these squares & returns
the result. Unfortunate function name. :-) Defaults to ALL values in
the array. Dimension can equal None (ravel array first), an integer
(the dimension over which to operate), or a sequence (operate over
multiple dimensions). Set keepdims=1 to maintain the original number
of dimensions.
Usage: ass(inarray, dimension=None, keepdims=0)
Returns: sum-along-'dimension' for (inarray*inarray)
"""
if dimension is None:
inarray = N.ravel(inarray)
dimension = 0
return asum(inarray*inarray, dimension, keepdims)
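# Spot check for ass (sum of squares):
#     ass(N.array([1, 2, 3]))   # -> 14.0  (1 + 4 + 9)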
def asummult(array1, array2, dimension=None, keepdims=0):
"""
Multiplies elements in array1 and array2, element by element, and
returns the sum (along 'dimension') of all resulting multiplications.
Dimension can equal None (ravel array first), an integer (the
dimension over which to operate), or a sequence (operate over multiple
dimensions). A trivial function, but included for completeness.
Usage: asummult(array1,array2,dimension=None,keepdims=0)
"""
if dimension is None:
array1 = N.ravel(array1)
array2 = N.ravel(array2)
dimension = 0
return asum(array1*array2, dimension, keepdims)
def asquare_of_sums(inarray, dimension=None, keepdims=0):
"""
Adds the values in the passed array, squares that sum, and returns the
result. Dimension can equal None (ravel array first), an integer (the
dimension over which to operate), or a sequence (operate over multiple
dimensions). If keepdims=1, the returned array will have the same
NUMBER of dimensions as the original.
Usage: asquare_of_sums(inarray, dimension=None, keepdims=0)
Returns: the square of the sum over dim(s) in dimension
"""
if dimension is None:
inarray = N.ravel(inarray)
dimension = 0
s = asum(inarray, dimension, keepdims)
if isinstance(s, N.ArrayType):
return s.astype(N.Float)*s
else:
return float(s)*s
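# Spot check for asquare_of_sums (square of the sum, not the sum of squares):
#     asquare_of_sums(N.array([1, 2, 3]))   # -> 36.0  ((1+2+3)**2)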
def asumdiffsquared(a, b, dimension=None, keepdims=0):
"""
Takes pairwise differences of the values in arrays a and b, squares
these differences, and returns the sum of these squares. Dimension
can equal None (ravel array first), an integer (the dimension over
which to operate), or a sequence (operate over multiple dimensions).
keepdims=1 means the return shape = len(a.shape) = len(b.shape)
Usage: asumdiffsquared(a,b)
Returns: sum[ravel(a-b)**2]
"""
if dimension is None:
a = N.ravel(a)
b = N.ravel(b)
dimension = 0
return asum((a-b)**2, dimension, keepdims)
def ashellsort(inarray):
"""
Shellsort algorithm. Sorts a 1D-array.
Usage: ashellsort(inarray)
Returns: sorted-inarray, sorting-index-vector (for original array)
"""
n = len(inarray)
svec = inarray * 1.0
ivec = list(range(n))
gap = n // 2 # integer division
while gap > 0:
for i in range(gap, n):
for j in range(i-gap, -1, -gap):
while j >= 0 and svec[j] > svec[j+gap]:
temp = svec[j]
svec[j] = svec[j+gap]
svec[j+gap] = temp
itemp = ivec[j]
ivec[j] = ivec[j+gap]
ivec[j+gap] = itemp
gap = gap // 2 # integer division
# svec is now sorted input vector, ivec has the order svec[i] = vec[ivec[i]]
return svec, ivec
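# Usage sketch for ashellsort -- svec is the sorted copy, ivec maps sorted
# positions back to the original indices (svec[i] == inarray[ivec[i]]):
#     ashellsort(N.array([3, 1, 2]))   # -> ([1., 2., 3.], [1, 2, 0])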
def arankdata(inarray):
"""
Ranks the data in inarray, dealing with ties appropriately. Assumes
a 1D inarray. Adapted from Gary Perlman's |Stat ranksort.
Usage: arankdata(inarray)
Returns: array of length equal to inarray, containing rank scores
"""
n = len(inarray)
svec, ivec = ashellsort(inarray)
sumranks = 0
dupcount = 0
newarray = N.zeros(n, N.Float)
for i in range(n):
sumranks = sumranks + i
dupcount = dupcount + 1
if i == n-1 or svec[i] != svec[i+1]:
averank = sumranks / float(dupcount) + 1
for j in range(i-dupcount+1, i+1):
newarray[ivec[j]] = averank
sumranks = 0
dupcount = 0
return newarray
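# Usage sketch for arankdata -- tied values share the average of their ranks:
#     arankdata(N.array([40, 10, 30, 10]))   # -> [4.0, 1.5, 3.0, 1.5]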
def afindwithin(data):
"""
Returns a binary vector, 1=within-subject factor, 0=between. Input
equals the entire data array (i.e., column 1=random factor, last
column = measured values).
Usage: afindwithin(data) data in |Stat format
"""
numfact = len(data[0])-2
withinvec = [0]*numfact
for col in range(1, numfact+1):
rows = pstat.linexand(data, col, pstat.unique(pstat.colex(data, 1))[0]) # get 1 level of this factor
if len(pstat.unique(pstat.colex(rows, 0))) < len(rows): # if fewer subjects than scores on this factor
withinvec[col-1] = 1
return withinvec
# RE-DEFINE DISPATCHES TO INCLUDE ARRAYS
# CENTRAL TENDENCY:
geometricmean = Dispatch((lgeometricmean, (list, tuple)), (ageometricmean, (N.ArrayType,)))
harmonicmean = Dispatch((lharmonicmean, (list, tuple)), (aharmonicmean, (N.ArrayType,)))
mean = Dispatch((lmean, (list, tuple)), (amean, (N.ArrayType,)))
median = Dispatch((lmedian, (list, tuple)), (amedian, (N.ArrayType,)))
medianscore = Dispatch((lmedianscore, (list, tuple)), (amedianscore, (N.ArrayType,)))
mode = Dispatch((lmode, (list, tuple)), (amode, (N.ArrayType,)))
tmean = Dispatch((atmean, (N.ArrayType,)))
tvar = Dispatch((atvar, (N.ArrayType,)))
tstdev = Dispatch((atstdev, (N.ArrayType,)))
tsem = Dispatch((atsem, (N.ArrayType,)))
# VARIATION:
moment = Dispatch((lmoment, (list, tuple)), (amoment, (N.ArrayType,)))
variation = Dispatch((lvariation, (list, tuple)), (avariation, (N.ArrayType,)))
skew = Dispatch((lskew, (list, tuple)), (askew, (N.ArrayType,)))
kurtosis = Dispatch((lkurtosis, (list, tuple)), (akurtosis, (N.ArrayType,)))
describe = Dispatch((ldescribe, (list, tuple)), (adescribe, (N.ArrayType,)))
# DISTRIBUTION TESTS
skewtest = Dispatch((askewtest, (list, tuple)), (askewtest, (N.ArrayType,)))
kurtosistest = Dispatch((akurtosistest, (list, tuple)), (akurtosistest, (N.ArrayType,)))
normaltest = Dispatch((anormaltest, (list, tuple)), (anormaltest, (N.ArrayType,)))
# FREQUENCY STATS:
itemfreq = Dispatch((litemfreq, (list, tuple)), (aitemfreq, (N.ArrayType,)))
scoreatpercentile = Dispatch((lscoreatpercentile, (list, tuple)), (ascoreatpercentile, (N.ArrayType,)))
percentileofscore = Dispatch((lpercentileofscore, (list, tuple)), (apercentileofscore, (N.ArrayType,)))
histogram = Dispatch((lhistogram, (list, tuple)), (ahistogram, (N.ArrayType,)))
cumfreq = Dispatch((lcumfreq, (list, tuple)), (acumfreq, (N.ArrayType,)))
relfreq = Dispatch((lrelfreq, (list, tuple)), (arelfreq, (N.ArrayType,)))
# VARIABILITY:
obrientransform = Dispatch((lobrientransform, (list, tuple)), (aobrientransform, (N.ArrayType,)))
samplevar = Dispatch((lsamplevar, (list, tuple)), (asamplevar, (N.ArrayType,)))
samplestdev = Dispatch((lsamplestdev, (list, tuple)), (asamplestdev, (N.ArrayType,)))
signaltonoise = Dispatch((asignaltonoise, (N.ArrayType,)),)
var = Dispatch((lvar, (list, tuple)), (avar, (N.ArrayType,)))
stdev = Dispatch((lstdev, (list, tuple)), (astdev, (N.ArrayType,)))
sterr = Dispatch((lsterr, (list, tuple)), (asterr, (N.ArrayType,)))
sem = Dispatch((lsem, (list, tuple)), (asem, (N.ArrayType,)))
z = Dispatch((lz, (list, tuple)), (az, (N.ArrayType,)))
zs = Dispatch((lzs, (list, tuple)), (azs, (N.ArrayType,)))
# TRIMMING FCNS:
threshold = Dispatch((athreshold, (N.ArrayType,)),)
trimboth = Dispatch((ltrimboth, (list, tuple)), (atrimboth, (N.ArrayType,)))
trim1 = Dispatch((ltrim1, (list, tuple)), (atrim1, (N.ArrayType,)))
# CORRELATION FCNS:
paired = Dispatch((lpaired, (list, tuple)), (apaired, (N.ArrayType,)))
pearsonr = Dispatch((lpearsonr, (list, tuple)), (apearsonr, (N.ArrayType,)))
spearmanr = Dispatch((lspearmanr, (list, tuple)), (aspearmanr, (N.ArrayType,)))
pointbiserialr = Dispatch((lpointbiserialr, (list, tuple)), (apointbiserialr, (N.ArrayType,)))
kendalltau = Dispatch((lkendalltau, (list, tuple)), (akendalltau, (N.ArrayType,)))
linregress = Dispatch((llinregress, (list, tuple)), (alinregress, (N.ArrayType,)))
# INFERENTIAL STATS:
ttest_1samp = Dispatch((lttest_1samp, (list, tuple)), (attest_1samp, (N.ArrayType,)))
ttest_ind = Dispatch((lttest_ind, (list, tuple)), (attest_ind, (N.ArrayType,)))
ttest_rel = Dispatch((lttest_rel, (list, tuple)), (attest_rel, (N.ArrayType,)))
chisquare = Dispatch((lchisquare, (list, tuple)), (achisquare, (N.ArrayType,)))
ks_2samp = Dispatch((lks_2samp, (list, tuple)), (aks_2samp, (N.ArrayType,)))
mannwhitneyu = Dispatch((lmannwhitneyu, (list, tuple)), (amannwhitneyu, (N.ArrayType,)))
tiecorrect = Dispatch((ltiecorrect, (list, tuple)), (atiecorrect, (N.ArrayType,)))
ranksums = Dispatch((lranksums, (list, tuple)), (aranksums, (N.ArrayType,)))
wilcoxont = Dispatch((lwilcoxont, (list, tuple)), (awilcoxont, (N.ArrayType,)))
kruskalwallish = Dispatch((lkruskalwallish, (list, tuple)), (akruskalwallish, (N.ArrayType,)))
friedmanchisquare = Dispatch((lfriedmanchisquare, (list, tuple)), (afriedmanchisquare, (N.ArrayType,)))
# PROBABILITY CALCS:
chisqprob = Dispatch((lchisqprob, (int, float)), (achisqprob, (N.ArrayType,)))
zprob = Dispatch((lzprob, (int, float)), (azprob, (N.ArrayType,)))
ksprob = Dispatch((lksprob, (int, float)), (aksprob, (N.ArrayType,)))
fprob = Dispatch((lfprob, (int, float)), (afprob, (N.ArrayType,)))
betacf = Dispatch((lbetacf, (int, float)), (abetacf, (N.ArrayType,)))
betai = Dispatch((lbetai, (int, float)), (abetai, (N.ArrayType,)))
erfcc = Dispatch((lerfcc, (int, float)), (aerfcc, (N.ArrayType,)))
gammln = Dispatch((lgammln, (int, float)), (agammln, (N.ArrayType,)))
# ANOVA FUNCTIONS:
F_oneway = Dispatch((lF_oneway, (list, tuple)), (aF_oneway, (N.ArrayType,)))
F_value = Dispatch((lF_value, (list, tuple)), (aF_value, (N.ArrayType,)))
# SUPPORT FUNCTIONS:
incr = Dispatch((lincr, (list, tuple, N.ArrayType)), )
sum = Dispatch((lsum, (list, tuple)), (asum, (N.ArrayType,)))
cumsum = Dispatch((lcumsum, (list, tuple)), (acumsum, (N.ArrayType,)))
ss = Dispatch((lss, (list, tuple)), (ass, (N.ArrayType,)))
summult = Dispatch((lsummult, (list, tuple)), (asummult, (N.ArrayType,)))
square_of_sums = Dispatch((lsquare_of_sums, (list, tuple)), (asquare_of_sums, (N.ArrayType,)))
sumdiffsquared = Dispatch((lsumdiffsquared, (list, tuple)), (asumdiffsquared, (N.ArrayType,)))
shellsort = Dispatch((lshellsort, (list, tuple)), (ashellsort, (N.ArrayType,)))
rankdata = Dispatch((lrankdata, (list, tuple)), (arankdata, (N.ArrayType,)))
findwithin = Dispatch((lfindwithin, (list, tuple)), (afindwithin, (N.ArrayType,)))
# END OF NUMERIC FUNCTION BLOCK
# END OF STATISTICAL FUNCTIONS
except ImportError:
pass
|
mit
| 3,670,309,789,992,234,000 | 36.216602 | 178 | 0.577693 | false |
Pikecillo/genna
|
external/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/mb_20010914.py
|
1
|
4041
|
# At the time of this writing,
# 4XSLT generates a traceback when you do an apply-templates on a result tree
# fragment. It should generate a friendly (but equally fatal) error.
#
from Xml.Xslt import test_harness
sheet_1 = """\
<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:exslt="http://exslt.org/common"
exclude-result-prefixes="exslt">
<xsl:output method="xml" indent="yes"/>
<!--
This template processes the root node of an arbitrary source tree
-->
<xsl:template match="/">
<!-- a result tree fragment containing a root node and an element -->
<xsl:variable name="myTree">
<myElement/>
</xsl:variable>
<!--
The output should be a myResult element that contains the result
of processing the nodes in the $myTree fragment.
-->
<myResult>
<xsl:apply-templates select="exslt:node-set($myTree)" mode="foo">
<xsl:with-param name="myParameter" select="'hello world'"/>
</xsl:apply-templates>
</myResult>
</xsl:template>
<!-- This template processes the root node of the fragment -->
<xsl:template match="/" mode="foo">
<xsl:param name="myParameter"/>
<note>
<xsl:text>Processing the root node of the fragment. </xsl:text>
<xsl:value-of select="$myParameter"/>
</note>
<xsl:apply-templates mode="foo"/> <!-- note we do not pass the parameter -->
</xsl:template>
<!-- This template processes the 'myElement' node of the fragment -->
<xsl:template match="myElement" mode="foo">
<xsl:param name="myParameter"/>
<note>
<xsl:text>Processing the 'myElement' node of the fragment. </xsl:text>
<xsl:value-of select="$myParameter"/>
</note>
<note>
<xsl:text>This element has </xsl:text>
<xsl:value-of select="count(ancestor::node())"/>
<xsl:text> ancestor(s).</xsl:text>
</note>
</xsl:template>
</xsl:stylesheet>"""
expected_1 = """\
<?xml version='1.0' encoding='UTF-8'?>
<myResult>
<note>Processing the root node of the fragment. hello world</note>
<note>Processing the 'myElement' node of the fragment. </note>
<note>This element has 1 ancestor(s).</note>
</myResult>"""
sheet_2 = """\
<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:exslt="http://exslt.org/common"
exclude-result-prefixes="exslt">
<xsl:output method="xml" indent="yes"/>
<!--
This template processes the root node of an arbitrary source tree
-->
<xsl:template match="/">
<!-- a result tree fragment containing a root node and an element -->
<xsl:variable name="myTree">
<myElement/>
</xsl:variable>
<!--
The output should be a myResult element that contains the result
of processing the nodes in the $myTree fragment.
-->
<myResult>
<xsl:apply-templates select="exslt:node-set($myTree)" mode="foo">
<xsl:with-param name="myParameter" select="'hello world'"/>
</xsl:apply-templates>
</myResult>
</xsl:template>
<!-- This template processes the 'myElement' node of the fragment -->
<xsl:template match="myElement" mode="foo">
<xsl:param name="myParameter"/>
<note>
<xsl:text>Processing the 'myElement' node of the fragment. </xsl:text>
<xsl:value-of select="$myParameter"/>
</note>
</xsl:template>
</xsl:stylesheet>"""
expected_2 = """\
<?xml version='1.0' encoding='UTF-8'?>
<myResult>
<note>Processing the 'myElement' node of the fragment. </note>
</myResult>"""
def Test(tester):
source = test_harness.FileInfo(string=sheet_1)
sheet = test_harness.FileInfo(string=sheet_1)
test_harness.XsltTest(tester, source, [sheet], expected_1,
title='Case 1')
source = test_harness.FileInfo(string=sheet_2)
sheet = test_harness.FileInfo(string=sheet_2)
test_harness.XsltTest(tester, source, [sheet], expected_2,
title='Case 2')
return
|
gpl-2.0
| -5,982,231,990,274,553,000 | 30.084615 | 80 | 0.642415 | false |
prophile/shipwreck
|
buildsim.py
|
1
|
6017
|
import random
buildings = []
current_turn = 0
queued_shipments = []
MAX_SHIPMENT = 10
from fractions import Fraction
messages = []
def queue_shipment(source, amount, target, turns):
messages.append("Shipping {0} from {1} to {2}".format(amount, source.name, target.name))
queued_shipments.append((amount, target, current_turn + turns))
source.level -= amount
target.inflight += amount
class ScreenClearer:
def __repr__(self):
import os
os.system('cls' if os.name == 'nt' else 'clear')
return ''
cls = ScreenClearer()
class Building:
def __init__(self, name):
self.name = name
self.level = 0
self.usage = 0
self.position = 0
self.inflight = 0
self.warehouse = False
self.generation = None
self._period = 0
self.operating = False
self._demand_bias = 0
self.capacity = 500
@property
def demand(self):
if self.warehouse:
return 25
source = None
for building in buildings:
if building.warehouse:
source = building
break
else:
# Guess!
return 3*self.usage + self._demand_bias
return int(self.usage * (3 + abs(self.position - source.position)//3) * 1.6) + self._demand_bias
def tick(self, n):
self.operating = True
if self.generation is not None:
if self.level >= self.usage:
self.level -= self.usage
self._period += 1
(production, period) = self.generation
if self._period > period:
self.level += production
self._period = 0
messages.append("Produced {0} at {1}".format(production, self.name))
else:
self.operating = False
else:
if self.warehouse and self.level < self.usage:
print("Out of food.")
exit(0)
elif self.level >= self.usage:
self.level -= self.usage
else:
self.operating = False
if not self.operating and random.random() < 0.35:
self._demand_bias += 1
if self.operating and self._demand_bias > 0 and random.random() < 0.002:
self._demand_bias -= 1
if self.level > self.capacity:
messages.append("{0} dumping {1} units due to overcapacity".format(self.name, self.level - self.capacity))
self.level = self.capacity
if self.level <= self.demand:
return
possible_targets = []
for bld in buildings:
if bld is self:
continue
if random.random() < 0.65:
possible_targets.append(bld)
targets = list(sorted(possible_targets, key = lambda x: abs(self.position - x.position)))
for potential in targets:
if potential.level + potential.inflight < potential.demand:
# ship to them
amount = min(self.level - self.demand, int((potential.demand - potential.level) * 1.5), MAX_SHIPMENT)
queue_shipment(self, amount, potential, abs(potential.position - self.position) // 3)
break
else:
if random.random() < 0.3:
# ship to a warehouse
for potential in targets:
if potential.warehouse:
amount = min(self.level - self.demand, MAX_SHIPMENT)
queue_shipment(self, amount, potential, abs(potential.position - self.position) // 3)
break
hq = Building('HQ')
hq.level = 30
hq.usage = 1
hq.warehouse = True
hq.position = 0
farm1 = Building('Farm')
farm1.generation = (10, 7)
farm1.position = 6
farm2 = Building('Farm')
farm2.level = 300
farm2.position = -10
farm2.generation = (10, 7)
farm3 = Building('Farm')
farm3.position = -22
farm3.generation = (10, 7)
farm4 = Building('Pig Farm')
farm4.position = -44
farm4.generation = (3, 1)
passive = Building('Forager')
passive.position = -70
passive.generation = (1, 5)
workhouse = Building('Workhouse')
workhouse.position = 40
workhouse.usage = 2
forester = Building('Forester')
forester.position = 4
forester.usage = 1
woodcutter = Building('Woodcutter')
woodcutter.position = 6
woodcutter.usage = 1
buildings.extend([hq, farm1, farm2, farm3, farm4, passive, workhouse, forester, woodcutter])
import sys
import time
while True:
print(cls)
# Calculate totals
total_demand = 0
total_supply = 0
for bld in buildings:
total_demand += bld.usage
if bld.generation is not None:
production, period = bld.generation
total_supply += Fraction(production, period)
if total_supply == total_demand:
print("INFO: Supply matches demand.")
else:
if total_supply > total_demand:
print("WARNING: supply exceeds demand, will stockpile until eternity")
elif total_supply < total_demand:
print("WARNING: demand exceeds supply, will starve")
print("Supply: {0}".format(float(total_supply)))
print("Demand: {0}".format(float(total_demand)))
# process deliveries
new_deliveries = []
for (amount, target, due) in queued_shipments:
if due <= current_turn:
target.level += amount
target.inflight -= amount
else:
new_deliveries.append((amount, target, due))
queued_shipments = new_deliveries
# tick buildings
for building in buildings:
building.tick(current_turn)
# display
for building in buildings:
print("{0}{2}\t\t{1}\t[demand = {3}]".format(building.name, building.level, '' if building.operating else '[x]', building.demand))
for message in messages:
print(message)
messages.clear()
# increment turn counter
current_turn += 1
# Sleep
sys.stdout.flush()
time.sleep(0.05)
|
mit
| -3,679,701,480,851,682,000 | 29.85641 | 138 | 0.580688 | false |
malja/cvut-python
|
cviceni04/domaci_priprava.py
|
1
|
2485
|
# Assignment:
#########
#
# Implement the following tasks:
#
# - swap(a,b), which swaps these two elements with each other
# - Write a function that prints 1,-1,1,-1 ...
# - Write a function that prints the result of the operation (-1)^k for k=0,...,100.
# Think about the speed of the computation and suggest alternative approaches
# - Write a function min(a,b) that returns the minimum of the two elements
# - Write a function max(a,b)
# - Write a function area(radius) that computes the area of a circle with the given radius
# - Write a function d2r(angle) that converts degrees to radians
# - Write a function r2d(angle) that converts radians to degrees
# - Write a function normalize(angle) that maps the given angle (in radians)
# to the interval <0,2π).
# - Write a function for printing an array:
# - using a for loop
# - using a while loop
###############################################################################
import math
def swap( a, b ):
"""
Swaps the elements a and b (the swap is only visible inside the function).
"""
a,b = b,a
def plusminus():
"""
Prints 1, -1, 1, ... to infinity and beyond.
"""
i = 1
while True:
print(i)
i = -i
def weirdo( iterations = 100 ):
"""
Prints the result of (-1)^k. Theoretically the slower version.
"""
for i in range( iterations+1 ):
print( (-1)**i )  # ** is exponentiation; ^ would be bitwise XOR
def weirdo2( iterations = 100):
"""
Prints the result of (-1)^k. Theoretically the faster version.
"""
for i in range( iterations+1 ):
if i%2 == 0:
print( 1 )
else:
print( -1 )
def min( a, b ):
"""
Returns the smaller of the two values.
"""
return a if a < b else b
def max( a, b ):
"""
Returns the larger of the two values.
"""
return a if a > b else b
def area( radius ):
"""
Computes the area of a circle.
"""
return math.pi * radius ** 2
def d2r( angle ):
"""
Converts degrees to radians.
"""
return angle * ( math.pi/180 )
def r2d( angle ):
"""
Converts radians to degrees.
"""
return angle * (180/math.pi)
def normalize( angle ):
"""
Maps the given angle in radians to the interval <0, 2pi).
"""
return angle % (2*math.pi)
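# A few quick checks for the angle helpers (approximate values):
#     d2r( 180 )         # -> math.pi
#     r2d( math.pi )     # -> 180.0
#     normalize( 7.0 )   # -> about 0.7168 (7 - 2*pi)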
def printArray( array ):
"""
Prints the elements of an array using a for loop.
"""
for element in array:
print( element )
def printArray2( array ):
"""
Prints the elements of an array using a while loop.
"""
i = 0
while i < len(array):
print( array[i] )
i += 1
|
mit
| 2,412,997,842,232,370,700 | 20.504505 | 79 | 0.557604 | false |
google/apitools
|
apitools/gen/gen_client_test.py
|
1
|
5072
|
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for gen_client module."""
import os
import unittest
from apitools.gen import gen_client
from apitools.gen import test_utils
def GetTestDataPath(*path):
return os.path.join(os.path.dirname(__file__), 'testdata', *path)
def _GetContent(file_path):
with open(file_path) as f:
return f.read()
class ClientGenCliTest(unittest.TestCase):
def testHelp_NotEnoughArguments(self):
with self.assertRaisesRegexp(SystemExit, '0'):
with test_utils.CaptureOutput() as (_, err):
gen_client.main([gen_client.__file__, '-h'])
err_output = err.getvalue()
self.assertIn('usage:', err_output)
self.assertIn('error: too few arguments', err_output)
def testGenClient_SimpleDocNoInit(self):
with test_utils.TempDir() as tmp_dir_path:
gen_client.main([
gen_client.__file__,
'--init-file', 'none',
'--infile', GetTestDataPath('dns', 'dns_v1.json'),
'--outdir', tmp_dir_path,
'--overwrite',
'--root_package', 'google.apis',
'client'
])
expected_files = (
set(['dns_v1_client.py', 'dns_v1_messages.py']))
self.assertEquals(expected_files, set(os.listdir(tmp_dir_path)))
def testGenClient_SimpleDocEmptyInit(self):
with test_utils.TempDir() as tmp_dir_path:
gen_client.main([
gen_client.__file__,
'--init-file', 'empty',
'--infile', GetTestDataPath('dns', 'dns_v1.json'),
'--outdir', tmp_dir_path,
'--overwrite',
'--root_package', 'google.apis',
'client'
])
expected_files = (
set(['dns_v1_client.py', 'dns_v1_messages.py', '__init__.py']))
self.assertEquals(expected_files, set(os.listdir(tmp_dir_path)))
init_file = _GetContent(os.path.join(tmp_dir_path, '__init__.py'))
self.assertEqual("""\"""Package marker file.\"""
from __future__ import absolute_import
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
""", init_file)
def testGenClient_SimpleDocWithV4(self):
with test_utils.TempDir() as tmp_dir_path:
gen_client.main([
gen_client.__file__,
'--infile', GetTestDataPath('dns', 'dns_v1.json'),
'--outdir', tmp_dir_path,
'--overwrite',
'--apitools_version', '0.4.12',
'--root_package', 'google.apis',
'client'
])
self.assertEquals(
set(['dns_v1_client.py', 'dns_v1_messages.py', '__init__.py']),
set(os.listdir(tmp_dir_path)))
def testGenClient_SimpleDocWithV5(self):
with test_utils.TempDir() as tmp_dir_path:
gen_client.main([
gen_client.__file__,
'--infile', GetTestDataPath('dns', 'dns_v1.json'),
'--outdir', tmp_dir_path,
'--overwrite',
'--apitools_version', '0.5.0',
'--root_package', 'google.apis',
'client'
])
self.assertEquals(
set(['dns_v1_client.py', 'dns_v1_messages.py', '__init__.py']),
set(os.listdir(tmp_dir_path)))
def testGenPipPackage_SimpleDoc(self):
with test_utils.TempDir() as tmp_dir_path:
gen_client.main([
gen_client.__file__,
'--infile', GetTestDataPath('dns', 'dns_v1.json'),
'--outdir', tmp_dir_path,
'--overwrite',
'--root_package', 'google.apis',
'pip_package'
])
self.assertEquals(
set(['apitools', 'setup.py']),
set(os.listdir(tmp_dir_path)))
def testGenProto_SimpleDoc(self):
with test_utils.TempDir() as tmp_dir_path:
gen_client.main([
gen_client.__file__,
'--infile', GetTestDataPath('dns', 'dns_v1.json'),
'--outdir', tmp_dir_path,
'--overwrite',
'--root_package', 'google.apis',
'proto'
])
self.assertEquals(
set(['dns_v1_messages.proto', 'dns_v1_services.proto']),
set(os.listdir(tmp_dir_path)))
|
apache-2.0
| 5,212,766,634,785,581,000 | 35.489209 | 79 | 0.527208 | false |
bert/geda-gaf
|
xorn/tests/cpython/Setup.py
|
1
|
2264
|
# Copyright (C) 2013-2017 Roland Lutz
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import xorn.storage
def setup():
global line_data, box_data, circle_data, net_data
rev0 = xorn.storage.Revision(None)
assert rev0 is not None
rev0.finalize()
# first change
rev1 = xorn.storage.Revision(rev0)
assert rev1 is not None
line_data = xorn.storage.Line()
line_data.x = 0
line_data.y = 1
line_data.width = 3
line_data.height = 2
line_data.color = 3
line_data.line.width = 1
ob0 = rev1.add_object(line_data)
assert ob0 is not None
rev1.finalize()
# second change
rev2 = xorn.storage.Revision(rev1)
assert rev2 is not None
box_data = xorn.storage.Box()
box_data.x = 1
box_data.y = 1
box_data.width = 2
box_data.height = 2
box_data.color = 3
box_data.line.width = 1
ob1a = rev2.add_object(box_data)
assert ob1a is not None
circle_data = xorn.storage.Circle()
circle_data.x = -1
circle_data.y = -1
circle_data.radius = 2
circle_data.color = 3
circle_data.line.width = 1
circle_data.fill.type = 1
ob1b = rev2.add_object(circle_data)
assert ob1b is not None
rev2.finalize()
# third change
rev3 = xorn.storage.Revision(rev2)
assert rev3 is not None
net_data = xorn.storage.Net()
net_data.x = 0
net_data.y = 1
net_data.width = 3
net_data.height = 2
net_data.color = 4
rev3.set_object_data(ob0, net_data)
rev3.delete_object(ob1a)
rev3.finalize()
return rev0, rev1, rev2, rev3, ob0, ob1a, ob1b
|
gpl-2.0
| 2,625,262,142,088,456,700 | 24.727273 | 73 | 0.668286 | false |
jonroberts/nasaMining
|
bak/textrank.py
|
1
|
6131
|
"""
*** FROM https://github.com/davidadamojr/TextRank/blob/master/textrank.py ***
From this paper: http://acl.ldc.upenn.edu/acl2004/emnlp/pdf/Mihalcea.pdf
External dependencies: nltk, numpy, networkx
Based on https://gist.github.com/voidfiles/1646117
"""
import nltk
import itertools
from operator import itemgetter
import networkx as nx
import os
# apply syntactic filters based on POS tags
def filter_for_tags(tagged, tags=['NN', 'JJ', 'NNP']):
return [item for item in tagged if item[1] in tags]
def normalize(tagged):
return [(item[0].replace('.', ''), item[1]) for item in tagged]
def unique_everseen(iterable, key=None):
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen = set()
seen_add = seen.add
if key is None:
for element in itertools.ifilterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
def lDistance(firstString, secondString):
"Function to find the Levenshtein distance between two words/sentences - gotten from http://rosettacode.org/wiki/Levenshtein_distance#Python"
if len(firstString) > len(secondString):
firstString, secondString = secondString, firstString
distances = range(len(firstString) + 1)
for index2, char2 in enumerate(secondString):
newDistances = [index2 + 1]
for index1, char1 in enumerate(firstString):
if char1 == char2:
newDistances.append(distances[index1])
else:
newDistances.append(1 + min((distances[index1], distances[index1 + 1], newDistances[-1])))
distances = newDistances
return distances[-1]
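# A quick sanity check for lDistance on plain strings (classic example):
#     lDistance("kitten", "sitting")   # -> 3
#     lDistance("abc", "abc")          # -> 0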
def buildGraph(nodes):
"nodes - list of hashables that represents the nodes of the graph"
gr = nx.Graph() #initialize an undirected graph
gr.add_nodes_from(nodes)
nodePairs = list(itertools.combinations(nodes, 2))
#add edges to the graph (weighted by Levenshtein distance)
for pair in nodePairs:
firstString = pair[0]
secondString = pair[1]
levDistance = lDistance(firstString, secondString)
gr.add_edge(firstString, secondString, weight=levDistance)
return gr
def extractKeyphrases(text):
#tokenize the text using nltk
wordTokens = nltk.word_tokenize(text)
#assign POS tags to the words in the text
tagged = nltk.pos_tag(wordTokens)
textlist = [x[0] for x in tagged]
tagged = filter_for_tags(tagged)
tagged = normalize(tagged)
unique_word_set = unique_everseen([x[0] for x in tagged])
word_set_list = list(unique_word_set)
#this will be used to determine adjacent words in order to construct keyphrases with two words
graph = buildGraph(word_set_list)
#pageRank - initial value of 1.0, error tolerance of 0.0001
calculated_page_rank = nx.pagerank(graph, weight='weight')
#most important words in ascending order of importance
keyphrases = sorted(calculated_page_rank, key=calculated_page_rank.get, reverse=True)
#the number of keyphrases returned will be relative to the size of the text (a third of the number of vertices)
aThird = len(word_set_list) / 3
keyphrases = keyphrases[0:aThird + 1]
#take keyphrases with multiple words into consideration as done in the paper - if two words are adjacent in the text and are selected as keywords, join them
#together
modifiedKeyphrases = set([])
dealtWith = set([]) #keeps track of individual keywords that have been joined to form a keyphrase
i = 0
j = 1
while j < len(textlist):
firstWord = textlist[i]
secondWord = textlist[j]
if firstWord in keyphrases and secondWord in keyphrases:
keyphrase = firstWord + ' ' + secondWord
modifiedKeyphrases.add(keyphrase)
dealtWith.add(firstWord)
dealtWith.add(secondWord)
else:
if firstWord in keyphrases and firstWord not in dealtWith:
modifiedKeyphrases.add(firstWord)
#if this is the last word in the text, and it is a keyword,
#it definitely has no chance of being a keyphrase at this point
if j == len(textlist) - 1 and secondWord in keyphrases and secondWord not in dealtWith:
modifiedKeyphrases.add(secondWord)
i = i + 1
j = j + 1
return modifiedKeyphrases
def extractSentences(text):
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
sentenceTokens = sent_detector.tokenize(text.strip())
graph = buildGraph(sentenceTokens)
calculated_page_rank = nx.pagerank(graph, weight='weight')
#most important sentences in ascending order of importance
sentences = sorted(calculated_page_rank, key=calculated_page_rank.get, reverse=True)
#return a 100 word summary
summary = ' '.join(sentences)
summaryWords = summary.split()
summaryWords = summaryWords[0:101]
summary = ' '.join(summaryWords)
return summary
def writeFiles(summary, keyphrases, fileName):
"outputs the keyphrases and summaries to appropriate files"
print "Generating output to " + 'keywords/' + fileName
keyphraseFile = open('keywords/' + fileName, 'w')
for keyphrase in keyphrases:
keyphraseFile.write(keyphrase + '\n')
keyphraseFile.close()
print "Generating output to " + 'summaries/' + fileName
summaryFile = open('summaries/' + fileName, 'w')
summaryFile.write(summary)
summaryFile.close()
print "-"
# #retrieve each of the articles
# articles = os.listdir("articles")
# for article in articles:
# print 'Reading articles/' + article
# articleFile = open('articles/' + article, 'r')
# text = articleFile.read()
# keyphrases = extractKeyphrases(text)
# summary = extractSentences(text)
# writeFiles(summary, keyphrases, article)
|
mit
| 4,441,759,738,965,631,000 | 33.835227 | 160 | 0.672484 | false |
Fenykepy/phiroom
|
src/api/contact/tests.py
|
1
|
16072
|
from django.core import mail
from django.test import TestCase
from django.core.cache import cache
from rest_framework.test import APIClient, APITestCase
from contact.models import Description, Message
from user.models import User
from stats.models import Hit
from user.tests import create_test_users, login
from phiroom.tests_utils import test_status_codes
def create_test_messages(instance):
"""Create messages for tests.
Run create_test_users first.
"""
instance.mesg = Message.objects.create(
name=instance.staffUser.username,
user=instance.staffUser,
mail=instance.staffUser.email,
website=instance.staffUser.website_link,
subject="contact",
message="Hello",
)
instance.mesg2 = Message.objects.create(
name="Bill",
mail="Bill@bill.com",
subject="contact",
message="Hello",
forward=False
)
class DescriptionModelTest(TestCase):
"""Description model test class."""
fixtures = ["initial_data"]
def setUp(self):
# create users
create_test_users(self)
def test_description_creation(self):
# create a description
desc = Description.objects.create(
title="Contact",
source="My beautiful source \n##titre##",
author=self.staffUser
)
# assert description has been saved in db
desc = Description.objects.get(pk=2)
self.assertEqual(desc.title, "Contact")
self.assertEqual(desc.author, self.staffUser)
self.assertEqual(desc.source,
"My beautiful source \n##titre##")
self.assertEqual(desc.content,
"<p>My beautiful source </p>\n<h2>titre</h2>")
self.assertTrue(desc.date_update)
# assert updating description always save as a new one
desc.title = "New contact"
desc.author = self.normalUser
desc.save()
desc2 = Description.objects.get(pk=3)
self.assertEqual(desc2.title, "New contact")
self.assertEqual(desc2.author, self.normalUser)
# assert latest always returns latest description
latest = Description.objects.latest()
self.assertEqual(desc2, latest)
class MessageModelTest(TestCase):
"""Message model test class."""
fixtures = ["initial_data"]
def setUp(self):
# create users
create_test_users(self)
def test_message_creation(self):
# create a new message
mesg = Message(
name="Tom",
mail="tom@tom.com",
subject="My subject",
message="My message",
forward=True
)
mesg.save()
# assert it has been saved in db
mesg = Message.objects.get(pk=1)
self.assertEqual(mesg.name, "Tom")
self.assertEqual(mesg.mail, "tom@tom.com")
self.assertEqual(mesg.subject, "My subject")
self.assertEqual(mesg.message, "My message")
self.assertTrue(mesg.forward)
self.assertTrue(mesg.date)
# create a test message with existing user
mesg = Message(
subject="A second subject",
message="A second message",
user=self.normalUser,
forward=False
)
mesg.save()
# assert it has been saved in db
mesg = Message.objects.get(pk=2)
self.assertEqual(mesg.name, self.normalUser.username)
self.assertEqual(mesg.mail, self.normalUser.email)
self.assertEqual(mesg.subject, "A second subject")
self.assertEqual(mesg.message, "A second message")
self.assertEqual(mesg.forward, False)
self.assertEqual(mesg.user, self.normalUser)
self.assertTrue(mesg.date)
# we disable cache for tests
class DescriptionAPITest(APITestCase):
"""Description API Test class."""
fixtures = ["initial_data"]
def setUp(self):
# create users
create_test_users(self)
# create few descriptions
self.desc = Description.objects.create(
title="Contact",
source="My beautiful source \n##titre##",
author=self.staffUser
)
self.desc2 = Description.objects.create(
title="Contact2",
source="My beautiful source \n##titre##",
author=self.staffUser
)
self.desc3 = Description.objects.create(
title="Contact3",
source="My beautiful source \n##titre##",
author=self.staffUser
)
self.client = APIClient()
def test_contact_hits(self):
# create some hits, 2 with same IP
hit = Hit.objects.create(
ip = '127.0.0.8',
type = 'CONTACT',
)
hit = Hit.objects.create(
ip = '127.0.0.8',
type = 'CONTACT',
)
hit = Hit.objects.create(
ip = '127.0.0.9',
type = 'CONTACT',
)
url = '/api/contact/hits/'
data = { 'name': 'tom' }
# test without login
test_status_codes(self, url, [401, 401, 401, 401, 401],
postData=data, putData=data, patchData=data)
# test with normal user
login(self, self.normalUser)
test_status_codes(self, url, [403, 403, 403, 403, 403],
postData=data, putData=data, patchData=data)
# test with staff user
login(self, self.staffUser)
test_status_codes(self, url, [200, 405, 405, 405, 405],
postData=data, putData=data, patchData=data)
response=self.client.get(url)
# only 2 hits should be counted
self.assertEqual(response.data, 2)
# we reset hits count
for hit in Hit.objects.all():
hit.delete()
# we clear cache to be sure
cache.clear()
response=self.client.get(url)
# 0 hit should be returned
self.assertEqual(response.data, 0)
def test_latest_description(self):
url = '/api/contact/description/'
data = {
'title': 'toto',
'source': 'tata',
}
# test without login
test_status_codes(self, url, [200, 405, 405, 405, 405],
postData=data, putData=data, patchData=data)
# client should get last description
response=self.client.get(url)
self.assertEqual(response.data['title'], self.desc3.title)
# test with normal user
login(self, self.normalUser)
test_status_codes(self, url, [200, 405, 405, 405, 405],
postData=data, putData=data, patchData=data)
# client should get last description
response=self.client.get(url)
self.assertEqual(response.data['title'], self.desc3.title)
# test with staff member
login(self, self.staffUser)
test_status_codes(self, url, [200, 405, 405, 405, 405],
postData=data, putData=data, patchData=data)
# client should get last description
response=self.client.get(url)
self.assertEqual(response.data['title'], self.desc3.title)
def test_descriptions_list(self):
url = '/api/contact/descriptions/'
data = {
'title': 'toto',
'source': 'tata',
}
# test without login
test_status_codes(self, url, [401, 401, 401, 401, 401],
postData=data, putData=data, patchData=data)
# test with normal user
login(self, self.normalUser)
test_status_codes(self, url, [403, 403, 403, 403, 403],
postData=data, putData=data, patchData=data)
# test with staff member
login(self, self.staffUser)
test_status_codes(self, url, [200, 201, 405, 405, 405],
postData=data, putData=data, patchData=data)
# client should get list of descriptions
response=self.client.get(url)
self.assertEqual(len(response.data['results']), 5)
#self.assertEqual(len(response.data['results']), 4)
desc = Description.objects.latest()
self.assertEqual(desc.title, data['title'])
self.assertEqual(desc.source, data['source'])
self.assertTrue(desc.date_update)
self.assertTrue(desc.content)
# assert user is save as author
self.assertEqual(desc.author, self.staffUser)
def test_descriptions_detail(self):
url = '/api/contact/descriptions/{}/'.format(
self.desc.pk)
data = {
'title': 'toto',
'source': 'tata',
}
# test without login
test_status_codes(self, url, [401, 401, 401, 401, 401],
postData=data, putData=data, patchData=data)
# test with normal user
login(self, self.normalUser)
test_status_codes(self, url, [403, 403, 403, 403, 403],
postData=data, putData=data, patchData=data)
# test with staff member
login(self, self.staffUser)
test_status_codes(self, url, [200, 405, 405, 405, 405],
postData=data, putData=data, patchData=data)
# client should get list of descriptions
response=self.client.get(url)
self.assertEqual(response.data['title'], self.desc.title)
class MessageAPITest(APITestCase):
"""Message API Test class."""
fixtures = ["initial_data"]
def setUp(self):
# create users
create_test_users(self)
# create test messages
create_test_messages(self)
self.client = APIClient()
def test_messages_list(self):
url = '/api/contact/messages/'
data = {
'name': 'toto',
'mail': 'toto@toto.com',
'website': 'http://toto.com',
'subject': 'test',
'message': 'message',
'forward': False
}
data2 = {
'subject': 'test',
'message': 'message',
'forward': True
}
# test without login
# client shouldn't get
response=self.client.get(url)
self.assertEqual(response.status_code, 401)
# client should be able to post
response=self.client.post(url, data)
self.assertEqual(response.status_code, 201)
# !!! assert mail has been sent
# one mail should have been sent (forward is false)
self.assertEqual(len(mail.outbox), 1)
self.assertTrue(mail.outbox[0].subject)
self.assertTrue(mail.outbox[0].message)
self.assertTrue(data['message'] in mail.outbox[0].body)
self.assertTrue(data['subject'] in mail.outbox[0].body)
self.assertTrue(self.staffUser.email in mail.outbox[0].to)
# client shouldn't be able to put
response=self.client.put(url, data)
self.assertEqual(response.status_code, 401)
# client shouldn't be able to patch
response=self.client.patch(url, data)
self.assertEqual(response.status_code, 401)
# client shouldn't be able to delete
response=self.client.delete(url)
self.assertEqual(response.status_code, 401)
# test with normal user
login(self, self.normalUser)
# client shouldn't get
response=self.client.get(url)
self.assertEqual(response.status_code, 403)
# client should be able to post
response=self.client.post(url, data2)
self.assertEqual(response.status_code, 201)
mesg = Message.objects.latest('pk')
self.assertEqual(mesg.name, self.normalUser.username)
self.assertEqual(mesg.mail, self.normalUser.email)
self.assertEqual(mesg.website, self.normalUser.website_link)
self.assertEqual(mesg.user, self.normalUser)
# !!! assert mail has been sent
# 2 mails should have been sent (forward is true)
self.assertEqual(len(mail.outbox), 3)
self.assertTrue(mail.outbox[1].subject)
self.assertTrue(mail.outbox[1].message)
self.assertTrue(data2['message'] in mail.outbox[1].body)
self.assertTrue(data2['subject'] in mail.outbox[1].body)
self.assertTrue(self.staffUser.email in mail.outbox[1].to)
# assert user email is in recipient list
self.assertTrue(self.normalUser.email in mail.outbox[2].to)
# assert message in email body
self.assertTrue(data2['message'] in mail.outbox[2].body)
# client shouldn't be able to put
response=self.client.put(url, data)
self.assertEqual(response.status_code, 403)
# client shouldn't be able to patch
response=self.client.patch(url, data)
self.assertEqual(response.status_code, 403)
# client shouldn't be able to delete
response=self.client.delete(url)
self.assertEqual(response.status_code, 403)
# test with staff member
login(self, self.staffUser)
# client should get list of messages
response=self.client.get(url)
self.assertEqual(response.status_code, 200)
# assert messages have been saved
self.assertEqual(len(response.data['results']), 4)
self.assertEqual(response.data['results'][2]['name'], data['name'])
self.assertEqual(response.data['results'][2]['mail'], data['mail'])
self.assertEqual(response.data['results'][2]['subject'], data['subject'])
self.assertEqual(response.data['results'][2]['message'], data['message'])
self.assertEqual(response.data['results'][2]['website'], data['website'])
self.assertEqual(response.data['results'][2]['forward'], data['forward'])
# assert IP and date have been saved
message = Message.objects.get(pk=4)
self.assertTrue(message.date)
self.assertTrue(message.ip)
# client should be able to post
response=self.client.post(url, data)
self.assertEqual(response.status_code, 201)
# !!! assert mail has been sent
# one mail should have been sent (forward is true)
self.assertEqual(len(mail.outbox), 4)
self.assertTrue(mail.outbox[3].subject)
self.assertTrue(mail.outbox[3].message)
self.assertTrue(data2['message'] in mail.outbox[3].body)
self.assertTrue(data2['subject'] in mail.outbox[3].body)
self.assertTrue(self.staffUser.email in mail.outbox[3].to)
# client shouldn't be able to put
response=self.client.put(url, data)
self.assertEqual(response.status_code, 403)
# client shouldn't be able to patch
response=self.client.patch(url, data)
self.assertEqual(response.status_code, 403)
# client shouldn't be able to delete
response=self.client.delete(url)
self.assertEqual(response.status_code, 405)
def test_messages_detail(self):
url = '/api/contact/messages/1/'
data = {
'name': 'toto',
'mail': 'toto@toto.com',
'website': 'http://toto.com',
'subject': 'test',
'message': 'message',
'forward': False
}
# test without login
test_status_codes(self, url, [401, 401, 401, 401, 401],
postData=data, putData=data, patchData=data)
# test with normal user
login(self, self.normalUser)
test_status_codes(self, url, [403, 403, 403, 403, 403],
postData=data, putData=data, patchData=data)
# test with staff member
login(self, self.staffUser)
# client should get specific message
response=self.client.get(url)
self.assertEqual(response.data['subject'], self.mesg.subject)
# test status codes
test_status_codes(self, url, [200, 405, 405, 405, 204],
postData=data, putData=data, patchData=data)
# assert object has been deleted
m = Message.objects.filter(pk=1).count()
self.assertEqual(m, 0)
|
agpl-3.0
| 5,686,901,337,456,591,000 | 33.339744 | 81 | 0.593678 | false |
facekapow/runtime
|
deps/v8/tools/gen-postmortem-metadata.py
|
2
|
23848
|
#!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# Emits a C++ file to be compiled and linked into libv8 to support postmortem
# debugging tools. Most importantly, this tool emits constants describing V8
# internals:
#
# v8dbg_type_CLASS__TYPE = VALUE Describes class type values
# v8dbg_class_CLASS__FIELD__TYPE = OFFSET Describes class fields
# v8dbg_parent_CLASS__PARENT Describes class hierarchy
# v8dbg_frametype_NAME = VALUE Describes stack frame values
# v8dbg_off_fp_NAME = OFFSET Frame pointer offsets
# v8dbg_prop_NAME = OFFSET Object property offsets
# v8dbg_NAME = VALUE Miscellaneous values
#
# These constants are declared as global integers so that they'll be present in
# the generated libv8 binary.
#
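# For illustration (an assumed example of the shape of the output, as produced
# by emit_set() below): each constant becomes a line such as
#     int v8dbg_SmiTag = kSmiTag;
#     int v8dbg_class_HeapObject__map__Map = HeapObject::kMapOffset;
# so the values can be read straight out of the compiled libv8 binary.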
import re
import sys
#
# Miscellaneous constants, tags, and masks used for object identification.
#
consts_misc = [
{ 'name': 'FirstNonstringType', 'value': 'FIRST_NONSTRING_TYPE' },
{ 'name': 'IsNotStringMask', 'value': 'kIsNotStringMask' },
{ 'name': 'StringTag', 'value': 'kStringTag' },
{ 'name': 'NotStringTag', 'value': 'kNotStringTag' },
{ 'name': 'StringEncodingMask', 'value': 'kStringEncodingMask' },
{ 'name': 'TwoByteStringTag', 'value': 'kTwoByteStringTag' },
{ 'name': 'OneByteStringTag', 'value': 'kOneByteStringTag' },
{ 'name': 'StringRepresentationMask',
'value': 'kStringRepresentationMask' },
{ 'name': 'SeqStringTag', 'value': 'kSeqStringTag' },
{ 'name': 'ConsStringTag', 'value': 'kConsStringTag' },
{ 'name': 'ExternalStringTag', 'value': 'kExternalStringTag' },
{ 'name': 'SlicedStringTag', 'value': 'kSlicedStringTag' },
{ 'name': 'HeapObjectTag', 'value': 'kHeapObjectTag' },
{ 'name': 'HeapObjectTagMask', 'value': 'kHeapObjectTagMask' },
{ 'name': 'SmiTag', 'value': 'kSmiTag' },
{ 'name': 'SmiTagMask', 'value': 'kSmiTagMask' },
{ 'name': 'SmiValueShift', 'value': 'kSmiTagSize' },
{ 'name': 'SmiShiftSize', 'value': 'kSmiShiftSize' },
{ 'name': 'PointerSizeLog2', 'value': 'kPointerSizeLog2' },
{ 'name': 'OddballFalse', 'value': 'Oddball::kFalse' },
{ 'name': 'OddballTrue', 'value': 'Oddball::kTrue' },
{ 'name': 'OddballTheHole', 'value': 'Oddball::kTheHole' },
{ 'name': 'OddballNull', 'value': 'Oddball::kNull' },
{ 'name': 'OddballArgumentMarker', 'value': 'Oddball::kArgumentMarker' },
{ 'name': 'OddballUndefined', 'value': 'Oddball::kUndefined' },
{ 'name': 'OddballUninitialized', 'value': 'Oddball::kUninitialized' },
{ 'name': 'OddballOther', 'value': 'Oddball::kOther' },
{ 'name': 'OddballException', 'value': 'Oddball::kException' },
{ 'name': 'prop_idx_first',
'value': 'DescriptorArray::kFirstIndex' },
{ 'name': 'prop_type_field',
'value': 'DATA' },
{ 'name': 'prop_type_mask',
'value': 'PropertyDetails::TypeField::kMask' },
{ 'name': 'prop_index_mask',
'value': 'PropertyDetails::FieldIndexField::kMask' },
{ 'name': 'prop_index_shift',
'value': 'PropertyDetails::FieldIndexField::kShift' },
{ 'name': 'prop_representation_mask',
'value': 'PropertyDetails::RepresentationField::kMask' },
{ 'name': 'prop_representation_shift',
'value': 'PropertyDetails::RepresentationField::kShift' },
{ 'name': 'prop_representation_integer8',
'value': 'Representation::Kind::kInteger8' },
{ 'name': 'prop_representation_uinteger8',
'value': 'Representation::Kind::kUInteger8' },
{ 'name': 'prop_representation_integer16',
'value': 'Representation::Kind::kInteger16' },
{ 'name': 'prop_representation_uinteger16',
'value': 'Representation::Kind::kUInteger16' },
{ 'name': 'prop_representation_smi',
'value': 'Representation::Kind::kSmi' },
{ 'name': 'prop_representation_integer32',
'value': 'Representation::Kind::kInteger32' },
{ 'name': 'prop_representation_double',
'value': 'Representation::Kind::kDouble' },
{ 'name': 'prop_representation_heapobject',
'value': 'Representation::Kind::kHeapObject' },
{ 'name': 'prop_representation_tagged',
'value': 'Representation::Kind::kTagged' },
{ 'name': 'prop_representation_external',
'value': 'Representation::Kind::kExternal' },
{ 'name': 'prop_desc_key',
'value': 'DescriptorArray::kDescriptorKey' },
{ 'name': 'prop_desc_details',
'value': 'DescriptorArray::kDescriptorDetails' },
{ 'name': 'prop_desc_value',
'value': 'DescriptorArray::kDescriptorValue' },
{ 'name': 'prop_desc_size',
'value': 'DescriptorArray::kDescriptorSize' },
{ 'name': 'elements_fast_holey_elements',
'value': 'FAST_HOLEY_ELEMENTS' },
{ 'name': 'elements_fast_elements',
'value': 'FAST_ELEMENTS' },
{ 'name': 'elements_dictionary_elements',
'value': 'DICTIONARY_ELEMENTS' },
{ 'name': 'bit_field2_elements_kind_mask',
'value': 'Map::ElementsKindBits::kMask' },
{ 'name': 'bit_field2_elements_kind_shift',
'value': 'Map::ElementsKindBits::kShift' },
{ 'name': 'bit_field3_dictionary_map_shift',
'value': 'Map::DictionaryMap::kShift' },
{ 'name': 'bit_field3_number_of_own_descriptors_mask',
'value': 'Map::NumberOfOwnDescriptorsBits::kMask' },
{ 'name': 'bit_field3_number_of_own_descriptors_shift',
'value': 'Map::NumberOfOwnDescriptorsBits::kShift' },
{ 'name': 'off_fp_context',
'value': 'StandardFrameConstants::kContextOffset' },
{ 'name': 'off_fp_constant_pool',
'value': 'StandardFrameConstants::kConstantPoolOffset' },
{ 'name': 'off_fp_marker',
'value': 'StandardFrameConstants::kMarkerOffset' },
{ 'name': 'off_fp_function',
'value': 'JavaScriptFrameConstants::kFunctionOffset' },
{ 'name': 'off_fp_args',
'value': 'JavaScriptFrameConstants::kLastParameterOffset' },
{ 'name': 'scopeinfo_idx_nparams',
'value': 'ScopeInfo::kParameterCount' },
{ 'name': 'scopeinfo_idx_nstacklocals',
'value': 'ScopeInfo::kStackLocalCount' },
{ 'name': 'scopeinfo_idx_ncontextlocals',
'value': 'ScopeInfo::kContextLocalCount' },
{ 'name': 'scopeinfo_idx_ncontextglobals',
'value': 'ScopeInfo::kContextGlobalCount' },
{ 'name': 'scopeinfo_idx_first_vars',
'value': 'ScopeInfo::kVariablePartIndex' },
{ 'name': 'sharedfunctioninfo_start_position_mask',
'value': 'SharedFunctionInfo::kStartPositionMask' },
{ 'name': 'sharedfunctioninfo_start_position_shift',
'value': 'SharedFunctionInfo::kStartPositionShift' },
{ 'name': 'jsarray_buffer_was_neutered_mask',
'value': 'JSArrayBuffer::WasNeutered::kMask' },
{ 'name': 'jsarray_buffer_was_neutered_shift',
'value': 'JSArrayBuffer::WasNeutered::kShift' },
];
#
# The following useful fields are missing accessors, so we define fake ones.
#
extras_accessors = [
'JSFunction, context, Context, kContextOffset',
'Context, closure_index, int, CLOSURE_INDEX',
'Context, native_context_index, int, NATIVE_CONTEXT_INDEX',
'Context, previous_index, int, PREVIOUS_INDEX',
'Context, min_context_slots, int, MIN_CONTEXT_SLOTS',
'HeapObject, map, Map, kMapOffset',
'JSObject, elements, Object, kElementsOffset',
'FixedArray, data, uintptr_t, kHeaderSize',
'JSArrayBuffer, backing_store, Object, kBackingStoreOffset',
'JSArrayBufferView, byte_offset, Object, kByteOffsetOffset',
'JSTypedArray, length, Object, kLengthOffset',
'Map, instance_attributes, int, kInstanceAttributesOffset',
'Map, inobject_properties_or_constructor_function_index, int, kInObjectPropertiesOrConstructorFunctionIndexOffset',
'Map, instance_size, int, kInstanceSizeOffset',
'Map, bit_field, char, kBitFieldOffset',
'Map, bit_field2, char, kBitField2Offset',
'Map, bit_field3, int, kBitField3Offset',
'Map, prototype, Object, kPrototypeOffset',
'NameDictionaryShape, prefix_size, int, kPrefixSize',
'NameDictionaryShape, entry_size, int, kEntrySize',
'NameDictionary, prefix_start_index, int, kPrefixStartIndex',
'SeededNumberDictionaryShape, prefix_size, int, kPrefixSize',
'UnseededNumberDictionaryShape, prefix_size, int, kPrefixSize',
'NumberDictionaryShape, entry_size, int, kEntrySize',
'Oddball, kind_offset, int, kKindOffset',
'HeapNumber, value, double, kValueOffset',
'ConsString, first, String, kFirstOffset',
'ConsString, second, String, kSecondOffset',
'ExternalString, resource, Object, kResourceOffset',
'SeqOneByteString, chars, char, kHeaderSize',
'SeqTwoByteString, chars, char, kHeaderSize',
'SharedFunctionInfo, code, Code, kCodeOffset',
'SharedFunctionInfo, scope_info, ScopeInfo, kScopeInfoOffset',
'SlicedString, parent, String, kParentOffset',
'Code, instruction_start, uintptr_t, kHeaderSize',
'Code, instruction_size, int, kInstructionSizeOffset',
];
#
# The following is a whitelist of classes we expect to find when scanning the
# source code. This list is not exhaustive, but it's still useful to identify
# when this script gets out of sync with the source. See load_objects().
#
expected_classes = [
'ConsString', 'FixedArray', 'HeapNumber', 'JSArray', 'JSFunction',
'JSObject', 'JSRegExp', 'JSValue', 'Map', 'Oddball', 'Script',
'SeqOneByteString', 'SharedFunctionInfo'
];
#
# The following structures store high-level representations of the structures
# for which we're going to emit descriptive constants.
#
types = {}; # set of all type names
typeclasses = {}; # maps type names to corresponding class names
klasses = {}; # known classes, including parents
fields = []; # field declarations
header = '''
/*
* This file is generated by %s. Do not edit directly.
*/
#include "src/v8.h"
#include "src/frames.h"
#include "src/frames-inl.h" /* for architecture-specific frame constants */
using namespace v8::internal;
extern "C" {
/* stack frame constants */
#define FRAME_CONST(value, klass) \
int v8dbg_frametype_##klass = StackFrame::value;
STACK_FRAME_TYPE_LIST(FRAME_CONST)
#undef FRAME_CONST
''' % sys.argv[0];
footer = '''
}
'''
#
# Get the base class
#
def get_base_class(klass):
if (klass == 'Object'):
return klass;
if (not (klass in klasses)):
return None;
k = klasses[klass];
return get_base_class(k['parent']);
#
# Loads class hierarchy and type information from "objects.h".
#
def load_objects():
objfilename = sys.argv[2];
objfile = open(objfilename, 'r');
in_insttype = False;
typestr = '';
#
# Construct a dictionary for the classes we're sure should be present.
#
checktypes = {};
for klass in expected_classes:
checktypes[klass] = True;
#
# Iterate objects.h line-by-line to collect type and class information.
# For types, we accumulate a string representing the entire InstanceType
# enum definition and parse it later because it's easier to do so
# without the embedded newlines.
#
for line in objfile:
if (line.startswith('enum InstanceType {')):
in_insttype = True;
continue;
if (in_insttype and line.startswith('};')):
in_insttype = False;
continue;
line = re.sub('//.*', '', line.strip());
if (in_insttype):
typestr += line;
continue;
match = re.match('class (\w[^:]*)(: public (\w[^{]*))?\s*{\s*',
line);
if (match):
klass = match.group(1).strip();
pklass = match.group(3);
if (pklass):
pklass = pklass.strip();
klasses[klass] = { 'parent': pklass };
#
# Process the instance type declaration.
#
entries = typestr.split(',');
for entry in entries:
types[re.sub('\s*=.*', '', entry).lstrip()] = True;
#
# Infer class names for each type based on a systematic transformation.
# For example, "JS_FUNCTION_TYPE" becomes "JSFunction". We find the
# class for each type rather than the other way around because there are
# fewer cases where one type maps to more than one class than the other
# way around.
#
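# Two further hedged illustrations of the transformation below:
# "ODDBALL_TYPE" maps to "Oddball" and "JS_ARRAY_TYPE" maps to "JSArray"
# (the "JS" prefix stays upper case while the remaining parts are camel-cased).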
for type in types:
#
# Symbols and Strings are implemented using the same classes.
#
usetype = re.sub('SYMBOL_', 'STRING_', type);
#
# REGEXP behaves like REG_EXP, as in JS_REGEXP_TYPE => JSRegExp.
#
usetype = re.sub('_REGEXP_', '_REG_EXP_', usetype);
#
# Remove the "_TYPE" suffix and then convert to camel case,
# except that a "JS" prefix remains uppercase (as in
# "JS_FUNCTION_TYPE" => "JSFunction").
#
if (not usetype.endswith('_TYPE')):
continue;
usetype = usetype[0:len(usetype) - len('_TYPE')];
parts = usetype.split('_');
cctype = '';
if (parts[0] == 'JS'):
cctype = 'JS';
start = 1;
else:
cctype = '';
start = 0;
for ii in range(start, len(parts)):
part = parts[ii];
cctype += part[0].upper() + part[1:].lower();
#
# Mapping string types is more complicated. Both types and
# class names for Strings specify a representation (e.g., Seq,
# Cons, External, or Sliced) and an encoding (TwoByte/OneByte),
# In the simplest case, both of these are explicit in both
# names, as in:
#
# EXTERNAL_ONE_BYTE_STRING_TYPE => ExternalOneByteString
#
# However, either the representation or encoding can be omitted
# from the type name, in which case "Seq" and "TwoByte" are
# assumed, as in:
#
# STRING_TYPE => SeqTwoByteString
#
# Additionally, sometimes the type name has more information
# than the class, as in:
#
# CONS_ONE_BYTE_STRING_TYPE => ConsString
#
# To figure this out dynamically, we first check for a
# representation and encoding and add them if they're not
# present. If that doesn't yield a valid class name, then we
# strip out the representation.
#
if (cctype.endswith('String')):
if (cctype.find('Cons') == -1 and
cctype.find('External') == -1 and
cctype.find('Sliced') == -1):
if (cctype.find('OneByte') != -1):
cctype = re.sub('OneByteString$',
'SeqOneByteString', cctype);
else:
cctype = re.sub('String$',
'SeqString', cctype);
if (cctype.find('OneByte') == -1):
cctype = re.sub('String$', 'TwoByteString',
cctype);
if (not (cctype in klasses)):
cctype = re.sub('OneByte', '', cctype);
cctype = re.sub('TwoByte', '', cctype);
#
# Despite all that, some types have no corresponding class.
#
if (cctype in klasses):
typeclasses[type] = cctype;
if (cctype in checktypes):
del checktypes[cctype];
if (len(checktypes) > 0):
for klass in checktypes:
print('error: expected class \"%s\" not found' % klass);
sys.exit(1);
#
# For a given macro call, pick apart the arguments and return an object
# describing the corresponding output constant. See load_fields().
#
def parse_field(call):
# Replace newlines with spaces. Python strings are immutable, so build a
# new string rather than trying to assign to individual characters.
call = call.replace('\n', ' ');
idx = call.find('(');
kind = call[0:idx];
rest = call[idx + 1: len(call) - 1];
args = re.split('\s*,\s*', rest);
consts = [];
if (kind == 'ACCESSORS' or kind == 'ACCESSORS_GCSAFE'):
klass = args[0];
field = args[1];
dtype = args[2];
offset = args[3];
return ({
'name': 'class_%s__%s__%s' % (klass, field, dtype),
'value': '%s::%s' % (klass, offset)
});
assert(kind == 'SMI_ACCESSORS' or kind == 'ACCESSORS_TO_SMI');
klass = args[0];
field = args[1];
offset = args[2];
return ({
'name': 'class_%s__%s__%s' % (klass, field, 'SMI'),
'value': '%s::%s' % (klass, offset)
});
#
# Load field offset information from objects-inl.h.
#
def load_fields():
inlfilename = sys.argv[3];
inlfile = open(inlfilename, 'r');
#
# Each class's fields and the corresponding offsets are described in the
# source by calls to macros like "ACCESSORS" (and friends). All we do
# here is extract these macro invocations, taking into account that they
# may span multiple lines and may contain nested parentheses. We also
# call parse_field() to pick apart the invocation.
#
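# As a hedged illustration, a typical invocation in objects-inl.h looks roughly like
#     ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
# which parse_field() turns into the constant
#     class_JSObject__properties__FixedArray = JSObject::kPropertiesOffset.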
prefixes = [ 'ACCESSORS', 'ACCESSORS_GCSAFE',
'SMI_ACCESSORS', 'ACCESSORS_TO_SMI' ];
current = '';
opens = 0;
for line in inlfile:
if (opens > 0):
# Continuation line
for ii in range(0, len(line)):
if (line[ii] == '('):
opens += 1;
elif (line[ii] == ')'):
opens -= 1;
if (opens == 0):
break;
current += line[0:ii + 1];
continue;
for prefix in prefixes:
if (not line.startswith(prefix + '(')):
continue;
if (len(current) > 0):
fields.append(parse_field(current));
current = '';
for ii in range(len(prefix), len(line)):
if (line[ii] == '('):
opens += 1;
elif (line[ii] == ')'):
opens -= 1;
if (opens == 0):
break;
current += line[0:ii + 1];
if (len(current) > 0):
fields.append(parse_field(current));
current = '';
for body in extras_accessors:
fields.append(parse_field('ACCESSORS(%s)' % body));
#
# Emit a block of constants.
#
def emit_set(out, consts):
# Fix up overzealous parses. This could be done inside the
# parsers but as there are several, it's easiest to do it here.
ws = re.compile('\s+')
for const in consts:
name = ws.sub('', const['name'])
value = ws.sub('', str(const['value'])) # Can be a number.
out.write('int v8dbg_%s = %s;\n' % (name, value))
out.write('\n');
#
# Emit the whole output file.
#
def emit_config():
out = file(sys.argv[1], 'w');
out.write(header);
out.write('/* miscellaneous constants */\n');
emit_set(out, consts_misc);
out.write('/* class type information */\n');
consts = [];
keys = typeclasses.keys();
keys.sort();
for typename in keys:
klass = typeclasses[typename];
consts.append({
'name': 'type_%s__%s' % (klass, typename),
'value': typename
});
emit_set(out, consts);
out.write('/* class hierarchy information */\n');
consts = [];
keys = klasses.keys();
keys.sort();
for klassname in keys:
pklass = klasses[klassname]['parent'];
bklass = get_base_class(klassname);
if (bklass != 'Object'):
continue;
if (pklass == None):
continue;
consts.append({
'name': 'parent_%s__%s' % (klassname, pklass),
'value': 0
});
emit_set(out, consts);
out.write('/* field information */\n');
emit_set(out, fields);
out.write(footer);
if (len(sys.argv) < 4):
print('usage: %s output.cc objects.h objects-inl.h' % sys.argv[0]);
sys.exit(2);
load_objects();
load_fields();
emit_config();
|
apache-2.0
| -8,884,874,755,149,568,000 | 38.095082 | 119 | 0.543442 | false |
maxterip/plugins.video.Movies-Online
|
resources/tools/server_rtmp.py
|
1
|
5143
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# tvspot - XBMC Add-on by Juarrox (juarrox@gmail.com)
# Version 0.2.9 (18.07.2014)
#------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
# Gracias a la librería plugintools de Jesús (www.mimediacenter.info)
import os
import sys
import urllib
import urllib2
import re
import shutil
import zipfile
import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin
import plugintools
home = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.tvspot/', ''))
tools = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.tvspot/resources/tools', ''))
addons = xbmc.translatePath(os.path.join('special://home/addons/', ''))
art = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.tvspot/art', ''))
tmp = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.tvspot/tmp', ''))
playlists = xbmc.translatePath(os.path.join('special://home/addons/playlists', ''))
icon = art + 'icon.png'
fanart = 'fanart.jpg'
def resolve_iguide(params):
plugintools.log("[tvspot 4.0].resolve_iguide " + repr(params) )
url = params.get("url")
url = url.strip()
# plugintools.log("URL antes de resolver= " + url)
iguide_palco = {"rtmp": "rtmp://live2.iguide.to/redirect","swfurl": "http://cdn1.iguide.to/player/secure_player_iguide_embed_token.swf" , "pageurl": "http://www.iguide.to/", "token":'#ed%h0#w18623jsda6523lDGD'}
iguide_user = {"rtmp": "","swfurl": "" , "pageurl": "", "token":""}
url_extracted = url.split(" ")
for entry in url_extracted:
if entry.startswith("rtmp"):
entry = entry.replace("rtmp=", "")
entry = entry.replace("rtmp://$OPT:rtmp-raw=", "")
iguide_user["rtmp"]=entry
elif entry.startswith("playpath"):
entry = entry.replace("playpath=", "")
iguide_user["playpath"]=entry
elif entry.startswith("swfUrl"):
entry = entry.replace("swfUrl=", "")
iguide_user["swfurl"]=entry
elif entry.startswith("pageUrl"):
entry = entry.replace("pageUrl=", "")
iguide_user["pageurl"]=entry
elif entry.startswith("token"):
entry = entry.replace("token=", "")
iguide_user["token"]=entry
if url.endswith("Conn=S:OK") == True:
# plugintools.log("No tiene sufijo. Lo añadimos... ")
url = url.replace("Conn=S:OK", "")
if url.startswith("rtmp://$OPT") == True:
# plugintools.log("No tiene prefijo. Lo añadimos... ")
url = url.replace("rtmp://$OPT:rtmp-raw=", "")
plugintools.log("URL Iguide= " + url)
params["url"] = url
play_iguide(iguide_palco, iguide_user)
def resolve_ucaster(params):
plugintools.log("[tvspot 4.0].resolve_ucaster " + repr(params) )
url = params.get("url")
url = url.strip()
plugintools.log("URL antes de resolver= " + url)
if url.endswith("Conn=S:OK") == False:
plugintools.log("No tiene sufijo. Lo añadimos... ")
url = url + " Conn=S:OK"
if url.startswith("rtmp://$OPT") == True:
plugintools.log("No tiene prefijo. Lo añadimos... ")
url = "rtmp://$OPT:rtmp-raw=" + url
plugintools.log("URL Ucaster= " + url)
params["url"] = url
return params
def play_iguide(iguide_palco, iguide_user):
plugintools.log("[tvspot 4.0].iGuide tvspot= " + repr(iguide_palco) )
plugintools.log("[tvspot 4.0].iGuide User= " + repr(iguide_user) )
playlist = xbmc.PlayList( xbmc.PLAYLIST_VIDEO )
playlist.clear()
url = iguide_user.get("rtmp") + " playpath=" + iguide_user.get("playpath") + " swfUrl=" + iguide_user.get("swfurl") + " live=1 pageUrl=" + iguide_user.get("pageurl") + " token=" + iguide_user.get("token")
url_user = iguide_user.get("rtmp") + " playpath=" + iguide_user.get("playpath") + " swfUrl=" + iguide_user.get("swfurl") + " live=1 pageUrl=" + iguide_user.get("pageurl") + " token=" + iguide_user.get("token")
url_palco = iguide_palco.get("rtmp") + " playpath=" + iguide_user.get("playpath") + " swfUrl=" + iguide_palco.get("swfurl") + " live=1 pageUrl=" + iguide_user.get("pageurl") + " token=" + iguide_palco.get("token")
url_refixed = iguide_palco.get("rtmp") + " playpath=" + iguide_user.get("playpath") + " swfUrl=" + iguide_palco.get("swfurl") + " live=1 pageUrl=" + iguide_palco.get("pageurl") + " token=" + iguide_palco.get("token") + " Conn=S:OK"
msg = "Resolviendo enlace ... "
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('tvspot', msg, 3 , art+'icon.png'))
playlist.add(url_user)
plugintools.log("[tvspot 0.2.87b playing URL playlist... "+url_palco)
playlist.add(url_palco)
plugintools.log("[tvspot 0.2.87b fixing URL by tvspot... "+url_refixed)
playlist.add(url_refixed)
plugintools.log("[tvspot 0.2.87b parsing URL... "+url_user)
# xbmc.Player( xbmc.PLAYER_CORE_MPLAYER ).play(playlist)
|
gpl-2.0
| -3,087,301,862,750,812,000 | 40.096 | 235 | 0.599377 | false |
DiCarloLab-Delft/PycQED_py3
|
pycqed/tests/test_chevron_sim.py
|
1
|
1198
|
import numpy as np
from pycqed.simulations import chevron_sim as chs
class TestChevronSim:
@classmethod
def setup_class(cls):
cls.e_min = -0.0322
cls.e_max = 0.0322
cls.e_points = 20
cls.time_stop = 60
cls.time_step = 4
cls.bias_tee = lambda self, t, a, b, c: t * a ** 2 + t * b + c
# self.distortion = lambda t: self.lowpass_s(t, 2)
cls.distortion = lambda self, t: self.bias_tee(t, 0., 2e-5, 1)
cls.time_vec = np.arange(0., cls.time_stop, cls.time_step)
cls.freq_vec = np.linspace(cls.e_min, cls.e_max, cls.e_points)
def test_output_shape(self):
"""
Trivial test that just checks if there is nothing that broke the
chevron sims in a way that breaks it.
"""
result = chs.chevron(2.*np.pi*(6.552 - 4.8),
self.e_min, self.e_max,
self.e_points,
np.pi*0.0385,
self.time_stop,
self.time_step,
self.distortion)
assert np.shape(result) == (len(self.freq_vec), len(self.time_vec)+1)
|
mit
| 2,995,853,736,358,412,000 | 32.277778 | 77 | 0.507513 | false |
Fendoe/open-hackathon-o
|
open-hackathon-server/src/hackathon/docker/hosted_docker.py
|
1
|
24562
|
# -*- coding: utf-8 -*-
#
# -----------------------------------------------------------------------------------
# Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -----------------------------------------------------------------------------------
import sys
sys.path.append("..")
from hackathon import (
RequiredFeature,
Component,
Context,
)
from hackathon.database.models import (
Experiment,
DockerContainer,
HackathonAzureKey,
PortBinding,
DockerHostServer,
)
from hackathon.constants import (
EStatus,
PortBindingType,
VEStatus,
HEALTH,
)
from compiler.ast import (
flatten,
)
from threading import (
Lock,
)
from hackathon.template.docker_template_unit import (
DockerTemplateUnit,
)
from hackathon.azureformation.endpoint import (
Endpoint
)
from docker_formation_base import (
DockerFormationBase,
)
from hackathon.azureformation.service import (
Service,
)
from hackathon.hackathon_response import (
internal_server_error
)
from hackathon.constants import (
HEALTH_STATUS,
)
import json
import requests
from datetime import timedelta
class HostedDockerFormation(DockerFormationBase, Component):
template_manager = RequiredFeature("template_manager")
hackathon_manager = RequiredFeature("hackathon_manager")
scheduler = RequiredFeature("scheduler")
"""
Docker resource management based on docker remote api v1.18
Host resources are required. An Azure key is required in the case of Azure.
"""
application_json = {'content-type': 'application/json'}
host_ports = []
host_port_max_num = 30
docker_host_manager = RequiredFeature("docker_host_manager")
def __init__(self):
self.lock = Lock()
def report_health(self):
"""Report health of DockerHostServers
:rtype: dict
:return: health status item of docker: OK when all servers are running, Warning if only some are running, Error if none are running
"""
try:
hosts = self.db.find_all_objects(DockerHostServer)
alive = 0
for host in hosts:
if self.ping(host):
alive += 1
if alive == len(hosts):
return {
HEALTH.STATUS: HEALTH_STATUS.OK
}
elif alive > 0:
return {
HEALTH.STATUS: HEALTH_STATUS.WARNING,
HEALTH.DESCRIPTION: 'at least one docker host servers are down'
}
else:
return {
HEALTH.STATUS: HEALTH_STATUS.ERROR,
HEALTH.DESCRIPTION: 'all docker host servers are down'
}
except Exception as e:
return {
HEALTH.STATUS: HEALTH_STATUS.ERROR,
HEALTH.DESCRIPTION: e.message
}
def get_available_host_port(self, docker_host, private_port):
"""
We use a double check to ensure ports do not conflict: first we ask the host machine for the ports
already in use, but when several threads run concurrently the interval between two requests can be so
short that the host machine has not yet registered the port handed to the first thread, so the second
thread could be given the same port. To avoid this, the static variable host_ports caches the latest
host_port_max_num assigned ports, and every access to host_ports is synchronized.
To save space, the cached ports are released once their number exceeds host_port_max_num.
:param docker_host:
:param private_port:
:return:
"""
self.log.debug("try to assign docker port %d on server %r" % (private_port, docker_host))
containers = self.__containers_info(docker_host)
host_ports = flatten(map(lambda p: p['Ports'], containers))
# todo if azure return -1
def sub(port):
return port["PublicPort"] if "PublicPort" in port else -1
host_public_ports = map(lambda x: sub(x), host_ports)
return self.__get_available_host_port(host_public_ports, private_port)
def stop(self, name, **kwargs):
"""
stop a container
:param name: container's name
:param docker_host: host machine where container running
:return:
"""
container = kwargs["container"]
expr_id = kwargs["expr_id"]
docker_host = self.docker_host_manager.get_host_server_by_id(container.host_server_id)
if self.__get_container(name, docker_host) is not None:
containers_url = '%s/containers/%s/stop' % (self.get_vm_url(docker_host), name)
req = requests.post(containers_url)
self.log.debug(req.content)
self.__stop_container(expr_id, container, docker_host)
def delete(self, name, **kwargs):
"""
delete a container
:param name:
:param docker_host:
:return:
"""
container = kwargs["container"]
expr_id = kwargs["expr_id"]
docker_host = self.docker_host_manager.get_host_server_by_id(container.host_server_id)
containers_url = '%s/containers/%s?force=1' % (self.get_vm_url(docker_host), name)
req = requests.delete(containers_url)
self.log.debug(req.content)
self.__stop_container(expr_id, container, docker_host)
def start(self, unit, **kwargs):
"""
In this function, we create a container and then start a container
:param unit: docker template unit
:param docker_host:
:return:
"""
virtual_environment = kwargs["virtual_environment"]
hackathon = kwargs["hackathon"]
experiment = kwargs["experiment"]
container_name = unit.get_name()
host_server = self.docker_host_manager.get_available_docker_host(1, hackathon)
container = DockerContainer(experiment,
name=container_name,
host_server_id=host_server.id,
virtual_environment=virtual_environment,
image=unit.get_image_with_tag())
self.db.add_object(container)
self.db.commit()
# port binding
ps = map(lambda p:
[p.port_from, p.port_to],
self.__assign_ports(experiment, host_server, virtual_environment, unit.get_ports()))
# guacamole config
guacamole = unit.get_remote()
port_cfg = filter(lambda p:
p[DockerTemplateUnit.PORTS_PORT] == guacamole[DockerTemplateUnit.REMOTE_PORT],
unit.get_ports())
if len(port_cfg) > 0:
gc = {
"displayname": container_name,
"name": container_name,
"protocol": guacamole[DockerTemplateUnit.REMOTE_PROTOCOL],
"hostname": host_server.public_ip,
"port": port_cfg[0]["public_port"]
}
if DockerTemplateUnit.REMOTE_USERNAME in guacamole:
gc["username"] = guacamole[DockerTemplateUnit.REMOTE_USERNAME]
if DockerTemplateUnit.REMOTE_PASSWORD in guacamole:
gc["password"] = guacamole[DockerTemplateUnit.REMOTE_PASSWORD]
# save guacamole config into DB
virtual_environment.remote_paras = json.dumps(gc)
exist = self.__get_container(container_name, host_server)
if exist is not None:
container.container_id = exist["Id"]
host_server.container_count += 1
self.db.commit()
else:
container_config = unit.get_container_config()
# create container
try:
container_create_result = self.__create(host_server, container_config, container_name)
except Exception as e:
self.log.error(e)
self.log.error("container %s fail to create" % container_name)
return None
container.container_id = container_create_result["Id"]
# start container
try:
self.__start(host_server, container_create_result["Id"])
host_server.container_count += 1
self.db.commit()
except Exception as e:
self.log.error(e)
self.log.error("container %s fail to start" % container["Id"])
return None
# check
if self.__get_container(container_name, host_server) is None:
self.log.error(
"container %s has started, but can not find it in containers' info, maybe it exited again."
% container_name)
return None
self.log.debug("starting container %s is ended ... " % container_name)
virtual_environment.status = VEStatus.RUNNING
self.db.commit()
return container
def get_vm_url(self, docker_host):
return 'http://%s:%d' % (docker_host.public_dns, docker_host.public_docker_api_port)
def pull_image(self, context):
docker_host, image_name, tag = context.docker_host, context.image_name, context.tag
pull_image_url = self.get_vm_url(docker_host) + "/images/create?fromImage=" + image_name + '&tag=' + tag
self.log.debug(" send request to pull image:" + pull_image_url)
return requests.post(pull_image_url)
def get_pulled_images(self, docker_host):
get_images_url = self.get_vm_url(docker_host) + "/images/json?all=0"
current_images_info = json.loads(requests.get(get_images_url).content) # [{},{},{}]
current_images_tags = map(lambda x: x['RepoTags'], current_images_info) # [[],[],[]]
return flatten(current_images_tags) # [ imange:tag, image:tag ]
def ensure_images(self):
hackathons = self.hackathon_manager.get_online_hackathons()
map(lambda h: self.__ensure_images_for_hackathon(h), hackathons)
def check_container_status_is_normal(self, docker_container):
"""check container's running status on docker host
if status is Running or Restarting returns True , else returns False
:type docker_container: DockerContainer
:param docker_container: the container that you want to check
:type boolean
:return True: the container running status is running or restarting , else returns False
"""
docker_host = self.db.find_first_object_by(DockerHostServer, id=docker_container.host_server_id)
if docker_host is not None:
container_info = self.__get_container_info_by_container_id(docker_host, docker_container.container_id)
if container_info is None:
return False
return container_info['State']['Running'] or container_info['State']['Restarting']
else:
return False
def ping(self, docker_host):
"""Ping docker host to check running status
:type docker_host : DockerHostServer
:param docker_host: the host whose docker service running status you want to check
:type boolean
:return: True: running status is OK, else return False
"""
try:
ping_url = '%s/_ping' % self.__get_vm_url(docker_host)
req = requests.get(ping_url)
self.log.debug(req.content)
return req.status_code == 200 and req.content == 'OK'
except Exception as e:
self.log.error(e)
return False
# --------------------------------------------- helper function ---------------------------------------------#
def __name_match(self, id, lists):
for list in lists:
if id in list:
return True
return False
def __get_schedule_job_id(self, hackathon):
return "pull_images_for_hackathon_%s" % hackathon.id
def __ensure_images_for_hackathon(self, hackathon):
# only ensure those alauda is disabled
if hackathon.is_alauda_enabled():
self.log.debug("schedule job of hackathon '%s(%d)' removed for alauda enabled" %
(hackathon.name, hackathon.id))
self.scheduler.remove_job(self.__get_schedule_job_id(hackathon))
return
self.log.debug("adding schedule job to ensure images for hackathon [%d]%s" % (hackathon.id, hackathon.name))
next_run_time = self.util.get_now() + timedelta(seconds=3)
context = Context(hackathon_id=hackathon.id)
self.scheduler.add_interval(feature="template_manager",
method="pull_images_for_hackathon",
id=self.__get_schedule_job_id(hackathon),
context=context,
next_run_time=next_run_time,
minutes=60)
def __get_vm_url(self, docker_host):
return 'http://%s:%d' % (docker_host.public_dns, docker_host.public_docker_api_port)
def __clear_ports_cache(self):
"""
Release the cached ports once their number exceeds host_port_max_num.
If a thread is currently applying for new ports, postpone this operation to the next loop:
the host machine has not updated its port information yet, so releasing the cache now
would lose track of the newly assigned ports.
:return:
"""
num = self.db.count(Experiment, Experiment.status == EStatus.STARTING)
if num > 0:
self.log.debug("there are %d experiment is starting, host ports will updated in next loop" % num)
return
self.log.debug("-----release ports cache successfully------")
self.host_ports = []
def __stop_container(self, expr_id, container, docker_host):
self.__release_ports(expr_id, docker_host)
docker_host.container_count -= 1
if docker_host.container_count < 0:
docker_host.container_count = 0
self.db.commit()
def __containers_info(self, docker_host):
containers_url = '%s/containers/json' % self.get_vm_url(docker_host)
req = requests.get(containers_url)
self.log.debug(req.content)
return self.util.convert(json.loads(req.content))
def __get_available_host_port(self, port_bindings, port):
"""
Simple lock mechanism: access to the static ports variable is synchronized. Because port_bindings is
not real-time, we also cache the most recently assigned ports; once the cache holds more than
host_port_max_num entries it is released to save space.
:param port_bindings:
:param port:
:return:
"""
self.lock.acquire()
try:
host_port = port + 10000
while host_port in port_bindings or host_port in self.host_ports:
host_port += 1
if host_port >= 65535:
self.log.error("port used up on this host server")
raise Exception("no port available")
if len(self.host_ports) >= self.host_port_max_num:
self.__clear_ports_cache()
self.host_ports.append(host_port)
self.log.debug("host_port is %d " % host_port)
return host_port
finally:
self.lock.release()
def __get_container(self, name, docker_host):
containers = self.__containers_info(docker_host)
return next((c for c in containers if name in c["Names"] or '/' + name in c["Names"]), None)
def __create(self, docker_host, container_config, container_name):
"""
only create a container, in this step, we cannot start a container.
:param docker_host:
:param container_config:
:param container_name:
:return:
"""
containers_url = '%s/containers/create?name=%s' % (self.get_vm_url(docker_host), container_name)
req = requests.post(containers_url, data=json.dumps(container_config), headers=self.application_json)
self.log.debug(req.content)
container = json.loads(req.content)
if container is None:
raise AssertionError("container is none")
return container
def __start(self, docker_host, container_id):
"""
start a container
:param docker_host:
:param container_id:
:return:
"""
url = '%s/containers/%s/start' % (self.get_vm_url(docker_host), container_id)
req = requests.post(url, headers=self.application_json)
self.log.debug(req.content)
def __get_available_public_ports(self, expr_id, host_server, host_ports):
self.log.debug("starting to get azure ports")
ep = Endpoint(Service(self.load_azure_key_id(expr_id)))
host_server_name = host_server.vm_name
host_server_dns = host_server.public_dns.split('.')[0]
public_endpoints = ep.assign_public_endpoints(host_server_dns, 'Production', host_server_name, host_ports)
if not isinstance(public_endpoints, list):
self.log.debug("failed to get public ports")
return internal_server_error('cannot get public ports')
self.log.debug("public ports : %s" % public_endpoints)
return public_endpoints
def load_azure_key_id(self, expr_id):
expr = self.db.get_object(Experiment, expr_id)
hak = self.db.find_first_object_by(HackathonAzureKey, hackathon_id=expr.hackathon_id)
return hak.azure_key_id
def __assign_ports(self, expr, host_server, ve, port_cfg):
"""
assign ports from host server
:param expr:
:param host_server:
:param ve:
:param port_cfg:
:return:
"""
# get 'host_port'
map(lambda p:
p.update(
{DockerTemplateUnit.PORTS_HOST_PORT: self.get_available_host_port(host_server, p[
DockerTemplateUnit.PORTS_PORT])}
),
port_cfg)
# get 'public' cfg
public_ports_cfg = filter(lambda p: DockerTemplateUnit.PORTS_PUBLIC in p, port_cfg)
host_ports = [u[DockerTemplateUnit.PORTS_HOST_PORT] for u in public_ports_cfg]
if self.util.safe_get_config("environment", "prod") == "local":
map(lambda cfg: cfg.update({DockerTemplateUnit.PORTS_PUBLIC_PORT: cfg[DockerTemplateUnit.PORTS_HOST_PORT]}),
public_ports_cfg)
else:
public_ports = self.__get_available_public_ports(expr.id, host_server, host_ports)
for i in range(len(public_ports_cfg)):
public_ports_cfg[i][DockerTemplateUnit.PORTS_PUBLIC_PORT] = public_ports[i]
binding_dockers = []
# update port binding
for public_cfg in public_ports_cfg:
binding_cloud_service = PortBinding(name=public_cfg[DockerTemplateUnit.PORTS_NAME],
port_from=public_cfg[DockerTemplateUnit.PORTS_PUBLIC_PORT],
port_to=public_cfg[DockerTemplateUnit.PORTS_HOST_PORT],
binding_type=PortBindingType.CLOUD_SERVICE,
binding_resource_id=host_server.id,
virtual_environment=ve,
experiment=expr,
url=public_cfg[DockerTemplateUnit.PORTS_URL]
if DockerTemplateUnit.PORTS_URL in public_cfg else None)
binding_docker = PortBinding(name=public_cfg[DockerTemplateUnit.PORTS_NAME],
port_from=public_cfg[DockerTemplateUnit.PORTS_HOST_PORT],
port_to=public_cfg[DockerTemplateUnit.PORTS_PORT],
binding_type=PortBindingType.DOCKER,
binding_resource_id=host_server.id,
virtual_environment=ve,
experiment=expr)
binding_dockers.append(binding_docker)
self.db.add_object(binding_cloud_service)
self.db.add_object(binding_docker)
self.db.commit()
local_ports_cfg = filter(lambda p: DockerTemplateUnit.PORTS_PUBLIC not in p, port_cfg)
for local_cfg in local_ports_cfg:
port_binding = PortBinding(name=local_cfg[DockerTemplateUnit.PORTS_NAME],
port_from=local_cfg[DockerTemplateUnit.PORTS_HOST_PORT],
port_to=local_cfg[DockerTemplateUnit.PORTS_PORT],
binding_type=PortBindingType.DOCKER,
binding_resource_id=host_server.id,
virtual_environment=ve,
experiment=expr)
binding_dockers.append(port_binding)
self.db.add_object(port_binding)
self.db.commit()
return binding_dockers
def __release_ports(self, expr_id, host_server):
"""
release the specified experiment's ports
"""
self.log.debug("Begin to release ports: expr_id: %d, host_server: %r" % (expr_id, host_server))
ports_binding = self.db.find_all_objects_by(PortBinding, experiment_id=expr_id)
if ports_binding is not None:
docker_binding = filter(
lambda u: self.util.safe_get_config("environment", "prod") != "local" and u.binding_type == 1,
ports_binding)
ports_to = [d.port_to for d in docker_binding]
if len(ports_to) != 0:
self.__release_public_ports(expr_id, host_server, ports_to)
for port in ports_binding:
self.db.delete_object(port)
self.db.commit()
self.log.debug("End to release ports: expr_id: %d, host_server: %r" % (expr_id, host_server))
def __release_public_ports(self, expr_id, host_server, host_ports):
ep = Endpoint(Service(self.load_azure_key_id(expr_id)))
host_server_name = host_server.vm_name
host_server_dns = host_server.public_dns.split('.')[0]
self.log.debug("starting to release ports ... ")
ep.release_public_endpoints(host_server_dns, 'Production', host_server_name, host_ports)
def __get_container_info_by_container_id(self, docker_host, container_id):
"""get a container info by container_id from a docker host
:type docker_host: DockerHostServer
:param docker_host: the docker host to search the container on
:type container_id: str|unicode
:param container_id: the id of the container to look up through the docker remote API
:return: dict object with the container info, or None if not found
"""
try:
get_container_url = self.get_vm_url(docker_host) + "/container/%s/json?all=0" % container_id
req = requests.get(get_container_url)
if req.status_code >= 200 and req.status_code < 300 :
container_info = json.loads(req.content)
return container_info
return None
except Exception as ex:
self.log.error(ex)
return None
|
mit
| 6,927,733,338,354,126,000 | 42.319224 | 134 | 0.587859 | false |
ISISComputingGroup/EPICS-inst_servers
|
server_common/channel_access.py
|
1
|
11463
|
from __future__ import absolute_import, print_function, unicode_literals, division
from time import sleep
# This file is part of the ISIS IBEX application.
# Copyright (C) 2012-2016 Science & Technology Facilities Council.
# All rights reserved.
#
# This program is distributed in the hope that it will be useful.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License v1.0 which accompanies this distribution.
# EXCEPT AS EXPRESSLY SET FORTH IN THE ECLIPSE PUBLIC LICENSE V1.0, THE PROGRAM
# AND ACCOMPANYING MATERIALS ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND. See the Eclipse Public License v1.0 for more details.
#
# You should have received a copy of the Eclipse Public License v1.0
# along with this program; if not, you can obtain a copy from
# https://www.eclipse.org/org/documents/epl-v10.php or
# http://opensource.org/licenses/eclipse-1.0.php
from BlockServer.core.macros import MACROS
from server_common.utilities import print_and_log
from concurrent.futures import ThreadPoolExecutor
# Number of threads to serve caputs
NUMBER_OF_CAPUT_THREADS = 20
try:
from genie_python.channel_access_exceptions import UnableToConnectToPVException, ReadAccessException
except ImportError:
class UnableToConnectToPVException(IOError):
"""
The system is unable to connect to a PV for some reason.
"""
def __init__(self, pv_name, err):
super(UnableToConnectToPVException, self).__init__("Unable to connect to PV {0}: {1}".format(pv_name, err))
class ReadAccessException(IOError):
"""
PV exists but its value is unavailable to read.
"""
def __init__(self, pv_name):
super(ReadAccessException, self).__init__("Read access denied for PV {}".format(pv_name))
try:
# noinspection PyUnresolvedReferences
from genie_python.genie_cachannel_wrapper import CaChannelWrapper, EXIST_TIMEOUT
except ImportError:
print("ERROR: No genie_python on the system can not import CaChannelWrapper!")
try:
from genie_python.genie_cachannel_wrapper import AlarmSeverity, AlarmCondition as AlarmStatus
except ImportError:
from enum import IntEnum
class AlarmSeverity(IntEnum):
"""
Enum for severity of alarm
"""
No = 0
Minor = 1
Major = 2
Invalid = 3
class AlarmStatus(IntEnum):
"""
Enum for status of alarm
"""
BadSub = 16
Calc = 12
Comm = 9
Cos = 8
Disable = 18
High = 4
HiHi = 3
HwLimit = 11
Link = 14
Lolo = 5
Low = 6
No = 0
Read = 1
ReadAccess = 20
Scam = 13
Simm = 19
Soft = 15
State = 7
Timeout = 10
UDF = 17
Write = 2
WriteAccess = 21
def _create_caput_pool():
"""
Returns: thread pool for the caputs, making sure it works for older versions of python
"""
try:
executor = ThreadPoolExecutor(max_workers=NUMBER_OF_CAPUT_THREADS, thread_name_prefix="ChannelAccess_Pool")
except TypeError:
executor = ThreadPoolExecutor(max_workers=NUMBER_OF_CAPUT_THREADS)
print("WARNING: thread_name_prefix does not exist for ThreadPoolExecutor in this python, "
"caput pool has generic name.")
return executor
class ChannelAccess(object):
# Create a thread pool so that threads are reused and the ca contexts that each thread gets are shared. This also
# caps the number of ca library threads. 20 is chosen as being probably enough but limited.
thread_pool = _create_caput_pool()
@staticmethod
def wait_for_tasks():
"""
Wait for all requested tasks to complete, i.e. all caputs.
It does this by shutting down the current threadpool waiting for all tasks to complete and then create a new
pool.
"""
ChannelAccess.thread_pool.shutdown()
ChannelAccess.thread_pool = _create_caput_pool()
@staticmethod
def caget(name, as_string=False, timeout=None):
"""Uses CaChannelWrapper from genie_python to get a pv value. We import CaChannelWrapper when used as this means
the tests can run without having genie_python installed
Args:
name (string): The name of the PV to be read
as_string (bool, optional): Set to read a char array as a string, defaults to false
timeout (float, None): timeout value to use; None for use default timeout
Returns:
obj : The value of the requested PV, None if no value was read
"""
try:
if timeout is None:
return CaChannelWrapper.get_pv_value(name, as_string)
else:
return CaChannelWrapper.get_pv_value(name, as_string, timeout=timeout)
except Exception as err:
# Probably has timed out
print_and_log(str(err))
return None
@staticmethod
def caput(name, value, wait=False, set_pv_value=None, safe_not_quick=True):
"""
Uses CaChannelWrapper from genie_python to set a pv value. Not waiting puts the call on a separate thread,
so ordering is no longer guaranteed; if the calls take time, a queue of put tasks will form.
We import CaChannelWrapper when used as this means the tests can run without having genie_python installed
Args:
name (string): The name of the PV to be set
value (object): The data to send to the PV
wait (bool, optional): Wait for the PV to set before returning
set_pv_value: function to call to set a pv, used only in testing; None to use CaChannelWrapper set value
safe_not_quick (bool): True to run all checks (e.g. the disp check) while setting the pv;
False to skip the checks and just write the value
Returns:
None: if wait is True (the put runs synchronously in this thread)
Future: if wait is False (the put is submitted to the thread pool)
"""
if set_pv_value is None:
# We need to put the default here rather than as a python default argument because the linux build does
# not have CaChannelWrapper. The argument default would be looked up at class load time, causing the
# linux build to fail to load the entire class.
set_pv_value = CaChannelWrapper.set_pv_value
def _put_value():
set_pv_value(name, value, wait, safe_not_quick=safe_not_quick)
if wait:
# If waiting then run in this thread.
_put_value()
return None
else:
# If not waiting, run in a different thread.
# Even if not waiting genie_python sometimes takes a while to return from a set_pv_value call.
return ChannelAccess.thread_pool.submit(_put_value)
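# A minimal usage sketch ("MY:TEST:PV" is a hypothetical PV name):
#   ChannelAccess.caput("MY:TEST:PV", 42, wait=True)   # blocks, returns None
#   future = ChannelAccess.caput("MY:TEST:PV", 42)     # queued on the pool, returns a Future
#   ChannelAccess.wait_for_tasks()                     # wait for all queued puts to finish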
@staticmethod
def caput_retry_on_fail(pv_name, value, retry_count=5, safe_not_quick=True):
"""
Write to a pv and check the value is set, retry if not; raise if run out of retries
Args:
pv_name: pv name to write to
value: value to write
retry_count: number of retries
safe_not_quick (bool): True to run all checks (e.g. the disp check) while setting the pv;
False to skip the checks and just write the value
Raises:
IOError: if pv can not be set
"""
current_value = None
for _ in range(retry_count):
ChannelAccess.caput(pv_name, value, wait=True, safe_not_quick=safe_not_quick)
current_value = ChannelAccess.caget(pv_name)
if current_value == value:
break
else:
raise IOError("PV value can not be set, pv {}, was {} expected {}".format(pv_name, current_value, value))
@staticmethod
def pv_exists(name, timeout=None):
"""
See if the PV exists.
Args:
name (string): The PV name.
timeout(optional): How long to wait for the PV to "appear".
Returns:
True if exists, otherwise False.
"""
if timeout is None:
timeout = EXIST_TIMEOUT
return CaChannelWrapper.pv_exists(name, timeout)
@staticmethod
def add_monitor(name, call_back_function):
"""
Add a callback to a pv which responds on a monitor (i.e. value change). This is currently only
tested for numbers.
Args:
name: name of the pv
call_back_function: the callback function, arguments are value,
alarm severity (AlarmSeverity),
alarm status (AlarmStatus)
"""
CaChannelWrapper.add_monitor(name, call_back_function)
@staticmethod
def poll():
"""
Flush the send buffer and execute any outstanding background activity for all connected pvs.
NB Connected pv is one which is in the cache
"""
CaChannelWrapper.poll()
@staticmethod
def clear_monitor(name):
"""
Clears the monitor on a pv if it exists
"""
try:
CaChannelWrapper.get_chan(name).clear_channel()
except UnableToConnectToPVException:
pass
class ManagerModeRequiredException(Exception):
"""
Exception to be thrown if manager mode was required, but not enabled, for an operation.
"""
def __init__(self, *args, **kwargs):
super(ManagerModeRequiredException, self).__init__(*args, **kwargs)
def verify_manager_mode(channel_access=ChannelAccess(), message="Operation must be performed in manager mode"):
"""
Verifies that manager mode is active, throwing an error if it was not active.
Args:
channel_access (ChannelAccess, optional): the channel access class to use
message (str): Message given to exception if manager mode was not enabled.
Raises:
ManagerModeRequiredException: if manager mode was not enabled or was unable to connect
"""
try:
is_manager = channel_access.caget("{}CS:MANAGER".format(MACROS["$(MYPVPREFIX)"])).lower() == "yes"
except UnableToConnectToPVException as e:
raise ManagerModeRequiredException("Manager mode is required, but the manager mode PV did not connect "
"(caused by: {})".format(e))
except ReadAccessException as e:
raise ManagerModeRequiredException("Manager mode is required, but the manager mode PV could not be read "
"(caused by: {})".format(e))
except Exception as e:
raise ManagerModeRequiredException("Manager mode is required, but an unknown exception occurred "
"(caused by: {})".format(e))
if not is_manager:
raise ManagerModeRequiredException(message)
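# A hedged usage sketch: call verify_manager_mode() at the start of any operation that
# must only run in manager mode; it returns silently when the CS:MANAGER PV reads "yes"
# and raises ManagerModeRequiredException otherwise.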
def maximum_severity(*alarms):
"""
Get the alarm with maximum severity (or first if items have equal severity)
Args:
*alarms (Tuple[AlarmSeverity, AlarmStatus]): alarms to choose from
Returns:
(Optional[Tuple[AlarmSeverity, AlarmStatus]]) alarm with maximum severity; none for no arguments
"""
maximum_severity_alarm = None
for alarm in alarms:
if maximum_severity_alarm is None or alarm[0] > maximum_severity_alarm[0]:
maximum_severity_alarm = alarm
return maximum_severity_alarm
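# Hedged example: maximum_severity((AlarmSeverity.Minor, AlarmStatus.High),
# (AlarmSeverity.Major, AlarmStatus.HiHi)) returns the Major alarm, since severities
# compare by their integer value; with no arguments it returns None.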
|
bsd-3-clause
| -3,149,935,148,602,946,000 | 36.460784 | 121 | 0.634651 | false |
lwahlmeier/python-litesockets
|
tests/sslTests.py
|
1
|
3170
|
from __future__ import print_function
import unittest, hashlib, logging
import litesockets, time
from . import utils
import os
try:
xrange(1)
except:
xrange=range
DIRNAME = os.path.dirname(__file__)
TEST_STRING = ("TEST"*100).encode('utf-8')
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s")
log = logging.getLogger("root")
log.setLevel(logging.DEBUG)
class TestSSL(unittest.TestCase):
def setUp(self):
self.SE = litesockets.SocketExecuter()
def tearDown(self):
self.SE.stop()
def test_SimpleSSLSendTest(self):
ta = utils.testClass(self.SE)
server = self.SE.createTCPServer("localhost", 0)
server.setSSLInfo(certfile="%s/tmp.crt"%(DIRNAME), keyfile="%s/tmp.key"%(DIRNAME), do_handshake_on_connect=True)
server.setOnClient(ta.accept)
server.start()
PORT = server.getSocket().getsockname()[1]
client = self.SE.createTCPClient("localhost", PORT)
test_client = utils.testClass(self.SE)
client.setReader(test_client.read)
client.enableSSL()
client.startSSL()
client.connect()
client.write(TEST_STRING)
utils.waitTill(lambda X: ta.read_len < X, len(TEST_STRING) , 500)
self.assertEquals(ta.reads[0], TEST_STRING)
ta.reads.pop(0)
ta.clients[0].write(TEST_STRING)
utils.waitTill(lambda X: test_client.read_len <= X, 0, 500)
self.assertEquals(test_client.reads[0], TEST_STRING)
print("Done Read")
time.sleep(1)
client.close()
print("{}".format(client))
server.close()
utils.waitTill(lambda X: len(self.SE.getClients()) > X, 0, 5000)
utils.waitTill(lambda X: len(self.SE.getServers()) > X, 0, 5000)
print("Done Waiting")
self.assertEquals(0, len(self.SE.getClients()))
self.assertEquals(0, len(self.SE.getServers()))
def test_SSLsendLots(self):
LOOPS = 500
STR_SIZE = len(TEST_STRING)
BYTES = STR_SIZE*LOOPS
test = utils.testClass(self.SE)
server = self.SE.createTCPServer("localhost", 0)
server.setSSLInfo(certfile="%s/tmp.crt"%(DIRNAME), keyfile="%s/tmp.key"%(DIRNAME), do_handshake_on_connect=True)
server.setOnClient(test.accept)
server.start()
PORT = server.getSocket().getsockname()[1]
client = self.SE.createTCPClient("localhost", PORT)
test_client = utils.testClass(self.SE)
client.setReader(test_client.read)
client.enableSSL()
client.connect()
client.startSSL()
baseSha = hashlib.sha256()
for i in xrange(0, LOOPS):
baseSha.update(TEST_STRING)
client.write(TEST_STRING)
newSha = baseSha.hexdigest()
utils.waitTill(lambda X: test.read_len < X, BYTES, 5000)
self.assertEquals(test.read_len, BYTES)
self.assertEquals(hashlib.sha256(b''.join(test.reads)).hexdigest(), newSha)
test.clients[0].write(b''.join(test.reads))
utils.waitTill(lambda X: test_client.read_len < X, BYTES, 5000)
self.assertEquals(test.read_len, BYTES)
self.assertEquals(hashlib.sha256(b''.join(test_client.reads)).hexdigest(), newSha)
class TestSSLSelect(TestSSL):
def setUp(self):
self.SE = litesockets.SocketExecuter(forcePlatform="win")
def tearDown(self):
self.SE.stop()
|
unlicense
| 4,541,997,351,854,999,600 | 28.90566 | 116 | 0.679495 | false |
tedunderwood/GenreProject
|
python/piketty/fifteenwordsnippets.py
|
1
|
11248
|
# fifteenwordsnippets.py
# A script that searches a HathiTrust corpus of 6,942 volumes (1700-1923), plus Hoyt & Richard's
# corpus of 808 vols (1923-1950), for words related to money. It takes seven words on either
# side of those words to create a snippet.
# In cases where the possibly-monetary word is ambiguous, e.g. "pounds," it runs the central
# seven words of the snippet through a regularized logistic model (created by model_contexts)
# in order to make a prediction about the likelihood that this word refers to money. The
# model I used is based on 700 manually-tagged snippets; it's about 87% accurate,
# five-fold crossvalidated.
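# As a hedged illustration (not a real example from the corpus): for a hit on the word
# "pounds", the snippet is the seven words before it, the word itself, and the seven
# words after it (fifteen words in all); only the central seven words of that window
# are passed to the logistic model when the word is ambiguous.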
import modelingcounter
import os, sys
import SonicScrewdriver as utils
import csv
import pickle
from bagofwords import WordVector, StandardizingVector
from sklearn.linear_model import LogisticRegression
# We start with a number of functions borrowed from other scripts; these were used to
# generate the logistic model, so we use them also here to clean and normalize snippets.
punctuple = ('.', ',', '?', '!', ';', '"', '“', '”', ':', '--', '—', ')', '(', "'", "`", "[", "]", "{", "}")
def all_nonalphanumeric(astring):
nonalphanum = True
for character in astring:
if character.isalpha() or character.isdigit():
nonalphanum = False
break
return nonalphanum
def strip_punctuation(astring):
global punctuple
keepclipping = True
suffix = ""
while keepclipping == True and len(astring) > 1:
keepclipping = False
if astring.endswith(punctuple):
suffix = astring[-1:] + suffix
astring = astring[:-1]
keepclipping = True
keepclipping = True
prefix = ""
while keepclipping == True and len(astring) > 1:
keepclipping = False
if astring.startswith(punctuple):
prefix = prefix + astring[:1]
astring = astring[1:]
keepclipping = True
return(prefix, astring, suffix)
def as_wordlist(line):
''' Converts a line into a list of words, splitting
tokens brutally and unreflectively at punctuation.
One of the effects will be to split possessives into noun
and s. But this might not be a bad thing for current
purposes.
'''
line = line.replace('”', ' ')
line = line.replace(':', ' ')
line = line.replace(';', ' ')
line = line.replace('—', ' ')
line = line.replace('--', ' ')
line = line.replace('.', ' ')
line = line.replace(',', ' ')
line = line.replace('-', ' ')
line = line.replace('—', ' ')
line = line.replace("'", ' ')
line = line.replace('"', ' ')
# That's not the most efficient way to do this computationally,
# but it prevents me from having to look up the .translate
# method.
words = line.split(' ')
wordlist = list()
for word in words:
word = word.lower()
prefix, word, suffix = strip_punctuation(word)
# In case we missed anything.
if len(word) > 0 and not all_nonalphanumeric(word):
wordlist.append(word)
return wordlist
def is_money(wordlist, WINDOWRADIUS, model, features, standardizer):
# We're getting a wordlist generated by WINDOWRADIUS, but
# we only want the central seven words for our model.
startindex = WINDOWRADIUS - 3
endindex = WINDOWRADIUS + 4
modelsegment = ' '.join(wordlist[startindex : endindex])
# You'd think we could just take a list of words, but in
# generating the model we ran strings through a particular
# tokenizing process, and we should replicate that here.
normalizedlist = as_wordlist(modelsegment)
vector = WordVector(normalizedlist)
vector.selectfeatures(features)
# This turns a sparse dictionary into an array with zeroes
# for missing features.
vector.normalizefrequencies()
# raw counts are divided by total counts.
vector.standardizefrequencies(standardizer)
# features are now centered on the means, and divided by
# standard deviations, calculated on the training set
classlabel = model.predict(vector.features)[0]
if classlabel == 1:
return True
elif classlabel == 0:
return False
else:
print("ANOMALY!")
print(classlabel)
return False
# Cause that's how I do error handling.
# Main script.
# Let's load the model.
modelfolder = "/Volumes/TARDIS/work/moneycontext/"
modelpath = modelfolder + "logisticmodel.p"
with open(modelpath, mode = 'rb') as f:
logisticmodel = pickle.load(f)
standardizerpath = modelfolder + 'standardizer.p'
with open(standardizerpath, mode = 'rb') as f:
standardizer = pickle.load(f)
featurepath = modelfolder + 'featurelist.p'
with open(featurepath, mode = 'rb') as f:
features = pickle.load(f)
# Now load HathiTrust metadata.
rows, columns, table = utils.readtsv('/Volumes/TARDIS/work/metadata/MergedMonographs.tsv')
ambiguouswords = {'crown', 'crowns', 'guinea', 'guineas', 'nickel', 'sovereign', 'sovereigns', 'pound', 'pounds', 'quid'}
moneywords = {'dollar', 'dollars', 'dime', 'dimes', 'nickel', 'nickels', 'pound', 'pounds', 'shilling', 'shillings', 'sovereign', 'sovereigns','cent', 'cents', 'centime', 'centimes', 'crown', 'crowns', 'halfcrown', 'half-crown','penny', 'pennies', 'pence', 'farthing', 'farthings', 'franc', 'francs', 'guilder', 'guilders', 'florin', 'florins', 'guinea', 'guineas', "ha'penny", 'tuppence', 'twopence', 'sixpence', '|arabicprice|', '|price|', 'quid'}
# Words I explicitly decided not to include: 'quarter', 'quarters', 'mark', 'marks.' Monetary uses
# seemed rare enough relative to others that they'd be more likely to introduce noise than to help.
# |arabicprice| is a code the tokenizer in modelingcounter produces whenever it encounters
# a number connected to £, $, ¢, s, or d. In the output we convert that to |price|, for no very
# good reason.
wealthwords = {'fortune', 'fortunes', 'wealth', 'rich', 'riches', 'money', 'moneys', 'fund', 'funds', 'sum', 'sums', 'price', 'prices', 'priced'}
# This is by no means an exhaustive list. Owe, loan, borrowed, etc.
# If we really want to get at the full range of words potentially
# associated with money, topic modeling would be an appropriate lever.
# We can perhaps enumerate currency terms intuitively, but not these.
alltargetwords = moneywords
sourcedir = "/Volumes/TARDIS/work/moneytexts/"
filelist = os.listdir(sourcedir)
filelist = [x for x in filelist if x.endswith(".txt")]
contexts = []
WINDOWRADIUS = 7
ctr = 0
for filename in filelist:
htid = utils.pairtreelabel(filename.replace('.fic.txt', ''))
if htid not in rows:
print(htid)
continue
else:
date = utils.simple_date(htid, table)
filepath = os.path.join(sourcedir, filename)
with open(filepath, encoding = 'utf-8') as f:
filelines = f.readlines()
pagelist = [filelines]
# The wordcounter module expects a list of pages, each of which is a list of lines.
# Ebooks have no pages -- at least as I currently receive them -- so we treat it
# all as one giant page.
tokenstream = modelingcounter.makestream(pagelist)
newcontexts = modelingcounter.extract_snippets(tokenstream, WINDOWRADIUS, alltargetwords)
approvedcontexts = []
for snippet, snippettomodel in newcontexts:
keyword = snippettomodel[WINDOWRADIUS]
keyword = keyword.lower()
prefix, keyword, suffix = strip_punctuation(keyword)
if keyword in wealthwords:
category = 'wealth'
elif keyword in ambiguouswords:
currency = is_money(snippettomodel, WINDOWRADIUS, logisticmodel, features, standardizer)
if currency:
category = 'money'
else:
category = "notmoney"
elif keyword in moneywords:
category = 'money'
else:
print('ANOMALY: ' + keyword)
# Cause that's how I do error handling.
category = 'null'
if category == 'money':
approvedcontexts.append((htid, date, snippet, keyword, category))
print(ctr)
ctr += 1
outfile = "/Volumes/TARDIS/work/moneycontext/twentyfivesnippets.tsv"
with open(outfile, mode='a', encoding='utf-8') as f:
for context in approvedcontexts:
htid, date, alist, keyword, category = context
snippet = " ".join(alist)
snippet = snippet.replace('\t', '')
# Because we don't want stray tabs in our tab-separated values.
f.write(htid + '\t' + str(date) + '\t' + keyword + '\t' + category + '\t' + snippet + '\n')
sourcedir = "/Volumes/TARDIS/work/US_NOVELS_1923-1950/"
filelist = os.listdir(sourcedir)
fileset = set([x for x in filelist if x.endswith(".txt")])
filelist = list(fileset)
metafile = os.path.join(sourcedir, "US_NOVELS_1923-1950_META.txt")
datedict = dict()
dateset = set()
with open(metafile, newline='', encoding = 'utf-8') as f:
reader = csv.reader(f)
for fields in reader:
idcode = fields[0]
date = int(fields[8])
datedict[idcode] = date
dateset.add(date)
for filename in filelist:
htid = utils.pairtreelabel(filename.replace('.txt', ''))
if htid not in datedict:
print(htid)
continue
else:
date = datedict[htid]
filepath = os.path.join(sourcedir, filename)
with open(filepath, encoding = 'utf-8') as f:
filelines = f.readlines()
pagelist = [filelines]
# The wordcounter module expects a list of pages, each of which is a list of lines.
# Ebooks have no pages -- at least as I currently receive them -- so we treat it
# all as one giant page.
tokenstream = modelingcounter.makestream(pagelist)
newcontexts = modelingcounter.extract_snippets(tokenstream, WINDOWRADIUS, alltargetwords)
approvedcontexts = []
for snippet, snippettomodel in newcontexts:
keyword = snippettomodel[WINDOWRADIUS]
keyword = keyword.lower()
prefix, keyword, suffix = strip_punctuation(keyword)
if keyword in wealthwords:
category = 'wealth'
elif keyword in ambiguouswords:
currency = is_money(snippettomodel, WINDOWRADIUS, logisticmodel, features, standardizer)
if currency:
category = 'money'
else:
category = "notmoney"
elif keyword in moneywords:
category = 'money'
else:
print('ANOMALY: ' + keyword)
# Cause that's how I do error handling.
category = 'null'
if category == 'money':
approvedcontexts.append((htid, date, snippet, keyword, category))
outfile = "/Volumes/TARDIS/work/moneycontext/twentyfivesnippets.tsv"
with open(outfile, mode='a', encoding='utf-8') as f:
for context in approvedcontexts:
htid, date, alist, keyword, category = context
snippet = " ".join(alist)
snippet = snippet.replace('\t', '')
# Because we don't want stray tabs in our tab-separated values.
f.write(htid + '\t' + str(date) + '\t' + keyword + '\t' + category + '\t' + snippet + '\n')
|
mit
| 1,693,274,650,786,676,000 | 33.460123 | 449 | 0.644205 | false |
matthewnorman/chronosphere
|
cronparse.py
|
1
|
7269
|
import re
import time
import datetime
CRON_ORDER = ['minute', 'hour', 'day', 'month', 'dayofweek']
BOUNDS = {'minute': (0, 59), 'hour': (0, 23), 'day': (1, 31),
'month': (1, 12), 'dayofweek': (1, 7)}
def str_test(tester):
if isinstance(tester, str) or isinstance(tester, unicode):
return True
return False
class CronParseException(Exception):
"""
Raise this when you're in trouble
"""
class CronParse(object):
def __init__(self, input_cron=None, timezone=None):
super(CronParse, self).__init__()
self.crontab_times = {}
self.crontab_cycle = {}
self.ranges = {}
self.cron_parts = {}
if input_cron is not None:
self.set_cron(input_cron=input_cron)
def set_cron(self, input_cron):
"""
Given a string format, store it in components, or, if
of the format */x, in crontab_cycle
"""
if not str_test(input_cron):
# We can't handle this
raise TypeError('Input must be a string object')
split_crons = input_cron.split()
if not len(split_crons) == 5:
msg = 'Must have five different components for cron format.'
raise ValueError(msg)
for key, value in zip(CRON_ORDER, split_crons):
all_ints = [int(x) for x in re.findall(r"[\d]+", value)]
bounds = BOUNDS[key]
for num in all_ints:
if num < bounds[0] or num > bounds[1]:
msg = 'Invalid value {} for {}'.format(num, key)
raise ValueError(msg)
self.cron_parts[key] = value
def get_time(self):
"""
What time is it? Return a dictionary with the minute, hour,
dayofmonth, month, and dayofweek as specified in the cron
format.
"""
result = {}
timestamp = time.time()
date = datetime.datetime.fromtimestamp(timestamp)
return date
def get_day_of_week(self, date):
# Because datetime and cron count their days differently
day_of_week = date.weekday() + 1
if day_of_week == 7:
day_of_week = 0
return day_of_week
def validate_dt_part(self, dt, component):
"""
Validate each component of the dt (besides the day of the week)
because they all work in more or less the same way.
"""
time_value = getattr(dt, component)
if self.cron_parts[component] == '*':
# Always true
return True
for x in self.cron_parts[component].split(','):
# Split into a list of individual parts
if x.isdigit():
# Then it's simple
if int(x) == time_value:
return True
continue
if '-' in x:
# This is a range. Extract the range part and handle it.
range_min, range_max = x.split('/')[0].split('-')
if time_value < int(range_min) or time_value > int(range_max):
continue
if not '/' in x:
return True
if '/' in x:
cycle_value = x.split('/')[1]
if not cycle_value.isdigit():
raise ValueError('Invalid timevalue %s' % x)
if time_value % int(cycle_value) == 0:
return True
return False
def validate_dow(self, dt):
"""
Validate the day of the week
"""
if self.cron_parts['dayofweek'] == '*':
return True
else:
current_day = self.get_day_of_week(date=dt)
            for entry in self.cron_parts['dayofweek'].split(','):
                if entry.isdigit():
                    if current_day == int(entry):
                        return True
                    continue
                if '-' in entry:
                    mini, maxi = entry.split('/')[0].split('-')
                    if current_day < int(mini) or current_day > int(maxi):
                        continue
                    if not '/' in entry:
                        return True
                if '/' in entry:
                    cycle_value = entry.split('/')[1]
                    if not cycle_value.isdigit():
                        raise ValueError('Invalid timevalue %s' % entry)
                    if current_day % int(cycle_value) == 0:
                        return True
            return False
def validate_day(self, dt):
"""
Validate the day as one method. This is because cron uses an OR
when both day of month and day of week are specified.
"""
if self.cron_parts['dayofweek'] == '*':
return self.validate_dt_part(dt=dt, component='day')
elif self.cron_parts['day'] == '*':
return self.validate_dow(dt=dt)
else:
return (self.validate_dt_part(dt=dt, component='day') or
self.validate_dow(dt=dt))
def brute_next(self, now):
"""
Brute force this - simply iterate through all possible times.
"""
dt = now
while True:
valid_day = self.validate_day(dt=dt)
valid_month = self.validate_dt_part(dt=dt, component='month')
valid_hour = self.validate_dt_part(dt=dt, component='hour')
valid_minute = self.validate_dt_part(dt=dt, component='minute')
if not valid_month:
if dt.month > 11:
dt = datetime.datetime(year=dt.year+1, month=1,
day=1, hour=0, minute=0)
else:
dt = datetime.datetime(year=dt.year, month=dt.month+1,
day=1, hour=0, minute=0)
if not valid_day:
# Increment by day and try again
dt = dt + datetime.timedelta(days=1)
if dt.minute != 0:
dt = dt - datetime.timedelta(minutes=dt.minute)
if dt.hour != 0:
dt = dt - datetime.timedelta(hours=dt.hour)
elif not valid_hour:
# Increment by an hour and try again
dt = dt + datetime.timedelta(hours=1)
if dt.minute != 0:
dt = dt - datetime.timedelta(minutes=dt.minute)
elif not valid_minute:
# Increment by one minute
dt = dt + datetime.timedelta(minutes=1)
elif dt.year - now.year > 100:
# There are no hundred year cycles
raise CronParseException('Stuck in infinite loop')
else:
break
# At the end
return dt
def is_valid_time(self, dt=None):
"""
Given a dt, is now a valid time?
"""
if not dt:
dt = self.get_time()
valid_day = self.validate_day(dt=dt)
valid_month = self.validate_dt_part(dt=dt, component='month')
valid_hour = self.validate_dt_part(dt=dt, component='hour')
valid_minute = self.validate_dt_part(dt=dt, component='minute')
return valid_day and valid_month and valid_hour and valid_minute
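# Illustrative usage sketch (not part of the original module); the field order
# is minute, hour, day, month, dayofweek, as defined in CRON_ORDER above:
#
#   cron = CronParse()
#   cron.set_cron('*/15 9-17 * * 1-5')   # every 15 min, 9am-5pm, Mon-Fri
#   if cron.is_valid_time():
#       print('job is due now')
#   next_run = cron.brute_next(cron.get_time())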
|
mit
| 799,791,373,471,677,200 | 34.115942 | 78 | 0.500619 | false |
juneJuly/sulley
|
network_monitor.py
|
1
|
8872
|
#!c:\\python\\python.exe
import threading
import getopt
import time
import sys
import os
from sulley import pedrpc
import pcapy
import impacket
import impacket.ImpactDecoder
PORT = 26001
IFS = []
ERR = lambda msg: sys.stderr.write("ERR> " + msg + "\n") or sys.exit(1)
USAGE = "USAGE: network_monitor.py" \
"\n <-d|--device DEVICE #> device to sniff on (see list below)" \
"\n [-f|--filter PCAP FILTER] BPF filter string" \
"\n [-P|--log_path PATH] log directory to store pcaps to" \
"\n [-l|--log_level LEVEL] log level (default 1), increase for more verbosity" \
"\n [--port PORT] TCP port to bind this agent to" \
"\n\nNetwork Device List:\n"
# check to make sure we are running with superuser privileges on Linux
if sys.platform.startswith('linux'):
if os.geteuid() != 0:
print "Must be run with elevated permissions on Linux. (e.g. $ sudo python network_monitor.py ...)"
sys.exit(2)
# add the device list to the usage string.
i = 0
for dev in pcapy.findalldevs():
IFS.append(dev)
# if we are on windows, try and resolve the device UUID into an IP address.
if sys.platform.startswith("win"):
import _winreg
try:
# extract the device UUID and open the TCP/IP parameters key for it.
dev = dev[dev.index("{"):dev.index("}")+1]
subkey = r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\Interfaces\%s" % dev
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey)
# if there is a DHCP address snag that, otherwise fall back to the IP address.
try: ip = _winreg.QueryValueEx(key, "DhcpIPAddress")[0]
except: ip = _winreg.QueryValueEx(key, "IPAddress")[0][0]
dev = dev + "\t" + ip
except:
pass
USAGE += " [%d] %s\n" % (i, dev)
i += 1
########################################################################################################################
class pcap_thread (threading.Thread):
def __init__ (self, network_monitor, pcap, pcap_save_path):
self.network_monitor = network_monitor
self.pcap = pcap
self.decoder = None
self.dumper = self.pcap.dump_open(pcap_save_path)
self.active = True
self.data_bytes = 0
# register the appropriate decoder.
if pcap.datalink() == pcapy.DLT_EN10MB:
self.decoder = impacket.ImpactDecoder.EthDecoder()
elif pcap.datalink() == pcapy.DLT_LINUX_SLL:
self.decoder = impacket.ImpactDecoder.LinuxSLLDecoder()
else:
raise Exception
threading.Thread.__init__(self)
def packet_handler (self, header, data):
# add the captured data to the PCAP.
self.dumper.dump(header, data)
# increment the captured byte count.
self.data_bytes += len(data)
# log the decoded data at the appropriate log level.
self.network_monitor.log(self.decoder.decode(data), 15)
def run (self):
# process packets while the active flag is raised.
while self.active:
self.pcap.dispatch(0, self.packet_handler)
########################################################################################################################
class network_monitor_pedrpc_server (pedrpc.server):
def __init__ (self, host, port, device, filter="", log_path="./", log_level=1):
'''
@type host: String
@param host: Hostname or IP address to bind server to
@type port: Integer
@param port: Port to bind server to
@type device: String
@param device: Name of device to capture packets on
@type ignore_pid: Integer
@param ignore_pid: (Optional, def=None) Ignore this PID when searching for the target process
@type log_path: String
@param log_path: (Optional, def="./") Path to save recorded PCAPs to
@type log_level: Integer
@param log_level: (Optional, def=1) Log output level, increase for more verbosity
'''
# initialize the PED-RPC server.
pedrpc.server.__init__(self, host, port)
self.device = device
self.filter = filter
self.log_path = log_path
self.log_level = log_level
self.pcap = None
self.pcap_thread = None
# ensure the log path is valid.
if not os.access(self.log_path, os.X_OK):
self.log("invalid log path: %s" % self.log_path)
raise Exception
self.log("Network Monitor PED-RPC server initialized:")
self.log("\t device: %s" % self.device)
self.log("\t filter: %s" % self.filter)
self.log("\t log path: %s" % self.log_path)
self.log("\t log_level: %d" % self.log_level)
self.log("Awaiting requests...")
def __stop (self):
'''
Kill the PCAP thread.
'''
if self.pcap_thread:
self.log("stopping active packet capture thread.", 10)
self.pcap_thread.active = False
self.pcap_thread = None
def alive (self):
'''
Returns True. Useful for PED-RPC clients who want to see if the PED-RPC connection is still alive.
'''
return True
def post_send (self):
'''
This routine is called after the fuzzer transmits a test case and returns the number of bytes captured by the
PCAP thread.
@rtype: Integer
@return: Number of bytes captured in PCAP thread.
'''
# grab the number of recorded bytes.
data_bytes = self.pcap_thread.data_bytes
# stop the packet capture thread.
self.__stop()
self.log("stopped PCAP thread, snagged %d bytes of data" % data_bytes)
return data_bytes
def pre_send (self, test_number):
'''
        This routine is called before the fuzzer transmits a test case and spins off a packet capture thread.
'''
self.log("initializing capture for test case #%d" % test_number)
# open the capture device and set the BPF filter.
self.pcap = pcapy.open_live(self.device, 65355, True, 100)
self.pcap.setfilter(self.filter)
# instantiate the capture thread.
pcap_log_path = "%s/%d.pcap" % (self.log_path, test_number)
self.pcap_thread = pcap_thread(self, self.pcap, pcap_log_path)
self.pcap_thread.start()
def log (self, msg="", level=1):
'''
If the supplied message falls under the current log level, print the specified message to screen.
@type msg: String
@param msg: Message to log
'''
if self.log_level >= level:
print "[%s] %s" % (time.strftime("%I:%M.%S"), msg)
def retrieve (self, test_number):
'''
Return the raw binary contents of the PCAP saved for the specified test case number.
@type test_number: Integer
@param test_number: Test number to retrieve PCAP for.
'''
self.log("retrieving PCAP for test case #%d" % test_number)
pcap_log_path = "%s/%d.pcap" % (self.log_path, test_number)
fh = open(pcap_log_path, "rb")
data = fh.read()
fh.close()
return data
def set_filter (self, filter):
self.log("updating PCAP filter to '%s'" % filter)
self.filter = filter
def set_log_path (self, log_path):
self.log("updating log path to '%s'" % log_path)
self.log_path = log_path
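# Typical PED-RPC call sequence from the fuzzer's side (illustrative, not part
# of the original file): the fuzzer calls pre_send(test_number) to spin up a
# packet capture thread, transmits the test case, then calls post_send() to
# stop the capture and learn how many bytes were recorded, and may later call
# retrieve(test_number) to pull back the raw PCAP for that test case.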
########################################################################################################################
if __name__ == "__main__":
# parse command line options.
try:
opts, args = getopt.getopt(sys.argv[1:], "d:f:P:l:", ["device=", "filter=", "log_path=", "log_level=", "port="])
except getopt.GetoptError:
ERR(USAGE)
device = None
filter = ""
log_path = "./"
log_level = 1
for opt, arg in opts:
if opt in ("-d", "--device"): device = IFS[int(arg)]
if opt in ("-f", "--filter"): filter = arg
if opt in ("-P", "--log_path"): log_path = arg
if opt in ("-l", "--log_level"): log_level = int(arg)
if opt in ("--port"): PORT = int(arg)
if not device:
ERR(USAGE)
try:
servlet = network_monitor_pedrpc_server("0.0.0.0", PORT, device, filter, log_path, log_level)
servlet.serve_forever()
except:
pass
|
gpl-2.0
| -6,967,395,600,718,050,000 | 32.353383 | 120 | 0.535505 | false |
platformio/platformio-core
|
platformio/builder/tools/platformio.py
|
1
|
12169
|
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import fnmatch
import os
import sys
from SCons import Builder, Util # pylint: disable=import-error
from SCons.Node import FS # pylint: disable=import-error
from SCons.Script import COMMAND_LINE_TARGETS # pylint: disable=import-error
from SCons.Script import AlwaysBuild # pylint: disable=import-error
from SCons.Script import DefaultEnvironment # pylint: disable=import-error
from SCons.Script import Export # pylint: disable=import-error
from SCons.Script import SConscript # pylint: disable=import-error
from platformio import __version__, fs
from platformio.compat import IS_MACOS, string_types
from platformio.package.version import pepver_to_semver
SRC_HEADER_EXT = ["h", "hpp"]
SRC_ASM_EXT = ["S", "spp", "SPP", "sx", "s", "asm", "ASM"]
SRC_C_EXT = ["c"]
SRC_CXX_EXT = ["cc", "cpp", "cxx", "c++"]
SRC_BUILD_EXT = SRC_C_EXT + SRC_CXX_EXT + SRC_ASM_EXT
SRC_FILTER_DEFAULT = ["+<*>", "-<.git%s>" % os.sep, "-<.svn%s>" % os.sep]
def scons_patched_match_splitext(path, suffixes=None):
"""Patch SCons Builder, append $OBJSUFFIX to the end of each target"""
tokens = Util.splitext(path)
if suffixes and tokens[1] and tokens[1] in suffixes:
return (path, tokens[1])
return tokens
def GetBuildType(env):
return (
"debug"
if (
set(["__debug", "sizedata"]) & set(COMMAND_LINE_TARGETS)
or env.GetProjectOption("build_type") == "debug"
)
else "release"
)
def BuildProgram(env):
env.ProcessProgramDeps()
env.ProcessProjectDeps()
# append into the beginning a main LD script
if env.get("LDSCRIPT_PATH") and not any("-Wl,-T" in f for f in env["LINKFLAGS"]):
env.Prepend(LINKFLAGS=["-T", env.subst("$LDSCRIPT_PATH")])
# enable "cyclic reference" for linker
if (
env.get("LIBS")
and env.GetCompilerType() == "gcc"
and (env.PioPlatform().is_embedded() or not IS_MACOS)
):
env.Prepend(_LIBFLAGS="-Wl,--start-group ")
env.Append(_LIBFLAGS=" -Wl,--end-group")
program = env.Program(
os.path.join("$BUILD_DIR", env.subst("$PROGNAME$PROGSUFFIX")),
env["PIOBUILDFILES"],
)
env.Replace(PIOMAINPROG=program)
AlwaysBuild(
env.Alias(
"checkprogsize",
program,
env.VerboseAction(env.CheckUploadSize, "Checking size $PIOMAINPROG"),
)
)
print("Building in %s mode" % env.GetBuildType())
return program
def ProcessProgramDeps(env):
def _append_pio_macros():
core_version = pepver_to_semver(__version__)
env.AppendUnique(
CPPDEFINES=[
(
"PLATFORMIO",
int(
"{0:02d}{1:02d}{2:02d}".format(
core_version.major, core_version.minor, core_version.patch
)
),
)
]
)
_append_pio_macros()
env.PrintConfiguration()
# fix ASM handling under non case-sensitive OS
if not Util.case_sensitive_suffixes(".s", ".S"):
env.Replace(AS="$CC", ASCOM="$ASPPCOM")
# process extra flags from board
if "BOARD" in env and "build.extra_flags" in env.BoardConfig():
env.ProcessFlags(env.BoardConfig().get("build.extra_flags"))
# apply user flags
env.ProcessFlags(env.get("BUILD_FLAGS"))
# process framework scripts
env.BuildFrameworks(env.get("PIOFRAMEWORK"))
if env.GetBuildType() == "debug":
env.ConfigureDebugFlags()
# remove specified flags
env.ProcessUnFlags(env.get("BUILD_UNFLAGS"))
if "__test" in COMMAND_LINE_TARGETS:
env.ConfigureTestTarget()
def ProcessProjectDeps(env):
project_lib_builder = env.ConfigureProjectLibBuilder()
# prepend project libs to the beginning of list
env.Prepend(LIBS=project_lib_builder.build())
# prepend extra linker related options from libs
env.PrependUnique(
**{
key: project_lib_builder.env.get(key)
for key in ("LIBS", "LIBPATH", "LINKFLAGS")
if project_lib_builder.env.get(key)
}
)
projenv = env.Clone()
# CPPPATH from dependencies
projenv.PrependUnique(CPPPATH=project_lib_builder.env.get("CPPPATH"))
# extra build flags from `platformio.ini`
projenv.ProcessFlags(env.get("SRC_BUILD_FLAGS"))
is_test = "__test" in COMMAND_LINE_TARGETS
if is_test:
projenv.BuildSources(
"$BUILD_TEST_DIR", "$PROJECT_TEST_DIR", "$PIOTEST_SRC_FILTER"
)
if not is_test or env.GetProjectOption("test_build_project_src"):
projenv.BuildSources(
"$BUILD_SRC_DIR", "$PROJECT_SRC_DIR", env.get("SRC_FILTER")
)
if not env.get("PIOBUILDFILES") and not COMMAND_LINE_TARGETS:
sys.stderr.write(
"Error: Nothing to build. Please put your source code files "
"to '%s' folder\n" % env.subst("$PROJECT_SRC_DIR")
)
env.Exit(1)
Export("projenv")
def ParseFlagsExtended(env, flags): # pylint: disable=too-many-branches
if not isinstance(flags, list):
flags = [flags]
result = {}
for raw in flags:
for key, value in env.ParseFlags(str(raw)).items():
if key not in result:
result[key] = []
result[key].extend(value)
cppdefines = []
for item in result["CPPDEFINES"]:
if not Util.is_Sequence(item):
cppdefines.append(item)
continue
name, value = item[:2]
if '"' in value:
value = value.replace('"', '\\"')
elif value.isdigit():
value = int(value)
elif value.replace(".", "", 1).isdigit():
value = float(value)
cppdefines.append((name, value))
result["CPPDEFINES"] = cppdefines
# fix relative CPPPATH & LIBPATH
for k in ("CPPPATH", "LIBPATH"):
for i, p in enumerate(result.get(k, [])):
if os.path.isdir(p):
result[k][i] = os.path.realpath(p)
# fix relative path for "-include"
for i, f in enumerate(result.get("CCFLAGS", [])):
if isinstance(f, tuple) and f[0] == "-include":
result["CCFLAGS"][i] = (f[0], env.File(os.path.realpath(f[1].get_path())))
return result
def ProcessFlags(env, flags): # pylint: disable=too-many-branches
if not flags:
return
env.Append(**env.ParseFlagsExtended(flags))
# Cancel any previous definition of name, either built in or
# provided with a -U option // Issue #191
undefines = [
u
for u in env.get("CCFLAGS", [])
if isinstance(u, string_types) and u.startswith("-U")
]
if undefines:
for undef in undefines:
env["CCFLAGS"].remove(undef)
if undef[2:] in env["CPPDEFINES"]:
env["CPPDEFINES"].remove(undef[2:])
env.Append(_CPPDEFFLAGS=" %s" % " ".join(undefines))
def ProcessUnFlags(env, flags):
if not flags:
return
parsed = env.ParseFlagsExtended(flags)
# get all flags and copy them to each "*FLAGS" variable
all_flags = []
for key, unflags in parsed.items():
if key.endswith("FLAGS"):
all_flags.extend(unflags)
for key, unflags in parsed.items():
if key.endswith("FLAGS"):
parsed[key].extend(all_flags)
for key, unflags in parsed.items():
for unflag in unflags:
for current in env.get(key, []):
conditions = [
unflag == current,
isinstance(current, (tuple, list)) and unflag[0] == current[0],
]
if any(conditions):
env[key].remove(current)
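# Example (illustrative, not from this module): with `build_unflags = -Os` in
# platformio.ini, "-Os" is parsed once, applied to every *FLAGS group, and any
# matching entry is removed from CCFLAGS, ASFLAGS, LINKFLAGS, and so on.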
def MatchSourceFiles(env, src_dir, src_filter=None):
src_filter = env.subst(src_filter) if src_filter else None
src_filter = src_filter or SRC_FILTER_DEFAULT
return fs.match_src_files(
env.subst(src_dir), src_filter, SRC_BUILD_EXT + SRC_HEADER_EXT
)
def CollectBuildFiles(
env, variant_dir, src_dir, src_filter=None, duplicate=False
): # pylint: disable=too-many-locals
sources = []
variants = []
src_dir = env.subst(src_dir)
if src_dir.endswith(os.sep):
src_dir = src_dir[:-1]
for item in env.MatchSourceFiles(src_dir, src_filter):
_reldir = os.path.dirname(item)
_src_dir = os.path.join(src_dir, _reldir) if _reldir else src_dir
_var_dir = os.path.join(variant_dir, _reldir) if _reldir else variant_dir
if _var_dir not in variants:
variants.append(_var_dir)
env.VariantDir(_var_dir, _src_dir, duplicate)
if fs.path_endswith_ext(item, SRC_BUILD_EXT):
sources.append(env.File(os.path.join(_var_dir, os.path.basename(item))))
middlewares = env.get("__PIO_BUILD_MIDDLEWARES")
if not middlewares:
return sources
new_sources = []
for node in sources:
new_node = node
for callback, pattern in middlewares:
if pattern and not fnmatch.fnmatch(node.srcnode().get_path(), pattern):
continue
new_node = callback(new_node)
if new_node:
new_sources.append(new_node)
return new_sources
def AddBuildMiddleware(env, callback, pattern=None):
env.Append(__PIO_BUILD_MIDDLEWARES=[(callback, pattern)])
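# Sketch of a build middleware (illustrative, not part of this module): the
# callback receives each collected source node and may return a replacement
# node, or None to drop the file from the build.
#
#   def skip_generated(node):
#       return None if node.get_path().endswith(".gen.c") else node
#
#   env.AddBuildMiddleware(skip_generated, "*/third_party/*")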
def BuildFrameworks(env, frameworks):
if not frameworks:
return
if "BOARD" not in env:
sys.stderr.write(
"Please specify `board` in `platformio.ini` to use "
"with '%s' framework\n" % ", ".join(frameworks)
)
env.Exit(1)
board_frameworks = env.BoardConfig().get("frameworks", [])
if frameworks == ["platformio"]:
if board_frameworks:
frameworks.insert(0, board_frameworks[0])
else:
sys.stderr.write("Error: Please specify `board` in `platformio.ini`\n")
env.Exit(1)
for f in frameworks:
if f == "arduino":
# Arduino IDE appends .o the end of filename
Builder.match_splitext = scons_patched_match_splitext
if "nobuild" not in COMMAND_LINE_TARGETS:
env.ConvertInoToCpp()
if f in board_frameworks:
SConscript(env.GetFrameworkScript(f), exports="env")
else:
sys.stderr.write("Error: This board doesn't support %s framework!\n" % f)
env.Exit(1)
def BuildLibrary(env, variant_dir, src_dir, src_filter=None):
env.ProcessUnFlags(env.get("BUILD_UNFLAGS"))
return env.StaticLibrary(
env.subst(variant_dir), env.CollectBuildFiles(variant_dir, src_dir, src_filter)
)
def BuildSources(env, variant_dir, src_dir, src_filter=None):
nodes = env.CollectBuildFiles(variant_dir, src_dir, src_filter)
DefaultEnvironment().Append(
PIOBUILDFILES=[
env.Object(node) if isinstance(node, FS.File) else node for node in nodes
]
)
def exists(_):
return True
def generate(env):
env.AddMethod(GetBuildType)
env.AddMethod(BuildProgram)
env.AddMethod(ProcessProgramDeps)
env.AddMethod(ProcessProjectDeps)
env.AddMethod(ParseFlagsExtended)
env.AddMethod(ProcessFlags)
env.AddMethod(ProcessUnFlags)
env.AddMethod(MatchSourceFiles)
env.AddMethod(CollectBuildFiles)
env.AddMethod(AddBuildMiddleware)
env.AddMethod(BuildFrameworks)
env.AddMethod(BuildLibrary)
env.AddMethod(BuildSources)
return env
|
apache-2.0
| -4,102,121,921,136,001,500 | 30.772846 | 87 | 0.612376 | false |
alesaccoia/TF_SoundClassification
|
as_sound/exec/train_vad_ann_5FCL_classifier.py
|
1
|
1585
|
import numpy as np
import as_classification.ann_models
import as_sound.features.extractFeatures as ef
import as_classification.utilities
import os
import matplotlib.pyplot as plt
# -------------------------------
# CREATE MODEL
# -------------------------------
model = as_classification.ann_models.ANN_5FCL()
model.initialize(15,2)
# -------------------------------
# READ AUDIO FILES
# -------------------------------
speech_data = ef.computeSupervectorForFile(os.path.dirname(os.path.realpath(__file__)) + '/data/Speech.wav', 8000, 2048, 2049)
noise_data = ef.computeSupervectorForFile(os.path.dirname(os.path.realpath(__file__)) + '/data/Noise.wav', 8000, 2048, 2049)
whole_data = np.hstack((speech_data, noise_data))
whole_data = np.swapaxes(whole_data,0,1)
whole_labels = np.zeros((whole_data.shape[0], 2))
whole_labels[:speech_data.shape[1],0] = 1
whole_labels[speech_data.shape[1]:,1] = 1
training_data = {"labels": whole_labels,
"data": whole_data}
training_data, test_data = as_classification.utilities.divideTrainingData(training_data, 0.6)
# -------------------------------
# TRAIN
# -------------------------------
model.train(training_data, test_data, 10, 20, 100)
model.saveCheckpoint(os.path.dirname(os.path.realpath(__file__)) + '/data/vadModel_ANN_5FCL.chkp')
#xp = np.arange(0,prediction.shape[0])
#plt.plot(xp, test_data[:,14], '-b', label='RMS')
#plt.plot(xp, prediction[:,0], '-r', label='ANN Output')
#plt.legend(loc='upper left')
#plt.show()
#feat = as_sound.features.extractFeatures.computeSupervector(normalized_data)
|
mit
| 3,080,008,847,308,803,000 | 25.864407 | 126 | 0.625868 | false |
DedMemez/ODS-August-2017
|
battle/BattleProps.py
|
1
|
13551
|
# Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.battle.BattleProps
from panda3d.core import NodePath, SequenceNode, Texture, VBase4
from direct.actor import Actor
from direct.directnotify import DirectNotifyGlobal
from otp.otpbase import OTPGlobals
import random
Props = ((5, 'partyBall', 'partyBall'),
(5, 'feather', 'feather-mod', 'feather-chan'),
(5, 'lips', 'lips'),
(5, 'lipstick', 'lipstick'),
(5, 'hat', 'hat'),
(5, 'cane', 'cane'),
(5, 'cubes', 'cubes-mod', 'cubes-chan'),
(5, 'ladder', 'ladder2'),
(4, 'fishing-pole', 'fishing-pole-mod', 'fishing-pole-chan'),
(5, '1dollar', '1dollar-bill-mod', '1dollar-bill-chan'),
(5, 'big-magnet', 'magnet'),
(5, 'hypno-goggles', 'hypnotize-mod', 'hypnotize-chan'),
(5, 'slideshow', 'av_screen'),
(5, 'banana', 'banana-peel-mod', 'banana-peel-chan'),
(5, 'rake', 'rake-mod', 'rake-chan'),
(5, 'marbles', 'marbles-mod', 'marbles-chan'),
(5, 'tnt', 'tnt-mod', 'tnt-chan'),
(5, 'trapdoor', 'trapdoor'),
(5, 'quicksand', 'quicksand'),
(5, 'traintrack', 'traintrack2'),
(5, 'train', 'train'),
(5, 'megaphone', 'megaphone'),
(5, 'aoogah', 'aoogah'),
(5, 'bikehorn', 'bikehorn'),
(5, 'bugle', 'bugle'),
(5, 'elephant', 'elephant'),
(5, 'fog_horn', 'fog_horn'),
(5, 'whistle', 'whistle'),
(5, 'singing', 'singing'),
(3.5, 'creampie', 'tart'),
(5, 'fruitpie-slice', 'fruit-pie-slice'),
(5, 'creampie-slice', 'cream-pie-slice'),
(5, 'birthday-cake', 'birthday-cake-mod', 'birthday-cake-chan'),
(5, 'wedding-cake', 'wedding_cake'),
(3.5, 'squirting-flower', 'squirting-flower'),
(5, 'glass', 'glass-mod', 'glass-chan'),
(4, 'water-gun', 'water-gun'),
(3.5, 'bottle', 'bottle'),
(5, 'firehose', 'firehose-mod', 'firehose-chan'),
(5, 'hydrant', 'battle_hydrant'),
(4, 'stormcloud', 'stormcloud-mod', 'stormcloud-chan'),
(5, 'geyser', 'geyser'),
(3.5, 'button', 'button'),
(5, 'flowerpot', 'flowerpot-mod', 'flowerpot-chan'),
(5, 'sandbag', 'sandbag-mod', 'sandbag-chan'),
(4, 'anvil', 'anvil-mod', 'anvil-chan'),
(5, 'weight', 'weight-mod', 'weight-chan'),
(5, 'safe', 'safe-mod', 'safe-chan'),
(5, 'piano', 'piano-mod', 'piano-chan'),
(5, 'rake-react', 'rake-step-mod', 'rake-step-chan'),
(5, 'pad', 'pad'),
(4, 'propeller', 'propeller-mod', 'propeller-chan'),
(5, 'calculator', 'calculator-mod', 'calculator-chan'),
(5, 'rollodex', 'roll-o-dex'),
(5, 'rubber-stamp', 'rubber-stamp'),
(5, 'rubber-stamp-pad', 'rubber-stamp-pad-mod', 'rubber-stamp-pad-chan'),
(5, 'smile', 'smile-mod', 'smile-chan'),
(5, 'golf-club', 'golf-club'),
(5, 'golf-ball', 'golf-ball'),
(5, 'redtape', 'redtape'),
(5, 'redtape-tube', 'redtape-tube'),
(5, 'bounced-check', 'bounced-check'),
(5, 'calculator', 'calculator-mod', 'calculator-chan'),
(3.5, 'clip-on-tie', 'clip-on-tie-mod', 'clip-on-tie-chan'),
(5, 'pen', 'pen'),
(5, 'pencil', 'pencil'),
(3, 'snowball', 'snowball'),
(3.5, 'phone', 'phone'),
(3.5, 'receiver', 'receiver'),
(5, 'sharpener', 'sharpener'),
(3.5, 'shredder', 'shredder'),
(3.5, 'shredder-paper', 'shredder-paper-mod', 'shredder-paper-chan'),
(5, 'watercooler', 'watercooler'),
(5, 'dagger', 'dagger'),
(5, 'card', 'card'),
(5, 'baseball', 'baseball'),
(5, 'bird', 'bird'),
(5, 'can', 'can'),
(5, 'cigar', 'cigar'),
(5, 'evil-eye', 'evil-eye'),
(5, 'gavel', 'gavel'),
(5, 'half-windsor', 'half-windsor'),
(5, 'lawbook', 'lawbook'),
(5, 'newspaper', 'newspaper'),
(5, 'pink-slip', 'pink-slip'),
(5, 'teeth', 'teeth-mod', 'teeth-chan'),
(5, 'power-tie', 'power-tie'),
(3.5, 'spray', 'spray'),
(3.5, 'splash', 'splash'),
(3.5, 'splat', 'splat-mod', 'splat-chan'),
(3.5, 'stun', 'stun-mod', 'stun-chan'),
(3.5, 'glow', 'glow'),
(3.5, 'suit_explosion', 'suit_explosion-mod', 'suit_explosion-chan'),
(3.5, 'suit_explosion_dust', 'dust_cloud'),
(4, 'ripples', 'ripples'),
(4, 'wake', 'wake'),
(4, 'splashdown', 'SZ_splashdown-mod', 'SZ_splashdown-chan'))
CreampieColor = VBase4(250.0 / 255.0, 241.0 / 255.0, 24.0 / 255.0, 1.0)
FruitpieColor = VBase4(55.0 / 255.0, 40.0 / 255.0, 148.0 / 255.0, 1.0)
BirthdayCakeColor = VBase4(253.0 / 255.0, 119.0 / 255.0, 220.0 / 255.0, 1.0)
SnowballColor = VBase4(1, 1, 1, 1)
Splats = {'tart': (0.3, FruitpieColor),
'fruitpie-slice': (0.5, FruitpieColor),
'creampie-slice': (0.5, CreampieColor),
'fruitpie': (0.7, FruitpieColor),
'creampie': (0.7, CreampieColor),
'birthday-cake': (0.9, BirthdayCakeColor),
'wedding-cake': (0.9, BirthdayCakeColor),
'snowball': (0.5, SnowballColor)}
Variants = ('tart', 'fruitpie', 'splat-tart', 'dust', 'kapow', 'double-windsor', 'splat-fruitpie-slice', 'splat-creampie-slice', 'splat-fruitpie', 'splat-creampie', 'splat-birthday-cake', 'splat-wedding-cake', 'splat-snowball', 'splash-from-splat', 'clip-on-tie', 'lips', 'small-magnet', '5dollar', '10dollar', 'suit_explosion', 'quicksand', 'trapdoor', 'geyser', 'ship', 'trolley', 'traintrack')
class PropPool:
notify = DirectNotifyGlobal.directNotify.newCategory('PropPool')
def __init__(self):
self.props = {}
self.propCache = []
self.propStrings = {}
self.propTypes = {}
self.maxPoolSize = config.GetInt('prop-pool-size', 8)
for p in Props:
phase = p[0]
propName = p[1]
modelName = p[2]
if len(p) == 4:
animName = p[3]
propPath = self.getPath(phase, modelName)
animPath = self.getPath(phase, animName)
self.propTypes[propName] = 'actor'
self.propStrings[propName] = (propPath, animPath)
else:
propPath = self.getPath(phase, modelName)
self.propTypes[propName] = 'model'
self.propStrings[propName] = (propPath,)
propName = 'tart'
self.propStrings[propName] = (self.getPath(3.5, 'tart'),)
self.propTypes[propName] = 'model'
propName = 'fruitpie'
self.propStrings[propName] = (self.getPath(3.5, 'tart'),)
self.propTypes[propName] = 'model'
propName = 'double-windsor'
self.propStrings[propName] = (self.getPath(5, 'half-windsor'),)
self.propTypes[propName] = 'model'
splatAnimFileName = self.getPath(3.5, 'splat-chan')
for splat in Splats.keys():
propName = 'splat-' + splat
self.propStrings[propName] = (self.getPath(3.5, 'splat-mod'), splatAnimFileName)
self.propTypes[propName] = 'actor'
propName = 'splash-from-splat'
self.propStrings[propName] = (self.getPath(3.5, 'splat-mod'), splatAnimFileName)
self.propTypes[propName] = 'actor'
propName = 'small-magnet'
self.propStrings[propName] = (self.getPath(5, 'magnet'),)
self.propTypes[propName] = 'model'
propName = '5dollar'
self.propStrings[propName] = (self.getPath(5, '1dollar-bill-mod'), self.getPath(5, '1dollar-bill-chan'))
self.propTypes[propName] = 'actor'
propName = '10dollar'
self.propStrings[propName] = (self.getPath(5, '1dollar-bill-mod'), self.getPath(5, '1dollar-bill-chan'))
self.propTypes[propName] = 'actor'
propName = 'dust'
self.propStrings[propName] = (self.getPath(5, 'dust-mod'), self.getPath(5, 'dust-chan'))
self.propTypes[propName] = 'actor'
propName = 'kapow'
self.propStrings[propName] = (self.getPath(5, 'kapow-mod'), self.getPath(5, 'kapow-chan'))
self.propTypes[propName] = 'actor'
propName = 'ship'
self.propStrings[propName] = ('phase_5/models/props/ship',)
self.propTypes[propName] = 'model'
propName = 'trolley'
self.propStrings[propName] = ('phase_4/models/modules/trolley_station_TT',)
self.propTypes[propName] = 'model'
def getPath(self, phase, model):
return 'phase_%s/models/props/%s' % (phase, model)
def makeVariant(self, name):
if name == 'tart':
self.props[name].setScale(0.5)
elif name == 'fruitpie':
self.props[name].setScale(0.75)
elif name == 'double-windsor':
self.props[name].setScale(1.5)
elif name[:6] == 'splat-':
prop = self.props[name]
scale = prop.getScale() * Splats[name[6:]][0]
prop.setScale(scale)
prop.setColor(Splats[name[6:]][1])
elif name == 'splash-from-splat':
self.props[name].setColor(0.75, 0.75, 1.0, 1.0)
elif name == 'clip-on-tie':
tie = self.props[name]
tie.getChild(0).setHpr(23.86, -16.03, 9.18)
elif name == 'small-magnet':
self.props[name].setScale(0.5)
elif name == 'shredder-paper':
paper = self.props[name]
paper.setPosHpr(2.22, -0.95, 1.16, -48.61, 26.57, -111.51)
paper.flattenMedium()
elif name == 'lips':
lips = self.props[name]
lips.setPos(0, 0, -3.04)
lips.flattenMedium()
elif name == '5dollar':
tex = loader.loadTexture('phase_5/maps/dollar_5.jpg')
tex.setMinfilter(Texture.FTLinearMipmapLinear)
tex.setMagfilter(Texture.FTLinear)
self.props[name].setTexture(tex, 1)
elif name == '10dollar':
tex = loader.loadTexture('phase_5/maps/dollar_10.jpg')
tex.setMinfilter(Texture.FTLinearMipmapLinear)
tex.setMagfilter(Texture.FTLinear)
self.props[name].setTexture(tex, 1)
elif name == 'dust':
bin = 110
for cloudNum in xrange(1, 12):
cloudName = '**/cloud' + str(cloudNum)
cloud = self.props[name].find(cloudName)
cloud.setBin('fixed', bin)
bin -= 10
elif name == 'kapow':
l = self.props[name].find('**/letters')
l.setBin('fixed', 20)
e = self.props[name].find('**/explosion')
e.setBin('fixed', 10)
elif name == 'suit_explosion':
joints = ['**/joint_scale_POW', '**/joint_scale_BLAM', '**/joint_scale_BOOM']
joint = random.choice(joints)
self.props[name].find(joint).hide()
joints.remove(joint)
joint = random.choice(joints)
self.props[name].find(joint).hide()
elif name == 'quicksand' or name == 'trapdoor':
p = self.props[name]
p.setBin('shadow', -5)
p.setDepthWrite(0)
p.getChild(0).setPos(0, 0, OTPGlobals.FloorOffset)
elif name == 'traintrack' or name == 'traintrack2':
prop = self.props[name]
prop.find('**/tunnel3').hide()
prop.find('**/tunnel2').hide()
prop.find('**/tracksA').setPos(0, 0, OTPGlobals.FloorOffset)
elif name == 'geyser':
p = self.props[name]
s = SequenceNode('geyser')
p.findAllMatches('**/Splash*').reparentTo(NodePath(s))
s.loop(0)
s.setFrameRate(12)
p.attachNewNode(s)
elif name == 'ship':
self.props[name] = self.props[name].find('**/ship_gag')
elif name == 'trolley':
self.props[name] = self.props[name].find('**/trolley_car')
def unloadProps(self):
for p in self.props.values():
if type(p) != type(()):
self.__delProp(p)
self.props = {}
self.propCache = []
def getProp(self, name):
return self.__getPropCopy(name)
def __getPropCopy(self, name):
if self.propTypes[name] == 'actor':
if name not in self.props:
prop = Actor.Actor()
prop.loadModel(self.propStrings[name][0])
if settings['smoothAnimations']:
prop.setBlend(frameBlend=True)
animDict = {}
animDict[name] = self.propStrings[name][1]
prop.loadAnims(animDict)
prop.setName(name)
self.storeProp(name, prop)
if name in Variants:
self.makeVariant(name)
return Actor.Actor(other=self.props[name])
else:
if name not in self.props:
prop = loader.loadModel(self.propStrings[name][0])
prop.setName(name)
self.storeProp(name, prop)
if name in Variants:
self.makeVariant(name)
return self.props[name].copyTo(hidden)
def storeProp(self, name, prop):
self.props[name] = prop
self.propCache.append(prop)
if len(self.props) > self.maxPoolSize:
oldest = self.propCache.pop(0)
del self.props[oldest.getName()]
self.__delProp(oldest)
self.notify.debug('props = %s' % self.props)
self.notify.debug('propCache = %s' % self.propCache)
def getPropType(self, name):
return self.propTypes[name]
def __delProp(self, prop):
if prop == None:
self.notify.warning('tried to delete null prop!')
return
else:
if isinstance(prop, Actor.Actor):
prop.cleanup()
else:
prop.removeNode()
return
globalPropPool = PropPool()
|
apache-2.0
| -4,398,891,153,103,457,000 | 39.830247 | 396 | 0.551325 | false |
open-switch/ops-cli
|
tests/test_vtysh_ct_bgp_router_cli.py
|
1
|
10162
|
#!/usr/bin/python
# (c) Copyright 2015 Hewlett Packard Enterprise Development LP
#
# GNU Zebra is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any
# later version.
#
# GNU Zebra is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Zebra; see the file COPYING. If not, write to the Free
# Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
# 02111-1307, USA.
from opsvsi.docker import *
from opsvsi.opsvsitest import *
import time
class bgpCLItest(OpsVsiTest):
def setupNet(self):
host_opts = self.getHostOpts()
switch_opts = self.getSwitchOpts()
bgp_topo = SingleSwitchTopo(k=0, hopts=host_opts, sopts=switch_opts)
self.net = Mininet(bgp_topo, switch=VsiOpenSwitch,
host=Host, link=OpsVsiLink,
controller=None, build=True)
def verify_bgp_router_table(self):
info("\n########## Test to verify BGP router table"
" ##########\n")
s1 = self.net.switches[0]
out = s1.cmdCLI("show ip bgp summary")
assert "No bgp router configured." in out, \
"Test to verify BGP router table FAILED!"
info("\n########## Test to verify BGP router table successfull"
" ##########\n")
def configure_bgp_router_flags(self):
info("\n########## Test to configure BGP router flags"
" ##########\n")
fast_ext_failover_str = "bgp fast-external-failover"
fast_ext_failover_flag = False
log_neighbor_changes_str = "bgp log-neighbor-changes"
log_neighbor_changes_flag = False
s1 = self.net.switches[0]
s1.cmdCLI("configure terminal")
s1.cmdCLI("router bgp 100")
s1.cmdCLI(fast_ext_failover_str)
s1.cmdCLI(log_neighbor_changes_str)
s1.cmdCLI("end")
dump = s1.cmdCLI("show running-config")
lines = dump.split('\n')
for line in lines:
if fast_ext_failover_str in line:
fast_ext_failover_flag = True
elif log_neighbor_changes_str in line:
log_neighbor_changes_flag = True
if fast_ext_failover_flag is False:
info("### BGP fast-external-failover flag not set ###\n")
elif log_neighbor_changes_flag is False:
info("### BGP log-neighbor-changes flag not set ###\n")
if fast_ext_failover_flag is False or \
log_neighbor_changes_flag is False:
info("### Test to set BGP Router flags-FAILED! ###\n")
def unconfigure_bgp_router_flags(self):
info("\n########## Test to unconfigure BGP router flags"
" ##########\n")
fast_ext_failover_str = "bgp fast-external-failover"
no_fast_ext_failover_str = "no bgp fast-external-failover"
fast_ext_failover_flag = False
log_neighbor_changes_str = "bgp log-neighbor-changes"
no_log_neighbor_changes_str = "no bgp log-neighbor-changes"
log_neighbor_changes_flag = False
s1 = self.net.switches[0]
s1.cmdCLI("configure terminal")
s1.cmdCLI("router bgp 100")
s1.cmdCLI(no_fast_ext_failover_str)
s1.cmdCLI(no_log_neighbor_changes_str)
s1.cmdCLI("end")
dump = s1.cmdCLI("show running-config")
lines = dump.split('\n')
for line in lines:
if fast_ext_failover_str in line:
fast_ext_failover_flag = True
elif log_neighbor_changes_str in line:
log_neighbor_changes_flag = True
if fast_ext_failover_flag is True:
info("### BGP fast-external-failover flag is set ###\n")
elif log_neighbor_changes_flag is True:
info("### BGP log-neighbor-changes flag is set ###\n")
if fast_ext_failover_flag is True or \
log_neighbor_changes_flag is True:
info("### Test to unconfigure BGP Router flags-FAILED! ###\n")
def configure_bgp_network(self):
info("\n########## Test to configure BGP network"
" ##########\n")
network_str = "network 3001::/32"
network_str_flag = False
s1 = self.net.switches[0]
s1.cmdCLI("configure terminal")
s1.cmdCLI("router bgp 100")
s1.cmdCLI("network 3001::1/32")
s1.cmdCLI("end")
dump = s1.cmdCLI("show running-config")
lines = dump.split('\n')
for line in lines:
if network_str in line:
network_str_flag = True
assert network_str_flag is True, \
'Test to configure BGP network FAILED!'
def unconfigure_bgp_network(self):
info("\n########## Test to unconfigure BGP network"
" ##########\n")
network_str = "network 3001::/32"
network_str_flag = False
s1 = self.net.switches[0]
s1.cmdCLI("configure terminal")
s1.cmdCLI("router bgp 100")
s1.cmdCLI("no network 3001::1/32")
s1.cmdCLI("end")
dump = s1.cmdCLI("show running-config")
lines = dump.split('\n')
for line in lines:
if network_str in line:
network_str_flag = True
assert network_str_flag is False, \
'Test to unconfigure BGP network FAILED!'
def configure_routemap_match(self):
info("\n########## Test to configure Route-Map Match commands"
" ##########\n")
match_ipv6_prefix_list_str = "match ipv6 address prefix-list 5"
match_ipv6_prefix_list_flag = False
match_community_str = "match community 100"
match_community_str_flag = False
match_extcommunity_str = "match extcommunity e1"
match_extcommunity_str_flag = False
s1 = self.net.switches[0]
s1.cmdCLI("configure terminal")
s1.cmdCLI("route-map r1 permit 10")
s1.cmdCLI(match_ipv6_prefix_list_str)
s1.cmdCLI(match_community_str)
s1.cmdCLI(match_extcommunity_str)
s1.cmdCLI("end")
dump = s1.cmdCLI("show running-config")
lines = dump.split('\n')
for line in lines:
if match_ipv6_prefix_list_str in line:
match_ipv6_prefix_list_flag = True
elif match_community_str in line:
match_community_str_flag = True
elif match_extcommunity_str in line:
match_extcommunity_str_flag = True
if match_ipv6_prefix_list_flag is False:
info("### Error configuring 'match ipv6 address prefix-list' ###\n")
elif match_community_str_flag is False:
info("### Error configuring 'match community' ###\n")
elif match_extcommunity_str_flag is False:
info("### Error configuring 'match extcommunity' ###\n")
if match_ipv6_prefix_list_flag is False or \
match_community_str_flag is False or \
match_extcommunity_str_flag is False:
info("### Test to configure Route-Map match commands FAILED! ###\n")
def unconfigure_routemap_match(self):
info("\n########## Test to unconfigure Route-Map Match commands"
" ##########\n")
match_ipv6_prefix_list_str = "match ipv6 address prefix-list 5"
no_match_ipv6_prefix_list_str = "no match ipv6 address prefix-list 5"
match_ipv6_prefix_list_flag = False
match_community_str = "match community 100"
no_match_community_str = "no match community 100"
match_community_str_flag = False
match_extcommunity_str = "match extcommunity e1"
no_match_extcommunity_str = "no match extcommunity e1"
match_extcommunity_str_flag = False
s1 = self.net.switches[0]
s1.cmdCLI("configure terminal")
s1.cmdCLI("route-map r1 permit 10")
s1.cmdCLI(no_match_ipv6_prefix_list_str)
s1.cmdCLI(no_match_community_str)
s1.cmdCLI(no_match_extcommunity_str)
s1.cmdCLI("end")
dump = s1.cmdCLI("show running-config")
lines = dump.split('\n')
for line in lines:
if match_ipv6_prefix_list_str in line:
match_ipv6_prefix_list_flag = True
elif match_community_str in line:
match_community_str_flag = True
elif match_extcommunity_str in line:
match_extcommunity_str_flag = True
if match_ipv6_prefix_list_flag is True:
info("### Error unconfiguring 'match ipv6 address prefix-list' ###\n")
elif match_community_str_flag is True:
info("### Error unconfiguring 'match community' ###\n")
elif match_extcommunity_str_flag is True:
info("### Error unconfiguring 'match extcommunity' ###\n")
if match_ipv6_prefix_list_flag is True or \
match_community_str_flag is True or \
match_extcommunity_str_flag is True:
info("### Test to unconfigure Route-Map match commands FAILED! ###\n")
@pytest.mark.skipif(True, reason="Disabling old tests")
class Test_bgpd_router_cmds:
def setup(self):
pass
def teardown(self):
pass
def setup_class(cls):
Test_bgpd_router_cmds.test = bgpCLItest()
def teardown_class(cls):
Test_bgpd_router_cmds.test.net.stop()
def setup_method(self, method):
pass
def teardown_method(self, method):
pass
def __del__(self):
del self.test
def test_bgp_router_cmds(self):
self.test.verify_bgp_router_table()
self.test.configure_bgp_router_flags()
self.test.unconfigure_bgp_router_flags()
self.test.configure_bgp_network()
self.test.unconfigure_bgp_network()
self.test.configure_routemap_match()
self.test.unconfigure_routemap_match()
|
gpl-2.0
| 7,163,407,321,694,720,000 | 35.952727 | 83 | 0.594666 | false |
tulip-control/tulip-control
|
contrib/xml/xmlio.py
|
1
|
23496
|
# Copyright (c) 2014 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""
Contains XML import and export functions so that we can export (and not
recompute) Tulip data structures.
"""
import numpy
import scipy.sparse
import xml.etree.ElementTree as ET
import polytope
from tulip import transys
from tulip import hybrid
from tulip import abstract
# Global names used in tags
N_PROPS = 'props'
N_PROP = 'prop'
N_LOWERBOUND = 'lb'
N_UPPERBOUND = 'ub'
N_FULLDIM = 'fulldim'
N_MINREP = 'minrep'
N_VOLUME = 'volume'
N_DIMENSION = 'dim'
N_BBOX = 'bbox'
N_CHEBR = 'chebR'
N_CHEBXC = 'chebXc'
N_POLYLIST = 'list_poly'
N_REGIONLIST = 'list_reg'
N_KEY = 'key'
N_VALUE = 'value'
N_KEYVALUEPAIR = 'pair'
N_DOMAIN = 'domain'
N_PROPREG = 'proplist'
N_ADJ = 'adj'
N_ITEM = 'item'
# data types used in xml file (attributes)
T_SET = 'set'
T_STRING = 'str'
T_INT = 'int'
T_FLOAT = 'float'
T_BOOL = 'bool'
T_POLYTOPE = 'polytope'
T_MATRIX = 'ndarray'
T_REGION = 'region'
T_DICT = 'dict'
T_PPP = 'PPP'
T_ADJ = 'adjmatrix'
T_TUPLE = 'tuple'
T_LIST = 'list'
T_LTISYS = 'LtiSysDyn'
T_PWASYS = 'PwaSysDyn'
T_HYBRIDSYS = 'HybridSysDyn'
T_FTS = 'FiniteTransitionSystem'
T_OFTS = 'OpenFiniteTransitionSystem'
T_HYBRIDSYS = 'SwitchedSysDyn'
def _make_pretty(tree, indent=1):
"""
Modifies the tail attributes of nodes in an XML tree so that the resulting
printed XML file looks like an indented tree (and not one long line).
@type tree: xml.etree.ElementTree.Element
@type indent: int
@return: None, just modifies tree by reference
@rtype: None
"""
tab_string = '\n' + '\t'*indent
# If a tree has children, put tabs in front of the first child
if tree.getchildren():
if tree.text is not None:
tree.text = tab_string + tree.text
else:
tree.text = tab_string
# Number of children in the tree
N = len(tree.getchildren())
# Recursively run function on children
for index, child in enumerate(tree):
_make_pretty(child, indent=indent+1)
# Print new line and tabs at the tail of each child, except the last
# child. Print new line and one fewer tab at the tail of the last child.
if index < N - 1:
child.tail = tab_string
else:
child.tail = '\n' + '\t'*(indent-1)
def exportXML(data, filename, tag=None):
"""
Exports a Tulip data structure into an XML file.
    @param data: The Tulip data structure to export into an XML file
@type data: L{Polytope}, L{Region}, L{PropPreservingPartition},
L{SwitchedSysDyn}, L{PwaSysDyn}, L{LtiSysDyn}
@param filename: The name of the XML file to export to.
@type filename: string
@param tag: (Optional) What we want the first tag of the XML file to read.
@type tag: string
@return: No return, just writes text.
@rtype: None
"""
if tag is None:
tag = "object"
tree = _export_xml(data, None, tag)
_make_pretty(tree)
pretty_string = ET.tostring(tree)
xmlfile = open(filename, 'w')
xmlfile.write(pretty_string)
xmlfile.close()
def importXML(filename):
"""
Takes a Tulip XML file and returns a Tulip data structure.
@param filename: XML file containing exported data to import
@type filename: string
@return: the data structure exported into the file.
@rtype: L{Polytope}, L{Region}, L{PropPreservingPartition},
L{SwitchedSysDyn}, L{PwaSysDyn}, L{LtiSysDyn}
"""
# Load the file into an xml tree
xmltree = ET.parse(filename)
# send the root to the general tree parser
return _import_xml(xmltree.getroot())
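# A hedged round-trip sketch (not part of the original module): export a simple
# polytope and read it back. The unit-box matrices below are an assumption
# chosen purely for illustration; any object supported by exportXML would do.
def _example_roundtrip(filename='box.xml'):
    A = numpy.array([[1., 0.], [-1., 0.], [0., 1.], [0., -1.]])
    b = numpy.array([1., 1., 1., 1.])
    box = polytope.Polytope(A=A, b=b)
    exportXML(box, filename, tag='unit_box')
    return importXML(filename)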
def _import_xml(node):
"""
Returns the Tulip data structure contained in a parsed XML file.
    @type node: xml.etree.ElementTree.Element
    @rtype: L{Polytope}, L{Region}, L{PropPreservingPartition}, L{SwitchedSysDyn},
L{PwaSysDyn}, L{LtiSysDyn}, L{FiniteTransitionSystem},
L{OpenFiniteTransitionSystem}
"""
# Get the type of data this is
nodetype = node.attrib['type']
# Call the right import function
# Python types
if nodetype == T_STRING:
return node.text
elif nodetype == T_INT:
return int(node.text)
elif nodetype == T_FLOAT:
return float(node.text)
elif nodetype == T_BOOL:
        # bool('False') would evaluate to True, so compare against the exported string
        return node.text == 'True'
elif nodetype == T_MATRIX:
return eval('numpy.array(' + node.text + ')')
elif nodetype == T_TUPLE:
return _import_list(node, type_str=T_TUPLE)
elif nodetype == T_DICT:
return _import_dictionary(node)
elif nodetype == T_LIST:
return _import_list(node)
elif nodetype == T_ADJ:
return _import_adj(node)
elif nodetype == T_SET:
        return _import_list(node, type_str=T_SET)
# Tulip data structures
elif nodetype == T_POLYTOPE:
return _import_polytope(node)
elif nodetype == T_REGION:
return _import_region(node)
elif nodetype == T_PPP:
return _import_ppp(node)
elif nodetype == T_LTISYS:
return _import_ltisys(node)
elif nodetype == T_PWASYS:
return _import_pwasys(node)
elif nodetype == T_HYBRIDSYS:
return _import_hybridsys(node)
else:
raise TypeError('Type ' + nodetype + ' is not supported.')
def _import_adj(node):
# Get number of rows and columns
N = _import_xml(node.findall('num_states')[0])
# Make matrix
sparse_matrix = scipy.sparse.lil_matrix((N,N))
# Get entries and fill them in with ones
entries = _import_xml(node.findall('index_list')[0])
for entry in entries:
sparse_matrix[entry[0],entry[1]] = 1
return sparse_matrix
def _import_ppp(node):
# Domain
domain_node = node.findall('domain')
if domain_node:
domain = _import_xml(domain_node[0])
else:
domain = None
# Regions
regions_node = node.findall('list_reg')
if regions_node:
list_regions = _import_xml(regions_node[0])
else:
list_regions = []
# adj
adj_node = node.findall('adj')
if adj_node:
adjmatrix = _import_xml(adj_node[0])
else:
adjmatrix = None
# prop_regions
prop_regions_node = node.findall('proplist')
if prop_regions_node:
prop_regions = _import_xml(prop_regions_node[0])
else:
prop_regions = None
return abstract.prop2partition.PropPreservingPartition(domain=domain,
regions=list_regions, adj=adjmatrix, prop_regions=prop_regions)
def _import_list(node, type_str=T_LIST):
all_stuff = []
for child in node:
all_stuff.append(_import_xml(child))
if type_str==T_SET:
all_stuff = set(all_stuff)
elif type_str==T_TUPLE:
all_stuff = tuple(all_stuff)
return all_stuff
def _import_dictionary(node):
dictionary = {}
for keyvaluepair in node:
key = _import_xml(keyvaluepair.findall(N_KEY)[0])
value = _import_xml(keyvaluepair.findall(N_VALUE)[0])
dictionary[key] = value
return dictionary
def _import_region(node):
# Get the polytope list and import the polytopes
polytope_list = node.findall(N_POLYLIST)[0]
# Import the polytopes
list_poly = _import_xml(polytope_list)
return polytope.Region(list_poly=list_poly)
def _import_polytope(node):
# Get the A matrix
A = _import_xml(node.findall('A')[0])
# Get the b matrix
b = _import_xml(node.findall('b')[0])
return polytope.Polytope(A=A, b=b)
def _import_ltisys(node):
A = _import_xml(node.findall('A')[0])
B = _import_xml(node.findall('B')[0])
E = _import_xml(node.findall('E')[0])
K = _import_xml(node.findall('K')[0])
Uset = node.findall('Uset')
Wset = node.findall('Wset')
domain = node.findall('domain')
if not Uset:
Uset = None
else:
Uset = _import_xml(Uset[0])
if not Wset:
Wset = None
else:
Wset = _import_xml(Wset[0])
if not domain:
domain = None
else:
domain = _import_xml(domain[0])
return hybrid.LtiSysDyn(A=A, B=B, E=E, K=K, Uset=Uset, Wset=Wset,
domain=domain)
def _import_pwasys(node):
domain = node.findall('domain')
if domain:
domain = _import_xml(domain[0])
else:
domain = None
# Get list of ltisys
ltilist = node.findall('ltilist')[0]
list_subsys = _import_xml(ltilist)
return hybrid.PwaSysDyn(list_subsys=list_subsys, domain=domain)
def _import_hybridsys(node):
# Get parts, import the non-optional parts
disc_domain_size = _import_xml(node.findall('disc_domain_size')[0])
sys_labels = node.findall('sys_labels')
env_labels = node.findall('env_labels')
cts_ss = _import_xml(node.findall('cts_ss')[0])
dynamics = _import_xml(node.findall('dynamics')[0])
if sys_labels:
sys_labels = _import_xml(sys_labels[0])
else:
sys_labels = None
if env_labels:
env_labels = _import_xml(env_labels[0])
else:
        env_labels = None
return hybrid.SwitchedSysDyn(disc_domain_size=disc_domain_size,
dynamics=dynamics, cts_ss=cts_ss, env_labels=env_labels,
disc_sys_labels=sys_labels)
def _export_xml(data, parent=None, tag=None, tag_list=[]):
"""Exports Tulip data structures to XML structures for later import. This
function is called both internal
@param data: the data structure to be exported into an XML tree.
@type data: numpy.ndarray or L{Polytope} or L{Region} or
L{FiniteTransitionSystem} or L{PropPreservingPartition} or
L{AbstractSysDyn} or dict
    @param parent: the parent XML node to attach the new element to, or None
        to create and return a new root element
@type parent: None or xml.etree.ElementTree.Element or
xml.etree.ElementTree.SubElement
@type tag: None or string
@return: None (if parent is None), or an xml tree
@rtype: None or xml.etree.ElementTree.Element or
xml.etree.ElementTree.SubElement
"""
# Tulip types (parent might not exist)
if isinstance(data, polytope.Polytope):
if parent is None:
return _export_polytope(data, parent, tag)
else:
_export_polytope(data, parent, tag)
elif isinstance(data, polytope.Region):
if parent is None:
return _export_region(data, parent, tag)
else:
_export_region(data, parent, tag)
elif isinstance(data, abstract.prop2partition.PropPreservingPartition):
if parent is None:
return _export_ppp(data, parent, tag)
else:
_export_ppp(data, parent, tag)
elif isinstance(data, hybrid.LtiSysDyn):
if parent is None:
return _export_ltisys(data, parent, tag)
else:
_export_ltisys(data, parent, tag)
elif isinstance(data, hybrid.PwaSysDyn):
if parent is None:
return _export_pwasys(data, parent, tag)
else:
_export_pwasys(data, parent, tag)
elif isinstance(data, hybrid.SwitchedSysDyn):
if parent is None:
return _export_hybridsys(data, parent, tag)
else:
_export_hybridsys(data, parent, tag)
elif isinstance(data, transys.transys.FTS):
if parent is None:
return _export_fts(data, parent, tag, type_str=T_FTS)
else:
_export_fts(data, parent, tag, type_str=T_FTS)
elif isinstance(data, transys.transys.OpenFTS):
if parent is None:
return _export_fts(data, parent, tag, type_str=T_OFTS)
else:
_export_fts(data, parent, tag, type_str=T_OFTS)
    # For the remaining (primitive) types, parent is never None
elif (isinstance(data, int) or isinstance(data, numpy.int32)):
if tag is None:
tag = "integer"
new_node = ET.SubElement(parent, tag, type=T_INT)
new_node.text = str(data)
elif isinstance(data, str):
if tag is None:
tag = "string"
new_node = ET.SubElement(parent, tag, type=T_STRING)
new_node.text = data
elif (isinstance(data, bool) or isinstance(data, numpy.bool_)):
if tag is None:
tag = "bool"
new_node = ET.SubElement(parent, tag, type=T_BOOL)
new_node.text = str(data)
elif isinstance(data, float):
if tag is None:
tag = "float"
new_node = ET.SubElement(parent, tag, type=T_FLOAT)
new_node.text = str(data)
elif isinstance(data, dict):
_export_dict(data, parent, tag)
elif isinstance(data, numpy.ndarray):
if tag is None:
tag = "numpyarray"
new_node = ET.SubElement(parent, tag, type=T_MATRIX)
new_node.text = str(data.tolist())
elif isinstance(data, tuple):
#_export_tuple(data, parent, tag)
_export_list(data, parent, tag, type_str=T_TUPLE, tag_list=tag_list)
elif isinstance(data, list):
_export_list(data, parent, tag, type_str=T_LIST, tag_list=tag_list)
elif isinstance(data, scipy.sparse.lil.lil_matrix):
_export_adj(data, parent, tag)
elif isinstance(data, set):
#_export_set(data, parent, tag)
_export_list(data, parent, tag, type_str=T_SET, tag_list=tag_list)
# Type not found
else:
raise TypeError('Type ' + str(type(data)) + ' is not supported.')
def _export_fts(fts, parent, tag, type_str=T_OFTS):
if tag is None:
tag = "TransitionSystem"
if parent is None:
tree = ET.Element(tag, type=type_str)
else:
tree = ET.SubElement(parent, tag, type=type_str)
# List of atomic propositions
_export_list(fts.aps, tree, 'APs', type_str=T_SET)
# List of states with labels
N = len(fts.states.find())
tag_list0 = [ 'state' for i in range(N) ]
states_list_node = ET.SubElement(tree, 'states')
for state in fts.states.find():
state_node = ET.SubElement(states_list_node, 'state')
name = state[0]
ap_list = state[1]['ap']
_export_xml(state[0], state_node, 'name')
_export_xml(ap_list, state_node, 'aps')
# _export_list(fts.states.find(), tree, 'states', tag_list=tag_list0)
# List of initial states
M = len(fts.states.initial)
tag_list1 = [ 'state' for i in range(M) ]
_export_list(fts.states.initial, tree, 'initial_states', tag_list=tag_list1)
# List of actions
if hasattr(fts, 'env_actions'):
_export_list(fts.env_actions, tree, 'env_actions')
if hasattr(fts, 'sys_actions'):
_export_list(fts.sys_actions, tree, 'sys_actions')
# List of transitions with actions. We're not going to use the usual export
# list function to make things more clear
transitions_list_node = ET.SubElement(tree, 'transitions')
for transition in fts.transitions.find():
transition_node = ET.SubElement(transitions_list_node, 'transition')
state1 = transition[0]
state2 = transition[1]
actions_dict = transition[2]
_export_xml(state1, transition_node, 'start_state')
_export_xml(state2, transition_node, 'end_state')
# start_node = ET.SubElement(transition_node, 'start_state')
# start_node.text = state1
# end_node = ET.SubElement(transition_node, 'end_state')
# end_node.text = state2
if 'sys_actions' in actions_dict.keys():
_export_xml(actions_dict['sys_actions'], transition_node,
tag='sys_action')
if 'env_actions' in actions_dict.keys():
_export_xml(actions_dict['env_actions'], transition_node,
tag='env_action')
# _export_list(fts.transitions.find(), tree, 'transitions')
# label_list = ['
if parent is None:
return tree
def _export_ppp(ppp, parent, tag):
"""
@return: None (if parent is None), or an xml tree
@rtype: None or xml.etree.ElementTree.Element or
xml.etree.ElementTree.SubElement
"""
if tag is None:
tag = "PropPreservingPartition"
if parent is None:
tree = ET.Element(tag, type=T_PPP)
else:
tree = ET.SubElement(parent, tag, type=T_PPP)
# Domain (polytope)
_export_polytope(ppp.domain, tree, tag=N_DOMAIN)
# regions (list of regions)
_export_xml(ppp.regions, tree, N_REGIONLIST)
# adj (adjacency matrix)
_export_adj(ppp.adj, tree, N_ADJ)
# prop regions (dictionary mapping strings to regions)
_export_xml(ppp.prop_regions, tree, N_PROPREG)
if parent is None:
return tree
def _export_ltisys(ltisys, parent, tag=None):
"""
@return: None (if parent is None), or an xml tree
@rtype: None or xml.etree.ElementTree.Element or
xml.etree.ElementTree.SubElement
"""
if tag is None:
tag = "LtiSysDyn"
if parent is None:
tree = ET.Element(tag, type=T_LTISYS)
else:
tree = ET.SubElement(parent, tag, type=T_LTISYS)
# State space matrices
_export_xml(ltisys.A, tree, 'A')
_export_xml(ltisys.B, tree, 'B')
_export_xml(ltisys.E, tree, 'E')
_export_xml(ltisys.K, tree, 'K')
# Domain matrices
if ltisys.Uset is not None:
_export_polytope(ltisys.Uset, tree, 'Uset')
if ltisys.Wset is not None:
_export_polytope(ltisys.Wset, tree, 'Wset')
if ltisys.domain is not None:
_export_polytope(ltisys.domain, tree, 'domain')
if parent is None:
return tree
def _export_pwasys(pwasys, parent, tag=None):
"""
@return: None (if parent is None), or an xml tree
@rtype: None or xml.etree.ElementTree.Element or
xml.etree.ElementTree.SubElement
"""
if tag is None:
tag = "PwaSysDyn"
if parent is None:
tree = ET.Element(tag, type=T_PWASYS)
else:
tree = ET.SubElement(parent, tag, type=T_PWASYS)
# Export domain
if pwasys.domain is not None:
_export_polytope(pwasys.domain, tree, 'domain')
# Export lti list
_export_list(pwasys.list_subsys, tree, 'ltilist')
if parent is None:
return tree
def _export_hybridsys(hybridsys, parent, tag=None):
"""
@return: None (if parent is None), or an xml tree
@rtype: None or xml.etree.ElementTree.Element or
xml.etree.ElementTree.SubElement
"""
if tag is None:
tag = "SwitchedSysDyn"
if parent is None:
tree = ET.Element(tag, type=T_HYBRIDSYS)
else:
tree = ET.SubElement(parent, tag, type=T_HYBRIDSYS)
# cts_ss
_export_xml(hybridsys.cts_ss, tree, tag="cts_ss")
# disc_domain_size
_export_xml(hybridsys.disc_domain_size, tree, tag="disc_domain_size")
# disc_sys_labels
_export_xml(hybridsys.disc_sys_labels, tree, tag="sys_labels")
# env_labels
_export_xml(hybridsys.env_labels, tree, tag="env_labels")
# Dynamics
_export_dict(hybridsys.dynamics, tree, tag="dynamics")
if parent is None:
return tree
def _export_polytope(poly, parent, tag=None):
"""Builds an XML tree from a polytope
@param poly: Polytope to export
@type poly: Polytope
@return: None (if parent is None), or an xml tree
@rtype: None or xml.etree.ElementTree.Element or
xml.etree.ElementTree.SubElement
"""
if tag is None:
tag = "polytope"
if parent is None:
tree = ET.Element(tag, type=T_POLYTOPE)
else:
tree = ET.SubElement(parent, tag, type=T_POLYTOPE)
# A and b matrices
_export_xml(poly.A, tree, 'A')
_export_xml(poly.b, tree, 'b')
# Optional parts
# minimal representation (bool)
if poly.minrep is not None:
_export_xml(poly.minrep, tree, N_MINREP)
# bounding box
if poly.bbox is not None:
bbox_node = ET.SubElement(tree, N_BBOX)
_export_xml(poly.bbox[0], bbox_node, N_LOWERBOUND)
_export_xml(poly.bbox[1], bbox_node, N_UPPERBOUND)
# chebyshev center (ndarray)
if poly.chebXc is not None:
_export_xml(poly.chebXc, tree, N_CHEBXC)
# chebyshev radius (float)
if poly.chebR is not None:
_export_xml(poly.chebR, tree, N_CHEBR)
# dimension (integer)
if poly.dim:
_export_xml(poly.dim, tree, N_DIMENSION)
# full dimension (bool)
if poly.fulldim is not None:
_export_xml(poly.fulldim, tree, N_FULLDIM)
# volume (float)
if poly.volume is not None:
_export_xml(poly.volume, tree, N_VOLUME)
# Return if there is no parent
if parent is None:
return tree
def _export_region(reg, parent, tag=None):
"""
@return: None (if parent is None), or an xml tree
@rtype: None or xml.etree.ElementTree.Element or
xml.etree.ElementTree.SubElement
"""
if tag is None:
tag = "region"
if parent is None:
tree = ET.Element(tag, type=T_REGION)
else:
tree = ET.SubElement(parent, tag, type=T_REGION)
# Attach list of polytopes
_export_list(reg.list_poly, tree, N_POLYLIST)
# Attach optional parts of region:
# Bounding box, two numpy arrays
if reg.bbox is not None:
bbox_node = ET.SubElement(tree, N_BBOX)
_export_xml(reg.bbox[0], bbox_node, N_LOWERBOUND)
_export_xml(reg.bbox[1], bbox_node, N_UPPERBOUND)
# Dimension (integer)
if reg.dim:
_export_xml(reg.dim, tree, N_DIMENSION)
# Fulldim (bool)
if reg.fulldim is not None:
_export_xml(reg.fulldim, tree, N_FULLDIM)
# Volume (float)
if reg.volume is not None:
_export_xml(reg.volume, tree, N_VOLUME)
# Chebyshev radius (float)
if reg.chebR is not None:
_export_xml(reg.chebR, tree, N_CHEBR)
# Chebyshev center (array)
if reg.chebXc is not None:
_export_xml(reg.chebXc, tree, N_CHEBXC)
# Propositions that hold in region (set of strings)
if reg.props:
_export_xml(reg.props, tree, N_PROPS)
if parent is None:
return tree
def _export_adj(matrix, parent, tag=None):
"""Converts an adjacency matrix (scipy.sparse.lil.lil_matrix) into an xml
tree.
    @param matrix: Sparse adjacency matrix.
@type matrix: scipy.sparse.lil.lil_matrix
@type parent: None or xml.etree.ElementTree.Element or
xml.etree.ElementTree.SubElement
@type tag: string
@return: None (if parent is None), or an xml tree
@rtype: None or xml.etree.ElementTree.Element or
xml.etree.ElementTree.SubElement
"""
if tag is None:
tag = "adj"
if parent is None:
tree = ET.Element(tag, type=T_ADJ)
else:
tree = ET.SubElement(parent, tag, type=T_ADJ)
# number of states, because the matrix must be square
(M,N) = matrix.shape
_export_xml(N, tree, "num_states")
# list of nonzero indices
(row_indices, col_indices) = matrix.nonzero()
indices = []
for i, row_ind in enumerate(row_indices):
col_ind = col_indices[i]
indices.append((row_ind, col_ind))
_export_list(indices, tree, "index_list")
if parent is None:
return tree
def _export_dict(dictionary, parent, tag=None):
"""
Converts a dictionary into an XML tree. The key and value can be any
supported type because the function calls _export_xml() on the key and
value to form the document tree.
@type dictionary: dict
@type parent: None or xml.etree.ElementTree.Element or
xml.etree.ElementTree.SubElement
@type tag: string
@return: An XML tree if parent is None. Otherwise, modifies the tree parent
is contained in without returning.
@rtype: xml.etree.ElementTree.Element or None
"""
if tag is None:
tag = "dict"
if parent is None:
tree = ET.Element(tag, type=T_DICT)
else:
tree = ET.SubElement(parent, tag, type=T_DICT)
# Make key value pairs
for key, value in dictionary.items():
pair_node = ET.SubElement(tree, N_KEYVALUEPAIR)
_export_xml(key, parent=pair_node, tag=N_KEY)
_export_xml(value, parent=pair_node, tag=N_VALUE)
if parent is None:
return tree
def _export_list(lst, parent, tag=None, type_str=T_LIST, tag_list=[]):
# Tag list is either a list of labels or a list of lists
if tag is None:
tag = "list"
if parent is None:
tree = ET.Element(tag, type=type_str)
else:
tree = ET.SubElement(parent, tag, type=type_str)
if (tag_list and (len(tag_list) != len(lst))):
raise ValueError('len(tag_list) != len(lst).')
elif not tag_list:
tag_list = [ N_ITEM for item in lst ]
for ind, item in enumerate(lst):
_export_xml(item, parent=tree, tag=tag_list[ind])
if parent is None:
return tree
|
bsd-3-clause
| -2,195,090,474,921,197,600 | 24.73494 | 77 | 0.701311 | false |
bitdagger/mtg-scanner
|
scanner.py
|
1
|
8352
|
from __future__ import print_function
import numpy as np
import cv2
import math
import json
import sys
import phash
import operator
import signal
import base64
from debugger import MTG_Debugger
from mtgexception import MTGException
from transformer import MTG_Transformer
"""Scanner module
This module is responsible for handling user input and reading the data from
the camera to pass off to other modules.
"""
class MTG_Scanner:
"""Attributes:
running (bool): Is the scanning loop running
frame (image): The active frame
bApplyTransforms (bool): Should transforms be applied
bVertFlip (bool): Should the frame be flipped vertically?
threshold (int): Hamming distance threshold
detected_card (image): The image of the proposed card
detected_id (int): The MultiverseID of the proposed card
previous_id (int): The MultiverseID of the last card entered
blacklist (array): Array of MultiverseIDs to exclude from detection
referencedb (MTG_Reference_DB): The reference database object
storagedb (MTG_Storage_DB): The storage database object
debugger (MTG_Debugger): The debugging object
transformer (MTG_Transformer): The transformer object
captureDevice (cv2.VideoCapture): The camera to capture from
"""
def __init__(self, source, referencedb, storagedb, debug):
self.running = False
self.frame = None
self.bApplyTransforms = False
self.bVertFlip = False
self.threshold = 15
self.detected_card = None
self.detected_id = None
self.previous_id = None
self.blacklist = []
self.referencedb = referencedb
self.storagedb = storagedb
self.debugger = MTG_Debugger(debug)
self.transformer = MTG_Transformer(self.debugger)
self.captureDevice = cv2.VideoCapture(source)
def run(self):
"""Main execution
"""
self.running = True
while(self.running):
if (self.detected_card is None):
self.debugger.reset()
__, frame = self.captureDevice.read()
if (frame is None):
print('Error: No frame read from camera')
break
if (self.bApplyTransforms):
try:
frame = self.transformer.applyTransforms(frame)
except MTGException as msg:
self.bApplyTransforms = False
else:
height, width, __ = frame.shape
cv2.rectangle(
frame,
(0, 0),
(width - 1, height - 1),
(255, 0, 0),
2)
if (self.bVertFlip):
height, width, __ = frame.shape
M = cv2.getRotationMatrix2D(
(width / 2, height / 2),
180,
1)
frame = cv2.warpAffine(frame, M, (width, height))
self.frame = frame
cv2.imshow('Preview', self.frame)
self.debugger.display()
else:
cv2.imshow('Detected Card', self.detected_card)
self.handleKey(cv2.waitKey(1) & 0xFF, frame)
if (self.captureDevice is not None):
self.captureDevice.release()
cv2.destroyAllWindows()
def detectCard(self):
"""Detect the card from the active frame
"""
# The phash python bindings operate on files, so we have to write our
# current frame to a file to continue
cv2.imwrite('frame.jpg', self.frame)
# Use phash on our frame
ihash = phash.dct_imagehash('frame.jpg')
idigest = phash.image_digest('frame.jpg')
candidates = {}
hashes = self.referencedb.get_hashes()
for MultiverseID in hashes:
if (MultiverseID in self.blacklist):
continue
hamd = phash.hamming_distance(ihash, int(hashes[MultiverseID]))
if (hamd <= self.threshold):
candidates[MultiverseID] = hamd
if (not len(candidates)):
print('No matches found')
return None
finalists = []
minV = min(candidates.values())
for MultiverseID in candidates:
if (candidates[MultiverseID] == minV):
finalists.append(MultiverseID)
bestMatch = None
correlations = {}
for MultiverseID in finalists:
hamd = candidates[MultiverseID]
digest = phash.image_digest(
self.referencedb.IMAGE_FILE % MultiverseID)
corr = phash.cross_correlation(idigest, digest)
if (bestMatch is None or corr > correlations[bestMatch]):
bestMatch = MultiverseID
correlations[MultiverseID] = corr
return bestMatch
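    # Illustrative note (not in the original source): detection is two-stage.
    # With the default threshold of 15, every reference hash within a Hamming
    # distance of 15 of the frame's DCT hash becomes a candidate; ties at the
    # minimum distance are broken by the highest phash cross-correlation.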
def handleKey(self, key, frame):
if (self.detected_card is None):
if (key == 8 or key == 27):
self.bApplyTransforms = not self.bApplyTransforms
elif (key == ord('d')):
self.debugger.toggle()
elif (key == 171):
self.detected_id = self.previous_id
if (self.detected_id is not None):
self.detected_card = cv2.imread(
self.referencedb.IMAGE_FILE % self.detected_id,
cv2.IMREAD_UNCHANGED)
elif (key == 10):
if (not self.bApplyTransforms):
self.bApplyTransforms = True
else:
self.detected_id = self.detectCard()
if (self.detected_id is not None):
self.detected_card = cv2.imread(
self.referencedb.IMAGE_FILE % self.detected_id,
cv2.IMREAD_UNCHANGED)
else:
if (key == ord('n')):
cv2.destroyWindow('Detected Card')
self.blacklist.append(self.detected_id)
self.detected_id = self.detectCard()
if (self.detected_id is not None):
self.detected_card = cv2.imread(
self.referencedb.IMAGE_FILE % self.detected_id,
cv2.IMREAD_UNCHANGED)
if (key == ord('p')):
self.blacklist = []
for i in range(0, 4):
self.storagedb.add_card(self.detected_id, 0)
name, code = self.referencedb.get_card_info(self.detected_id)
print('Added 4x ' + name + '[' + code + ']...')
self.previous_id = self.detected_id
self.detected_card = None
self.detected_id = None
self.bApplyTransforms = False
cv2.destroyWindow('Detected Card')
if (key == 10 or key == ord('y')):
self.blacklist = []
self.storagedb.add_card(self.detected_id, 0)
name, code = self.referencedb.get_card_info(self.detected_id)
print('Added ' + name + '[' + code + ']...')
self.previous_id = self.detected_id
self.detected_card = None
self.detected_id = None
self.bApplyTransforms = False
cv2.destroyWindow('Detected Card')
if (key == ord('f')):
self.blacklist = []
self.storagedb.add_card(self.detected_id, 1)
name, code = self.referencedb.get_card_info(self.detected_id)
print('Added foil ' + name + '[' + code + ']...')
self.previous_id = self.detected_id
self.detected_card = None
self.detected_id = None
self.bApplyTransforms = False
cv2.destroyWindow('Detected Card')
elif (key == 8 or key == 27):
self.blacklist = []
self.detected_card = None
self.detected_id = None
self.bApplyTransforms = False
cv2.destroyWindow('Detected Card')
if (key == ord('q')):
self.running = False
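# Hedged usage sketch (illustrative only, not part of this module). The
# database objects are hypothetical stand-ins for the project's reference and
# storage classes named in the docstring above; camera source 0 is assumed.
#
#   referencedb = MTG_Reference_DB()
#   storagedb = MTG_Storage_DB()
#   scanner = MTG_Scanner(0, referencedb, storagedb, debug=False)
#   scanner.run()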
|
mit
| -6,063,127,265,762,515,000 | 36.452915 | 77 | 0.530771 | false |
elParaguayo/RPI-Info-Screen
|
displayscreen.py
|
1
|
14668
|
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import pygame
import ConfigParser
import sys
import os
import urllib2
import urllib
import StringIO
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
class PiInfoScreen():
# Set default names
pluginname = "UNDEFINED"
plugininfo = "You should set pluginname and plugininfo in your plugin subclass"
# List of screen sizes supported by the script
supportedsizes = [ (694,466) ]
# Refresh time = how often the data on the screen should be updated (seconds)
refreshtime = 30
# How long screen should be displayed before moving on to next screen (seconds)
    # only relevant when screen is automatically changing screens
# rather than waiting for key press
displaytime = 5
# Read the plugin's config file and dump contents to a dictionary
def readConfig(self):
class AutoVivification(dict):
"""Implementation of perl's autovivification feature."""
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
value = self[item] = type(self)()
return value
self.pluginConfig = AutoVivification()
try:
config = ConfigParser.ConfigParser()
config.read(self.configfile)
for section in config.sections():
for option in config.options(section):
self.pluginConfig[section][option] = config.get(section,option)
except:
pass
self.setPluginVariables()
# Can be overriden to allow plugin to change option type
# Default method is to treat all options as strings
# If option needs different type (bool, int, float) then this should be
# done here
# Alternatively, plugin can just read variables from the pluginConfig
# dictionary that's created
# Any other variables (colours, fonts etc.) should be defined here
def setPluginVariables(self):
pass
# Tells the main script that the plugin is compatible with the requested
# screen size
def supported(self):
return self.supported
# Returns the refresh time
def refreshtime(self):
return self.refreshtime
# Returns the display time
def displaytime(self):
return self.displaytime
# Returns a short description of the script
# displayed when user requests list of installed plugins
def showInfo(self):
return self.plugininfo
# Returns name of the plugin
def screenName(self):
return self.pluginname
# Handle button events
# These should be overriden by screens if required
def Button1Click(self):
pass
def Button2Click(self):
pass
def Button3Click(self):
pass
    def Button4Click(self):
pass
# Get web page
def getPage(self, url):
user_agent = 'Mozilla/5 (Solaris 10) Gecko'
headers = { 'User-Agent' : user_agent }
request = urllib2.Request(url)
response = urllib2.urlopen(request)
the_page = response.read()
return the_page
# Function to get image and return in format pygame can use
def LoadImageFromUrl(self, url, solid = False):
f = urllib.urlopen(url)
buf = StringIO.StringIO(f.read())
image = self.LoadImage(buf, solid)
return image
def LoadImage(self, fileName, solid = False):
image = pygame.image.load(fileName)
image = image.convert()
if not solid:
colorkey = image.get_at((0,0))
image.set_colorkey(colorkey, pygame.RLEACCEL)
return image
# Draws a progress bar
def showProgress(self, position, barsize,
bordercolour, fillcolour, bgcolour):
try:
if position < 0 : position = 0
if position > 1 : position = 1
except:
position = 0
progress = pygame.Surface(barsize)
pygame.draw.rect(progress,bgcolour,(0,0,barsize[0],barsize[1]))
progresswidth = int(barsize[0] * position)
pygame.draw.rect(progress,fillcolour,(0,0,progresswidth,barsize[1]))
pygame.draw.rect(progress,bordercolour,(0,0,barsize[0],barsize[1]),1)
return progress
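    # Hedged usage sketch (illustrative only): a half-full 100x10 progress bar.
    #
    #   bar = self.showProgress(0.5, (100, 10),
    #                           (255, 255, 255), (0, 255, 0), (0, 0, 0))
    #   self.screen.blit(bar, (10, 10))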
def render_textrect(self, string, font, rect, text_color,
background_color, justification=0, vjustification=0,
margin=0, shrink = False, SysFont=None, FontPath=None,
MaxFont=0, MinFont=0):
"""Returns a surface containing the passed text string, reformatted
to fit within the given rect, word-wrapping as necessary. The text
will be anti-aliased.
Takes the following arguments:
string - the text you wish to render. \n begins a new line.
font - a Font object
rect - a rectstyle giving the size of the surface requested.
text_color - a three-byte tuple of the rgb value of the
text color. ex (0, 0, 0) = BLACK
background_color - a three-byte tuple of the rgb value of the surface.
justification - 0 (default) left-justified
1 horizontally centered
2 right-justified
Returns the following values:
Success - a surface object with the text rendered onto it.
Failure - raises a TextRectException if the text won't fit onto the
surface.
"""
""" Amended by el_Paraguayo:
- cutoff=True - cuts off text instead of raising error
- margin=(left,right,top,bottom) or
- margin=2 is equal to margin = (2,2,2,2)
- shrink=True adds variable font size to fit text
- Has additional args:
- SysFont=None - set SysFont to use when shrinking
- FontPath=none - set custom font path to use when shrinking
MaxFont=0 (max font size)
MinFont=0 (min font size)
- vjustification=0 adds vertical justification
0 = Top
1 = Middle
2 = Bottom
"""
class TextRectException(Exception):
def __init__(self, message = None):
self.message = message
def __str__(self):
return self.message
def draw_text_rect(string, font, rect, text_color, background_color,
justification=0, vjustification=0, margin=0,
cutoff=True):
final_lines = []
requested_lines = string.splitlines()
# Create a series of lines that will fit on the provided
# rectangle.
for requested_line in requested_lines:
if font.size(requested_line)[0] > (rect.width - (margin[0] + margin[1])):
words = requested_line.split(' ')
# if any of our words are too long to fit, return.
# for word in words:
# if font.size(word)[0] >= (rect.width - (margin * 2)):
# raise TextRectException, "The word " + word + "
# is too long to fit in the rect passed."
# Start a new line
accumulated_line = ""
for word in words:
test_line = accumulated_line + word + " "
# Build the line while the words fit.
if font.size(test_line.strip())[0] < (rect.width - (margin[0] + margin[1])) :
accumulated_line = test_line
else:
final_lines.append(accumulated_line)
accumulated_line = word + " "
final_lines.append(accumulated_line)
else:
final_lines.append(requested_line)
# Let's try to write the text out on the surface.
surface = pygame.Surface(rect.size)
surface.fill(background_color)
accumulated_height = 0
for line in final_lines:
if accumulated_height + font.size(line)[1] >= (rect.height - margin[2] - margin[3]):
if not cutoff:
raise TextRectException, "Once word-wrapped, the text string was too tall to fit in the rect."
else:
break
if line != "":
tempsurface = font.render(line.strip(), 1, text_color)
if justification == 0:
surface.blit(tempsurface, (0 + margin[0], accumulated_height + margin[2]))
elif justification == 1:
surface.blit(tempsurface, ((rect.width - tempsurface.get_width()) / 2, accumulated_height + margin[2]))
elif justification == 2:
surface.blit(tempsurface, (rect.width - tempsurface.get_width() - margin[1], accumulated_height + margin[2]))
else:
raise TextRectException, "Invalid justification argument: " + str(justification)
accumulated_height += font.size(line)[1]
if vjustification == 0:
# Top aligned, we're ok
pass
elif vjustification == 1:
# Middle aligned
tempsurface = pygame.Surface(rect.size)
tempsurface.fill(background_color)
vpos = (0, (rect.size[1] - accumulated_height)/2)
tempsurface.blit(surface, vpos, (0,0,rect.size[0],accumulated_height))
surface = tempsurface
elif vjustification == 2:
# Bottom aligned
tempsurface = pygame.Surface(rect.size)
tempsurface.fill(background_color)
vpos = (0, (rect.size[1] - accumulated_height - margin[3]))
tempsurface.blit(surface, vpos, (0,0,rect.size[0],accumulated_height))
surface = tempsurface
else:
raise TextRectException, "Invalid vjustification argument: " + str(justification)
return surface
surface = None
if type(margin) is tuple:
if not len(margin) == 4:
try:
margin = (int(margin), int(margin), int(margin), int(margin))
except:
margin = (0,0,0,0)
elif type(margin) is int:
margin = (margin, margin, margin, margin)
else:
margin = (0,0,0,0)
if not shrink:
surface = draw_text_rect(string, font, rect, text_color, background_color,
justification=justification, vjustification=vjustification,
margin=margin, cutoff=False)
else:
fontsize = MaxFont
fit = False
while fontsize >= MinFont:
if FontPath is None:
myfont = pygame.font.SysFont(SysFont,fontsize)
else:
myfont = pygame.font.Font(FontPath,fontsize)
try:
surface = draw_text_rect(string, myfont, rect,text_color, background_color,
justification=justification, vjustification=vjustification,
margin=margin, cutoff=False)
fit = True
break
except:
fontsize -= 1
if not fit:
surface = draw_text_rect(string, myfont, rect, text_color, background_color,
justification=justification, vjustification=vjustification,
margin=margin)
return surface
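    # Hedged usage sketch (illustrative only): wrap a message into a 200x100
    # box, centred both horizontally and vertically, with a 2px margin.
    #
    #   font = pygame.font.SysFont("freesans", 16)
    #   box = pygame.Rect(0, 0, 200, 100)
    #   surf = self.render_textrect("Hello world", font, box,
    #                               (255, 255, 255), (0, 0, 0),
    #                               justification=1, vjustification=1, margin=2)
    #   self.screen.blit(surf, box.topleft)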
# Main function - returns screen to main script
# Will be overriden by plugins
# Defaults to showing name and description of plugin
def showScreen(self):
self.screen.fill([0,0,0])
screentext = pygame.font.SysFont("freesans",20).render("%s: %s." % (self.pluginname, self.plugininfo),1,(255,255,255))
screenrect = screentext.get_rect()
screenrect.centerx = self.screen.get_rect().centerx
screenrect.centery = self.screen.get_rect().centery
self.screen.blit(screentext,screenrect)
return self.screen
def setUpdateTimer(self):
pygame.time.set_timer(self.userevents["update"], 0)
pygame.time.set_timer(self.userevents["update"], int(self.refreshtime * 1000))
# This function should not be overriden
def __init__(self, screensize, scale=True, userevents=None):
# Set config filepath...
self.plugindir=os.path.dirname(sys.modules[self.__class__.__module__].__file__)
self.configfile = os.path.join(self.plugindir, "config", "screen.ini")
# ...and read the config file
self.readConfig()
# Save the requested screen size
self.screensize = screensize
self.userevents = userevents
# Check requested screen size is compatible and set supported property
if screensize not in self.supportedsizes:
self.supported = False
else:
self.supported = True
# Initialise pygame for the class
if self.supported or scale:
pygame.init()
self.screen = pygame.display.set_mode(self.screensize)
self.surfacesize = self.supportedsizes[0]
self.surface = pygame.Surface(self.surfacesize)
|
gpl-3.0
| 2,532,543,182,638,478,300 | 38.750678 | 133 | 0.557131 | false |
chrislit/abydos
|
abydos/distance/_baulieu_ii.py
|
1
|
4475
|
# Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.distance._baulieu_ii.
Baulieu II similarity
"""
from typing import Any, Counter as TCounter, Optional, Sequence, Set, Union
from ._token_distance import _TokenDistance
from ..tokenizer import _Tokenizer
__all__ = ['BaulieuII']
class BaulieuII(_TokenDistance):
r"""Baulieu II similarity.
For two sets X and Y and a population N, Baulieu II similarity
:cite:`Baulieu:1989` is
.. math::
sim_{BaulieuII}(X, Y) =
\frac{|X \cap Y|^2 \cdot |(N \setminus X) \setminus Y|^2}
{|X| \cdot |Y| \cdot |N \setminus X| \cdot |N \setminus Y|}
This is based on Baulieu's 13th dissimilarity coefficient.
In :ref:`2x2 confusion table terms <confusion_table>`, where a+b+c+d=n,
this is
.. math::
sim_{BaulieuII} =
\frac{a^2d^2}{(a+b)(a+c)(b+d)(c+d)}
.. versionadded:: 0.4.0
"""
def __init__(
self,
alphabet: Optional[
Union[TCounter[str], Sequence[str], Set[str], int]
] = None,
tokenizer: Optional[_Tokenizer] = None,
intersection_type: str = 'crisp',
**kwargs: Any
) -> None:
"""Initialize BaulieuII instance.
Parameters
----------
alphabet : Counter, collection, int, or None
This represents the alphabet of possible tokens.
See :ref:`alphabet <alphabet>` description in
:py:class:`_TokenDistance` for details.
tokenizer : _Tokenizer
A tokenizer instance from the :py:mod:`abydos.tokenizer` package
intersection_type : str
Specifies the intersection type, and set type as a result:
See :ref:`intersection_type <intersection_type>` description in
:py:class:`_TokenDistance` for details.
**kwargs
Arbitrary keyword arguments
Other Parameters
----------------
qval : int
The length of each q-gram. Using this parameter and tokenizer=None
will cause the instance to use the QGram tokenizer with this
q value.
metric : _Distance
A string distance measure class for use in the ``soft`` and
``fuzzy`` variants.
threshold : float
A threshold value, similarities above which are counted as
members of the intersection for the ``fuzzy`` variant.
.. versionadded:: 0.4.0
"""
super(BaulieuII, self).__init__(
alphabet=alphabet,
tokenizer=tokenizer,
intersection_type=intersection_type,
**kwargs
)
def sim(self, src: str, tar: str) -> float:
"""Return the Baulieu II similarity of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
Returns
-------
float
Baulieu II similarity
Examples
--------
>>> cmp = BaulieuII()
>>> cmp.sim('cat', 'hat')
0.24871959237343852
>>> cmp.sim('Niall', 'Neil')
0.13213719608444902
>>> cmp.sim('aluminum', 'Catalan')
0.013621892326789235
>>> cmp.sim('ATCG', 'TAGC')
0.0
.. versionadded:: 0.4.0
"""
self._tokenize(src, tar)
a = self._intersection_card()
b = self._src_only_card()
c = self._tar_only_card()
d = self._total_complement_card()
num = a * a * d * d
if num == 0:
return 0.0
return num / ((a + b) * (a + c) * (b + d) * (c + d))
if __name__ == '__main__':
import doctest
doctest.testmod()
|
gpl-3.0
| -737,164,622,069,967,200 | 28.248366 | 78 | 0.575419 | false |
hying-caritas/ibsuite
|
ibpy/ibpy/input.py
|
1
|
1091
|
#
# Copyright 2009 Huang Ying <huang.ying.caritas@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
import sys
import pdf
import pdfinfo
import imb
import imbinfo
import djvu
import djvuinfo
def get_input_info(config):
iformat = config.input_format
if iformat == 'pdf':
info_parser = pdfinfo.PDFInfoParser(config)
elif iformat == 'imb':
info_parser = imbinfo.ImbInfoParser(config)
elif iformat == 'djvu':
info_parser = djvuinfo.DJVUInfoParser(config)
else:
print 'Invalid input format: %s' % (iformat)
sys.exit(-1)
return info_parser.parse()
def create_input_to_ppm(config):
if config.input_format == 'pdf':
return pdf.create_pdf_to_ppm(config)
elif config.input_format == 'imb':
return imb.IMBToPPM(config)
elif config.input_format == 'djvu':
return djvu.DJVUToPPM(config)
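# Hedged usage sketch (illustrative only): `config` is assumed to expose an
# `input_format` attribute, as the dispatch functions above imply.
#
#   config.input_format = 'pdf'
#   info = get_input_info(config)
#   to_ppm = create_input_to_ppm(config)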
|
gpl-2.0
| -2,425,802,207,864,475,600 | 27.710526 | 70 | 0.68561 | false |
greyhavens/thane
|
tamarin-central/configure.py
|
1
|
10406
|
#!/usr/bin/env python
# -*- Mode: Python; indent-tabs-mode: nil -*-
# vi: set ts=4 sw=4 expandtab:
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is [Open Source Virtual Machine].
#
# The Initial Developer of the Original Code is
# Adobe System Incorporated.
# Portions created by the Initial Developer are Copyright (C) 2005-2006
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
#
# This script runs just like a traditional configure script, to do configuration
# testing and makefile generation.
import os.path
import sys
thisdir = os.path.dirname(os.path.abspath(__file__))
# Look for additional modules in our build/ directory.
sys.path.append(thisdir)
from build.configuration import *
import build.getopt
o = build.getopt.Options()
config = Configuration(thisdir, options = o,
sourcefile = 'core/avmplus.h')
buildTamarin = o.getBoolArg('tamarin', True)
if buildTamarin:
config.subst("ENABLE_TAMARIN", 1)
buildShell = o.getBoolArg("shell", False)
if (buildShell):
config.subst("ENABLE_SHELL", 1)
buildThane = o.getBoolArg("thane", False)
if (buildThane):
config.subst("ENABLE_THANE", 1)
APP_CPPFLAGS = ""
APP_CXXFLAGS = ""
OPT_CXXFLAGS = "-O3 "
OPT_CPPFLAGS = ""
DEBUG_CPPFLAGS = "-DDEBUG -D_DEBUG "
DEBUG_CXXFLAGS = ""
DEBUG_LDFLAGS = ""
OS_LIBS = []
OS_LDFLAGS = ""
MMGC_CPPFLAGS = ""
AVMSHELL_CPPFLAGS = ""
AVMSHELL_LDFLAGS = ""
MMGC_DEFINES = {'SOFT_ASSERTS': None}
NSPR_INCLUDES = ""
NSPR_LDOPTS = ""
selfTest = o.getBoolArg("selftests", False)
if selfTest:
APP_CPPFLAGS += "-DAVMPLUS_SELFTEST "
memoryProfiler = o.getBoolArg("memory-profiler", False)
if memoryProfiler:
APP_CPPFLAGS += "-DMMGC_MEMORY_PROFILER "
MMGC_INTERIOR_PTRS = o.getBoolArg('mmgc-interior-pointers', False)
if MMGC_INTERIOR_PTRS:
MMGC_DEFINES['MMGC_INTERIOR_PTRS'] = None
MMGC_DYNAMIC = o.getBoolArg('mmgc-shared', False)
if MMGC_DYNAMIC:
MMGC_DEFINES['MMGC_DLL'] = None
MMGC_CPPFLAGS += "-DMMGC_IMPL "
MMGC_THREADSAFE = o.getBoolArg('threadsafe-mmgc', False)
if MMGC_THREADSAFE:
MMGC_DEFINES['MMGC_THREADSAFE'] = None
NSPR_INCLUDES = o.getStringArg('nspr-includes')
MMGC_CPPFLAGS += NSPR_INCLUDES + " "
APP_CPPFLAGS += NSPR_INCLUDES + " "
NSPR_LDOPTS = o.getStringArg('nspr-ldopts')
OS_LDFLAGS += " " + NSPR_LDOPTS
os, cpu = config.getTarget()
if config.getCompiler() == 'GCC':
APP_CXXFLAGS = "-fstrict-aliasing -Wextra -Wall -Wno-reorder -Wno-switch -Wno-invalid-offsetof -Wsign-compare -Wunused-parameter -fmessage-length=0 -fno-rtti -fno-exceptions "
if config.getDebug():
APP_CXXFLAGS += ""
else:
APP_CXXFLAGS += "-Wuninitialized "
DEBUG_CXXFLAGS += "-g "
elif config.getCompiler() == 'VS':
if cpu == "arm":
APP_CXXFLAGS = "-W4 -WX -wd4291 -wd4201 -wd4189 -wd4740 -wd4127 -fp:fast -GF -GS- -Zc:wchar_t- "
OS_LDFLAGS += "-MAP "
if config.getDebug():
DEBUG_CXXFLAGS = "-Od "
APP_CXXFLAGS += "-GR- -fp:fast -GS- -Zc:wchar_t- -Zc:forScope "
else:
OPT_CXXFLAGS = "-O2 -GR- "
else:
APP_CXXFLAGS = "-W4 -WX -wd4291 -GF -fp:fast -GS- -Zc:wchar_t- "
OS_LDFLAGS += "-SAFESEH:NO -MAP "
if config.getDebug():
DEBUG_CXXFLAGS = "-Od -EHsc "
else:
OPT_CXXFLAGS = "-O2 -Ob1 -GR- "
if memoryProfiler:
OPT_CXXFLAGS += "-Oy- -Zi "
DEBUG_CXXFLAGS += "-Zi "
DEBUG_LDFLAGS += "-DEBUG "
elif config.getCompiler() == 'SunStudio':
OPT_CXXFLAGS = "-xO5 "
DEBUG_CXXFLAGS += "-g "
else:
raise Exception('Unrecognized compiler: ' + config.getCompiler())
zlib_include_dir = o.getStringArg('zlib-include-dir')
if zlib_include_dir is not None:
AVMSHELL_CPPFLAGS += "-I%s " % zlib_include_dir
zlib_lib = o.getStringArg('zlib-lib')
if zlib_lib is not None:
AVMSHELL_LDFLAGS = zlib_lib
else:
AVMSHELL_LDFLAGS = '$(call EXPAND_LIBNAME,z)'
if os == "darwin":
AVMSHELL_LDFLAGS += " -exported_symbols_list " + thisdir + "/platform/mac/shell/exports.exp"
MMGC_DEFINES.update({'TARGET_API_MAC_CARBON': 1,
'DARWIN': 1,
'_MAC': None,
'AVMPLUS_MAC': None,
'TARGET_RT_MAC_MACHO': 1})
APP_CXXFLAGS += "-fpascal-strings -faltivec -fasm-blocks "
if cpu == 'x86_64' or cpu == 'ppc64' or o.getBoolArg("leopard"):
# use --enable-leopard to build for 10.5 or later; this is mainly useful for enabling
# us to build with gcc4.2 (which requires the 10.5 sdk), since it has a slightly different
# set of error & warning sensitivities. Note that we don't override CC/CXX here, the calling script
# is expected to do that if desired (thus we can support 10.5sdk with either 4.0 or 4.2)
APP_CXXFLAGS += "-mmacosx-version-min=10.5 -isysroot /Developer/SDKs/MacOSX10.5.sdk "
config.subst("MACOSX_DEPLOYMENT_TARGET",10.5)
else:
APP_CXXFLAGS += "-mmacosx-version-min=10.4 -isysroot /Developer/SDKs/MacOSX10.4u.sdk "
config.subst("MACOSX_DEPLOYMENT_TARGET",10.4)
elif os == "freebsd":
MMGC_DEFINES.update({
'LINUX' :None,
'HAVE_PTHREAD_NP_H' :None,
'UNIX': None,
'AVMPLUS_UNIX' :None })
OS_LIBS.append('pthread')
APP_CPPFLAGS += '-DAVMPLUS_CDECL '
elif os == "windows" or os == "cygwin":
MMGC_DEFINES.update({'WIN32': None,
'_CRT_SECURE_NO_DEPRECATE': None})
OS_LDFLAGS += "-MAP "
if cpu == "arm":
APP_CPPFLAGS += "-DARM -D_ARM_ -DARMV5 -DUNICODE -DUNDER_CE=1 -DMMGC_ARM -QRarch5t "
OS_LIBS.append('mmtimer corelibc coredll')
else:
APP_CPPFLAGS += "-DWIN32_LEAN_AND_MEAN -D_CONSOLE "
OS_LIBS.append('winmm')
OS_LIBS.append('shlwapi')
elif os == "linux":
MMGC_DEFINES.update({'UNIX': None,
'AVMPLUS_UNIX': None,
'LINUX': None})
OS_LIBS.append('pthread')
APP_CPPFLAGS += '-DAVMPLUS_CDECL '
if cpu == "x86_64":
# workaround https://bugzilla.mozilla.org/show_bug.cgi?id=467776
OPT_CXXFLAGS += '-fno-schedule-insns2 '
# these warnings are too noisy
APP_CXXFLAGS += ' -Wno-parentheses '
if config.getDebug():
OS_LIBS.append("dl")
elif os == "sunos":
if config.getCompiler() != 'GCC':
APP_CXXFLAGS = ""
OPT_CXXFLAGS = "-xO5 "
DEBUG_CXXFLAGS = "-g "
MMGC_DEFINES.update({'UNIX': None,
'AVMPLUS_UNIX': None,
'SOLARIS': None})
OS_LIBS.append('pthread')
APP_CPPFLAGS += '-DAVMPLUS_CDECL '
if config.getDebug():
OS_LIBS.append("dl")
else:
raise Exception("Unsupported OS")
if cpu == "i686":
if config.getCompiler() == 'GCC' and os == 'darwin':
#only mactel always has sse2
APP_CPPFLAGS += "-msse2 "
elif cpu == "powerpc":
# we detect this in core/avmbuild.h and MMgc/*build.h
None
elif cpu == "ppc64":
# we detect this in core/avmbuild.h and MMgc/*build.h
None
elif cpu == "sparc":
APP_CPPFLAGS += "-DAVMPLUS_SPARC "
elif cpu == "x86_64":
# we detect this in core/avmbuild.h and MMgc/*build.h
None
elif cpu == "arm":
# we detect this in core/avmbuild.h and MMgc/*build.h
None
else:
raise Exception("Unsupported CPU")
if o.getBoolArg("selftests"):
APP_CPPFLAGS += "-DAVMPLUS_SELFTEST "
if o.getBoolArg("debugger"):
APP_CPPFLAGS += "-DDEBUGGER "
if o.getBoolArg('perfm'):
APP_CPPFLAGS += "-DPERFM "
if o.getBoolArg('disable-nj'):
APP_CPPFLAGS += '-DAVMPLUS_DISABLE_NJ '
if o.getBoolArg('abc-interp'):
APP_CPPFLAGS += '-DAVMPLUS_ABC_INTERPRETER '
if o.getBoolArg('selftest'):
APP_CPPFLAGS += '-DAVMPLUS_SELFTEST '
# We do two things with MMGC_DEFINES: we append it to APP_CPPFLAGS and we also write MMgc-config.h
APP_CPPFLAGS += ''.join(val is None and ('-D%s ' % var) or ('-D%s=%s ' % (var, val))
for (var, val) in MMGC_DEFINES.iteritems())
definePattern = \
"""#ifndef %(var)s
#define %(var)s %(val)s
#endif
"""
outpath = "%s/MMgc-config.h" % config.getObjDir()
contents = ''.join(definePattern % {'var': var,
'val': val is not None and val or ''}
for (var, val) in MMGC_DEFINES.iteritems())
writeFileIfChanged(outpath, contents)
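# Illustrative note (assumption): with e.g. MMGC_THREADSAFE enabled, the
# generated MMgc-config.h contains one block per MMGC_DEFINES entry of the form
#
#   #ifndef MMGC_THREADSAFE
#   #define MMGC_THREADSAFE
#   #endif
#
# with the value appended after the name whenever it is not None.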
config.subst("APP_CPPFLAGS", APP_CPPFLAGS)
config.subst("APP_CXXFLAGS", APP_CXXFLAGS)
config.subst("OPT_CPPFLAGS", OPT_CPPFLAGS)
config.subst("OPT_CXXFLAGS", OPT_CXXFLAGS)
config.subst("DEBUG_CPPFLAGS", DEBUG_CPPFLAGS)
config.subst("DEBUG_CXXFLAGS", DEBUG_CXXFLAGS)
config.subst("DEBUG_LDFLAGS", DEBUG_LDFLAGS)
config.subst("OS_LIBS", " ".join(OS_LIBS))
config.subst("OS_LDFLAGS", OS_LDFLAGS)
config.subst("MMGC_CPPFLAGS", MMGC_CPPFLAGS)
config.subst("AVMSHELL_CPPFLAGS", AVMSHELL_CPPFLAGS)
config.subst("AVMSHELL_LDFLAGS", AVMSHELL_LDFLAGS)
config.subst("MMGC_DYNAMIC", MMGC_DYNAMIC and 1 or '')
config.generate("Makefile")
o.finish()
|
bsd-2-clause
| -431,756,292,624,213,300 | 34.037037 | 179 | 0.639439 | false |
tgcmteam/tgcmlinux
|
src/tgcm/contrib/mobile-manager2/src/mobilemanager/devices/huawei/Modem.py
|
1
|
2711
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Authors : David Castellanos <dcastellanos@indra.es>
#
# Copyright (c) 2012, Telefonica Móviles España S.A.U.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from mobilemanager.mmdbus.service import method
from mobilemanager.devices.ModemGsmExceptions import IncorrectPassword
MM_URI = 'org.freedesktop.ModemManager.Modem'
MM_URI_DBG = 'org.freedesktop.ModemManager.Debug'
class Modem(object):
@method(MM_URI,
in_signature='', out_signature='b',
method_name='IsOperatorLocked')
def m_is_operator_locked(self):
def function(task):
cmd = 'AT^CARDLOCK?'
regex = '\^CARDLOCK: (?P<status>.+),(?P<times>.+),(?P<operator>.+)'
r_values = ['status', 'times', 'operator']
res = self.io.com.send_query({"type" : "regex",
"cmd" : cmd,
"task" : task,
"regex" : regex,
"r_values" : r_values})
is_operator_locked = False
if (res is not None) and (res['status'] == '1'):
is_operator_locked = True
return is_operator_locked
task_msg = "[Huawei] Is Device Operator Locked?"
return self.io.task_pool.exec_task(function, task_msg=task_msg)
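    # Illustrative note (assumed response shape, inferred from the regex above):
    # a reply such as "^CARDLOCK: 1,10,0" parses to
    # {'status': '1', 'times': '10', 'operator': '0'}; a status of '1' is
    # reported as the device being operator-locked.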
@method(MM_URI,
in_signature='s', out_signature='',
method_name='UnlockOperator')
def m_unlock_operator(self, unlock_code):
def function(task):
cmd = 'AT^CARDLOCK="%s"' % unlock_code
res = self.io.com.send_query({"type" : "simple",
"cmd" : cmd,
"task" : task })
if res is not True:
raise IncorrectPassword
task_msg = "[Huawei] Device Operator Unlock"
self.io.task_pool.exec_task(function, task_msg=task_msg)
|
gpl-2.0
| 5,042,313,276,414,435,000 | 36.625 | 79 | 0.578442 | false |
mardix/pylot
|
pylot/__init__.py
|
1
|
12115
|
"""
Pylot
"""
import os
import datetime
import inspect
from werkzeug.contrib.fixers import ProxyFix
from flask_classy import (FlaskView,
route)
from flask import (Flask,
abort,
redirect,
request,
render_template,
flash,
url_for,
jsonify,
session)
from flask_assets import Environment
from flask_kvsession import KVSessionExtension
from simplekv.memory.redisstore import RedisStore
import utils
# ------------------------------------------------------------------------------
import pkginfo
NAME = pkginfo.NAME
__version__ = pkginfo.VERSION
__author__ = pkginfo.AUTHOR
__license__ = pkginfo.LICENSE
__copyright__ = pkginfo.COPYRIGHT
# ------------------------------------------------------------------------------
class Pylot(FlaskView):
"""
Pylot a FlaskView extension
"""
LAYOUT = "layout.html" # The default layout
assets = None
_app = None
_bind = set()
_context = dict(
APP_NAME="",
APP_VERSION="",
YEAR=datetime.datetime.now().year,
GOOGLE_ANALYTICS_ID=None,
LOGIN_ENABLED=False,
LOGIN_OAUTH_ENABLED=False,
LOGIN_OAUTH_CLIENT_IDS=[],
LOGIN_OAUTH_BUTTONS=[],
META=dict(
title="",
description="",
url="",
image="",
site_name="",
object_type="",
locale="",
keywords=[],
use_opengraph=True,
use_googleplus=True,
use_twitter=True
)
)
@classmethod
def init(cls, flask_or_import_name, directory=None, config=None):
"""
        Registers all subclasses of Pylot against the Flask application,
        so it only needs to be called once, at initialization
:param flask_or_import_name: Flask instance or import name -> __name__
:param directory: The directory containing your project's Views, Templates and Static
:param config: string of config object. ie: "app.config.Dev"
"""
if isinstance(flask_or_import_name, Flask):
app = flask_or_import_name
else:
app = Flask(flask_or_import_name)
app.wsgi_app = ProxyFix(app.wsgi_app)
if config:
app.config.from_object(config)
if directory:
app.template_folder = directory + "/templates"
app.static_folder = directory + "/static"
cls._app = app
cls.assets = Environment(cls._app)
for _app in cls._bind:
_app(cls._app)
for subcls in cls.__subclasses__():
subcls.register(cls._app)
return cls._app
@classmethod
def bind_(cls, kls):
"""
        Binds an application that needs the 'app' object to initialize itself.
        :param kls: a callable that will receive the Flask 'app' as its first arg
"""
if not hasattr(kls, "__call__"):
raise TypeError("From Pylot.bind_: '%s' is not callable" % kls)
cls._bind.add(kls)
return kls
@classmethod
def extends_(cls, kls):
"""
        A view decorator to extend another view class or function to itself.
        It will inherit all its methods and properties and use them on itself.
-- EXAMPLES --
class Index(Pylot):
pass
index = Index()
::-> As decorator on classes ::
@index.extends_
class A(object):
def hello(self):
pass
@index.extends_
class C()
def world(self):
pass
::-> Decorator With function call ::
@index.extends_
def hello(self):
pass
"""
if inspect.isclass(kls):
for _name, _val in kls.__dict__.items():
if not _name.startswith("__"):
setattr(cls, _name, _val)
elif inspect.isfunction(kls):
setattr(cls, kls.__name__, kls)
return cls
@classmethod
def context_(cls, **kwargs):
"""
Assign a global view context to be used in the template
:params **kwargs:
"""
cls._context.update(kwargs)
@classmethod
def config_(cls, key, default=None):
"""
Shortcut to access the config in your class
:param key: The key to access
:param default: The default value when None
:returns mixed:
"""
return cls._app.config.get(key, default)
@classmethod
def meta_(cls, **kwargs):
"""
        Meta allows you to add meta data to the site
:params **kwargs:
meta keys we're expecting:
title (str)
description (str)
url (str) (Will pick it up by itself if not set)
image (str)
site_name (str) (but can pick it up from config file)
object_type (str)
keywords (list)
locale (str)
            ** Boolean flags, True by default:
use_opengraph
use_twitter
use_googleplus
"""
_name_ = "META"
meta_data = cls._context.get(_name_, {})
for k, v in kwargs.items():
# Prepend/Append string
if (k.endswith("__prepend") or k.endswith("__append")) \
and isinstance(v, str):
k, position = k.split("__", 2)
_v = meta_data.get(k, "")
if position == "prepend":
v += _v
elif position == "append":
v = _v + v
if k == "keywords" and not isinstance(k, list):
raise ValueError("Meta keyword must be a list")
meta_data[k] = v
        cls.context_(**{_name_: meta_data})
@classmethod
def success_(cls, message):
"""
Set a flash success message
"""
flash(message, "success")
@classmethod
def error_(cls, message):
"""
Set a flash error message
"""
flash(message, "error")
@classmethod
def render(cls, data={}, view_template=None, layout=None, **kwargs):
"""
        To render data to the associated template file of the action view
:param data: The context data to pass to the template
:param view_template: The file template to use. By default it will map the classname/action.html
:param layout: The body layout, must contain {% include __view_template__ %}
"""
if not view_template:
stack = inspect.stack()[1]
module = inspect.getmodule(cls).__name__
module_name = module.split(".")[-1]
action_name = stack[3] # The method being called in the class
view_name = cls.__name__ # The name of the class without View
if view_name.endswith("View"):
view_name = view_name[:-4]
view_template = "%s/%s.html" % (view_name, action_name)
data = data if data else dict()
data["__"] = cls._context if cls._context else {}
if kwargs:
data.update(kwargs)
data["__view_template__"] = view_template
return render_template(layout or cls.LAYOUT, **data)
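# --- Illustrative usage sketch (editor addition, not part of the original API) ---
# A minimal example of how a Pylot view could be wired up. "config.Dev" and the
# "." directory layout are assumptions for illustration only; everything else
# uses the methods defined above. Wrapped in a function so importing this file
# does not register an extra view subclass.
def _pylot_usage_example():
    class Index(Pylot):
        def index(self):
            self.meta_(title="Home")              # sets META in the view context
            return self.render(greeting="Hello")  # maps to "Index/index.html" by default

    return Pylot.init(__name__, directory=".", config="config.Dev")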
class Mailer(object):
"""
A simple wrapper to switch between SES-Mailer and Flask-Mail based on config
"""
mail = None
provider = None
def init_app(self, app):
import ses_mailer
import flask_mail
self.app = app
self.provider = app.config.get("MAILER_BACKEND", "SES").upper()
if self.provider not in ["SES", "FLASK-MAIL"]:
raise AttributeError("Invalid Mail provider")
if self.provider == "SES":
self.mail = ses_mailer.Mail(app=app)
elif self.provider == "FLASK-MAIL":
self.mail = flask_mail.Mail(app)
def send(self, to, subject, body, reply_to=None, **kwargs):
"""
Send simple message
"""
if self.provider == "SES":
self.mail.send(to=to,
subject=subject,
body=body,
reply_to=reply_to,
**kwargs)
elif self.provider == "FLASK-MAIL":
msg = flask_mail.Message(recipients=to, subject=subject, body=body, reply_to=reply_to,
sender=self.app.config.get("MAIL_DEFAULT_SENDER"))
self.mail.send(msg)
def send_template(self, template, to, reply_to=None, **context):
"""
Send Template message
"""
if self.provider == "SES":
self.mail.send_template(template=template, to=to, reply_to=reply_to, **context)
        elif self.provider == "FLASK-MAIL":
            # Reuse ses_mailer's template parser to render the subject/body,
            # then deliver the message through Flask-Mail.
            ses_mail = ses_mailer.Mail(app=self.app)
            data = ses_mail.parse_template(template=template, **context)
msg = flask_mail.Message(recipients=to,
subject=data["subject"],
body=data["body"],
reply_to=reply_to,
sender=self.app.config.get("MAIL_DEFAULT_SENDER")
)
self.mail.send(msg)
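# --- Illustrative usage sketch (editor addition) ---
# How the Mailer facade might be initialized and used directly. The recipient,
# subject and body are assumptions; the backend is chosen from the
# MAILER_BACKEND config key handled in init_app above.
def _mailer_usage_example(app):
    m = Mailer()
    m.init_app(app)  # selects ses_mailer or flask_mail based on app.config
    m.send(to="user@example.com", subject="Welcome", body="Hello from Pylot")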
class Storage(object):
store = None
def init_app(self, app):
import flask_store
type = app.config.get("STORAGE_BACKEND", "LOCAL")
if type == "S3":
provider = "flask_store.providers.s3.S3Provider"
elif type == "LOCAL":
provider = "flask_store.providers.local.LocalProvider"
else:
provider = app.config.get("STORAGE_BACKEND")
bucket = app.config.get("STORAGE_S3_BUCKET", "")
domain = app.config.get("STORAGE_DOMAIN", "https://s3.amazonaws.com/%s/" % bucket)
app.config.update({
"STORE_PROVIDER": provider,
"STORE_PATH": app.config.get("STORAGE_PATH"),
"STORE_URL_PREFIX": app.config.get("STORAGE_URL_PREFIX", "files"),
"STORE_DOMAIN": domain,
"STORE_S3_REGION": app.config.get("STORAGE_S3_REGION", "us-east-1"),
"STORE_S3_BUCKET": bucket,
"STORE_S3_ACCESS_KEY": app.config.get("AWS_ACCESS_KEY_ID"),
"STORE_S3_SECRET_KEY": app.config.get("AWS_SECRET_ACCESS_KEY")
})
self.store = flask_store.Store(app=app)
def get_url(self, file, absolute=False):
provider = self.store.Provider(file)
return provider.absolute_url if absolute else provider.relative_url
def get_path(self, file, absolute=False):
provider = self.store.Provider(file)
return provider.absolute_path if absolute else provider.relative_path
def get(self):
pass
def put(self, file):
provider = self.store.Provider(file)
provider.save()
return dict(filename=provider.filename,
relative_url=provider.relative_url,
absolute_url=provider.absolute_url,
absolute_path=provider.absolute_path)
def exists(self, file):
provider = self.store.Provider(file)
return provider.exists()
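# --- Illustrative usage sketch (editor addition) ---
# A hypothetical upload flow using the Storage facade; uploaded_file is assumed
# to be a werkzeug FileStorage from a request, and the provider is whatever
# STORAGE_BACKEND resolved to in init_app above.
def _storage_usage_example(app, uploaded_file):
    s = Storage()
    s.init_app(app)
    info = s.put(uploaded_file)   # saves the file via the configured provider
    return info["relative_url"]   # URL suitable for linking in templates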
class Cache(object):
pass
class Session(object):
def __init__(self, app):
self.app = app
# SESSION
store = None
backend = self.app.config.get("SESSION_BACKEND")
if backend:
backend = backend.upper()
if backend == "REDIS":
uri = self.app.config.get("SESSION_BACKEND_URI")
_redis = utils.connect_redis(uri)
store = RedisStore(_redis)
if store:
KVSessionExtension(store, self.app)
class AppError(Exception):
""" For exception in application pages """
pass
# ------------------------------------------------------------------------------
# Setup facade
mailer = Mailer()
storage = Storage()
cache = Cache()
Pylot.bind_(Session)
Pylot.bind_(mailer.init_app)
Pylot.bind_(storage.init_app)
|
mit
| 3,641,058,281,966,772,000 | 29.516373 | 104 | 0.526042 | false |
csdms/bob
|
bob/build_environment.py
|
1
|
1669
|
import os
from ConfigParser import SafeConfigParser, NoSectionError, NoOptionError
import warnings
class BuilderEnvironment(object):
"""Set up environment
"""
required_options = set(['cmake', 'wget', 'pkg_config', 'tar', 'xz', 'svn'])
    def __init__(self, cfg_file='bob.cfg', environ=None):
        # Copy the caller's environment so a shared default dict is never mutated.
        self._environ = dict(environ or {})
        self._environ.update(self.read(cfg_file))
def warn_missing_options(self, opts):
missing = self.required_options - set(opts)
if len(missing) > 0:
warnings.warn('%s: missing required option(s)' %
', '.join(missing))
def warn_unknown_options(self, opts):
unknowns = set(opts) - self.required_options
if len(unknowns) > 0:
warnings.warn('%s: unrecognized option(s)' % ', '.join(unknowns))
def read(self, cfg_file):
parser = SafeConfigParser()
parser.optionxform = str
with open(cfg_file, 'r') as cfg:
parser.readfp(cfg)
prog_paths = {}
try:
paths = parser.items('bob')
except NoSectionError:
warnings.warn('%s: not a bob cfg file.' % cfg_file)
else:
self.warn_missing_options(parser.options('bob'))
for prog in parser.options('bob'):
try:
prog_paths[prog] = parser.get('bob', prog)
except (NoSectionError, NoOptionError):
prog_paths[prog] = prog
return prog_paths
def tobash(self):
lines = []
for item in self._environ.items():
lines.append('export %s="%s"' % item)
lines.append('')
return os.linesep.join(lines)
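# --- Illustrative usage sketch (editor addition) ---
# A hypothetical bob.cfg and how BuilderEnvironment might consume it; the paths
# are assumptions, only the option names come from required_options above.
#
#   [bob]
#   cmake = /usr/bin/cmake
#   wget = /usr/bin/wget
#   pkg_config = /usr/bin/pkg-config
#   tar = /usr/bin/tar
#   xz = /usr/bin/xz
#   svn = /usr/bin/svn
#
def _builder_environment_example():
    env = BuilderEnvironment(cfg_file='bob.cfg', environ={'CC': 'gcc'})
    return env.tobash()  # "export NAME=..." lines ready to source in a shell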
|
mit
| -2,572,499,518,844,303,000 | 30.490566 | 79 | 0.55003 | false |
saebyn/django-classifieds
|
classifieds/utils.py
|
1
|
13685
|
"""
"""
from PIL import Image
import HTMLParser
import string
import re
import os.path
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext as _
from django.core.paginator import Paginator, InvalidPage
from django.template.loader import get_template
from django.template import TemplateDoesNotExist, RequestContext
from django.forms import ValidationError
from django import forms
from django.http import HttpResponse
from django.forms.fields import EMPTY_VALUES
from classifieds.conf import settings
from classifieds.search import SelectForm, searchForms
from classifieds.models import Ad, Field, Category, Pricing, PricingOptions
def category_template_name(category, page):
return os.path.join(u'classifieds/category',
category.template_prefix, page)
def render_category_page(request, category, page, context):
template_name = category_template_name(category, page)
try:
template = get_template(template_name)
except TemplateDoesNotExist:
template = get_template('classifieds/category/base/%s' % page)
context = RequestContext(request, context)
return HttpResponse(template.render(context))
def clean_adimageformset(self):
max_size = self.instance.category.images_max_size
for form in self.forms:
        try:
            if not hasattr(form.cleaned_data['full_photo'], 'file'):
                continue
        except:
            # Skip forms without an uploaded photo (missing or empty cleaned_data).
            continue
if form.cleaned_data['full_photo'].size > max_size:
raise forms.ValidationError(_(u'Maximum image size is %s KB') % \
str(max_size / 1024))
im = Image.open(form.cleaned_data['full_photo'].file)
        allowed_formats = self.instance.category.images_allowed_formats
        if allowed_formats.filter(format=im.format).count() == 0:
raise forms.ValidationError(
_(u'Your image must be in one of the following formats: ')\
+ ', '.join(allowed_formats.values_list('format',
flat=True)))
def context_sortable(request, ads, perpage=settings.ADS_PER_PAGE):
order = '-'
sort = 'expires_on'
page = 1
if 'perpage' in request.GET and request.GET['perpage'] != '':
perpage = int(request.GET['perpage'])
if 'order' in request.GET and request.GET['order'] != '':
if request.GET['order'] == 'desc':
order = '-'
elif request.GET['order'] == 'asc':
order = ''
if 'page' in request.GET:
page = int(request.GET['page'])
if 'sort' in request.GET and request.GET['sort'] != '':
sort = request.GET['sort']
if sort in ['created_on', 'expires_on', 'category', 'title']:
ads_sorted = ads.extra(select={'featured': """SELECT 1
FROM `classifieds_payment_options`
LEFT JOIN `classifieds_payment` ON `classifieds_payment_options`.`payment_id` = `classifieds_payment`.`id`
LEFT JOIN `classifieds_pricing` ON `classifieds_pricing`.`id` = `classifieds_payment`.`pricing_id`
LEFT JOIN `classifieds_pricingoptions` ON `classifieds_payment_options`.`pricingoptions_id` = `classifieds_pricingoptions`.`id`
WHERE `classifieds_pricingoptions`.`name` = %s
AND `classifieds_payment`.`ad_id` = `classifieds_ad`.`id`
AND `classifieds_payment`.`paid` =1
AND `classifieds_payment`.`paid_on` < NOW()
AND DATE_ADD( `classifieds_payment`.`paid_on` , INTERVAL `classifieds_pricing`.`length`
DAY ) > NOW()"""}, select_params=[PricingOptions.FEATURED_LISTING]).extra(order_by=['-featured', order + sort])
else:
ads_sorted = ads.extra(select=SortedDict([('fvorder', 'select value from classifieds_fieldvalue LEFT JOIN classifieds_field on classifieds_fieldvalue.field_id = classifieds_field.id where classifieds_field.name = %s and classifieds_fieldvalue.ad_id = classifieds_ad.id'), ('featured', """SELECT 1
FROM `classifieds_payment_options`
LEFT JOIN `classifieds_payment` ON `classifieds_payment_options`.`payment_id` = `classifieds_payment`.`id`
LEFT JOIN `classifieds_pricing` ON `classifieds_pricing`.`id` = `classifieds_payment`.`pricing_id`
LEFT JOIN `classifieds_pricingoptions` ON `classifieds_payment_options`.`pricingoptions_id` = `classifieds_pricingoptions`.`id`
WHERE `classifieds_pricingoptions`.`name` = %s
AND `classifieds_payment`.`ad_id` = `classifieds_ad`.`id`
AND `classifieds_payment`.`paid` =1
AND `classifieds_payment`.`paid_on` < NOW()
AND DATE_ADD( `classifieds_payment`.`paid_on` , INTERVAL `classifieds_pricing`.`length`
DAY ) > NOW()""")]), select_params=[sort, PricingOptions.FEATURED_LISTING]).extra(order_by=['-featured', order + 'fvorder'])
pager = Paginator(ads_sorted, perpage)
try:
page = pager.page(page)
except InvalidPage:
page = {'object_list': False}
can_sortby_list = []
sortby_list = ['created_on']
for category in Category.objects.filter(ad__in=ads.values('pk').query).distinct():
can_sortby_list += category.sortby_fields.split(',')
for category in Category.objects.filter(ad__in=ads.values('pk').query).distinct():
for fieldname, in category.field_set.values_list('name'):
if fieldname not in sortby_list and fieldname in can_sortby_list:
sortby_list.append(fieldname)
for fieldname, in Field.objects.filter(category=None).values_list('name'):
if fieldname not in sortby_list and fieldname in can_sortby_list:
sortby_list.append(fieldname)
return {'page': page, 'sortfields': sortby_list, 'no_results': False,
'perpage': perpage}
def prepare_sforms(fields, fields_left, post=None):
sforms = []
select_fields = {}
for field in fields:
if field.field_type == Field.SELECT_FIELD: # is select field
# add select field
options = field.options.split(',')
choices = zip(options, options)
choices.insert(0, ('', 'Any',))
form_field = forms.ChoiceField(label=field.label, required=False, help_text=field.help_text + u'\nHold ctrl or command on Mac for multiple selections.', choices=choices, widget=forms.SelectMultiple)
# remove this field from fields_list
fields_left.remove(field.name)
select_fields[field.name] = form_field
sforms.append(SelectForm.create(select_fields, post))
for sf in searchForms:
f = sf.create(fields, fields_left, post)
if f is not None:
sforms.append(f)
return sforms
class StrippingParser(HTMLParser.HTMLParser):
# These are the HTML tags that we will leave intact
valid_tags = ('b', 'i', 'br', 'p', 'strong', 'h1', 'h2', 'h3', 'em',
'span', 'ul', 'ol', 'li')
from htmlentitydefs import entitydefs # replace entitydefs from sgmllib
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
self.result = ""
self.endTagList = []
def handle_data(self, data):
if data:
self.result = self.result + data
def handle_charref(self, name):
self.result = "%s&#%s;" % (self.result, name)
def handle_entityref(self, name):
if name in self.entitydefs:
x = ';'
else:
            # this breaks non-standard entities that end with ';'
x = ''
self.result = "%s&%s%s" % (self.result, name, x)
def handle_starttag(self, tag, attrs):
""" Delete all tags except for legal ones """
if tag in self.valid_tags:
self.result = self.result + '<' + tag
for k, v in attrs:
if string.lower(k[0:2]) != 'on' and \
string.lower(v[0:10]) != 'javascript':
self.result = '%s %s="%s"' % (self.result, k, v)
endTag = '</%s>' % tag
self.endTagList.insert(0, endTag)
self.result = self.result + '>'
def handle_endtag(self, tag):
if tag in self.valid_tags:
self.result = "%s</%s>" % (self.result, tag)
remTag = '</%s>' % tag
self.endTagList.remove(remTag)
def cleanup(self):
""" Append missing closing tags """
for j in range(len(self.endTagList)):
self.result = self.result + self.endTagList[j]
def strip(s):
""" Strip illegal HTML tags from string s """
parser = StrippingParser()
parser.feed(s)
parser.close()
parser.cleanup()
return parser.result
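# Illustrative example (editor addition): strip() keeps only the tags listed in
# StrippingParser.valid_tags and drops "on*"/javascript attributes, so a call
# like the one below should come back roughly as '<b>hi</b>alert(1)'.
def _strip_example():
    return strip('<b onclick="evil()">hi</b><script>alert(1)</script>')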
class TinyMCEWidget(forms.Textarea):
def __init__(self, *args, **kwargs):
attrs = kwargs.setdefault('attrs', {})
if 'class' not in attrs:
attrs['class'] = 'tinymce'
else:
attrs['class'] += ' tinymce'
super(TinyMCEWidget, self).__init__(*args, **kwargs)
class Media:
js = ('js/tiny_mce/tiny_mce.js', 'js/tinymce_forms.js',)
class TinyMCEField(forms.CharField):
def clean(self, value):
"""Validates max_length and min_length. Returns a Unicode object."""
if value in EMPTY_VALUES:
return u''
stripped_value = re.sub(r'<.*?>', '', value)
stripped_value = string.replace(stripped_value, ' ', ' ')
stripped_value = string.replace(stripped_value, '<', '<')
stripped_value = string.replace(stripped_value, '>', '>')
stripped_value = string.replace(stripped_value, '&', '&')
stripped_value = string.replace(stripped_value, '\n', '')
stripped_value = string.replace(stripped_value, '\r', '')
value_length = len(stripped_value)
value_length -= 1
if self.max_length is not None and value_length > self.max_length:
raise forms.ValidationError(self.error_messages['max_length'] % {'max': self.max_length, 'length': value_length})
if self.min_length is not None and value_length < self.min_length:
raise forms.ValidationError(self.error_messages['min_length'] % {'min': self.min_length, 'length': value_length})
return value
def field_list(instance):
class MockField:
def __init__(self, name, field_type, label, required, help_text, enable_wysiwyg, max_length):
self.name = name
self.field_type = field_type
self.label = label
self.required = required
self.help_text = help_text
self.enable_wysiwyg = enable_wysiwyg
self.max_length = max_length
title_field = MockField('title', Field.CHAR_FIELD, _('Title'), True, '', False, 100)
fields = [title_field] # all ads have titles
fields += list(instance.category.field_set.all())
fields += list(Field.objects.filter(category=None))
return fields
def fields_for_ad(instance):
# generate a sorted dict of fields corresponding to the Field model
# for the Ad instance
fields_dict = SortedDict()
fields = field_list(instance)
# this really, really should be refactored
for field in fields:
if field.field_type == Field.BOOLEAN_FIELD:
fields_dict[field.name] = forms.BooleanField(label=field.label, required=False, help_text=field.help_text)
elif field.field_type == Field.CHAR_FIELD:
widget = forms.TextInput
fields_dict[field.name] = forms.CharField(label=field.label, required=field.required, max_length=field.max_length, help_text=field.help_text, widget=widget)
elif field.field_type == Field.DATE_FIELD:
fields_dict[field.name] = forms.DateField(label=field.label, required=field.required, help_text=field.help_text)
elif field.field_type == Field.DATETIME_FIELD:
fields_dict[field.name] = forms.DateTimeField(label=field.label, required=field.required, help_text=field.help_text)
elif field.field_type == Field.EMAIL_FIELD:
fields_dict[field.name] = forms.EmailField(label=field.label, required=field.required, help_text=field.help_text)
elif field.field_type == Field.FLOAT_FIELD:
fields_dict[field.name] = forms.FloatField(label=field.label, required=field.required, help_text=field.help_text)
elif field.field_type == Field.INTEGER_FIELD:
fields_dict[field.name] = forms.IntegerField(label=field.label, required=field.required, help_text=field.help_text)
elif field.field_type == Field.TIME_FIELD:
fields_dict[field.name] = forms.TimeField(label=field.label, required=field.required, help_text=field.help_text)
elif field.field_type == Field.URL_FIELD:
fields_dict[field.name] = forms.URLField(label=field.label, required=field.required, help_text=field.help_text)
elif field.field_type == Field.SELECT_FIELD:
options = field.options.split(',')
fields_dict[field.name] = forms.ChoiceField(label=field.label, required=field.required, help_text=field.help_text, choices=zip(options, options))
elif field.field_type == Field.TEXT_FIELD:
if field.enable_wysiwyg:
widget = TinyMCEWidget
field_type = TinyMCEField
else:
widget = forms.Textarea
field_type = forms.CharField
fields_dict[field.name] = field_type(label=field.label,
required=field.required,
help_text=field.help_text,
max_length=field.max_length,
widget=widget)
else:
raise NotImplementedError(u'Unknown field type "%s"' % field.get_field_type_display())
return fields_dict
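# --- Illustrative usage sketch (editor addition) ---
# One way the generated field dict could be turned into a concrete form class;
# the ad_instance argument is assumed to be an Ad with a category set.
def _ad_form_example(ad_instance):
    return type('AdForm', (forms.BaseForm,), {'base_fields': fields_for_ad(ad_instance)})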
|
bsd-3-clause
| -3,965,156,459,822,403,000 | 41.632399 | 304 | 0.628864 | false |
rollasoul/ghosthugs
|
8266_server/8266_complex_high.py
|
1
|
3299
|
import socket
import time
import machine
led1 = machine.Pin(5, machine.Pin.OUT)
led2 = machine.Pin(4, machine.Pin.OUT)
adc = machine.ADC(0)
s = socket.socket()
host = "To Do: Enter ip-address of remote server"
port = 12344
counter = 0
while True:
    try:
        # Poll the remote server: announce readiness, then act on the command
        # it sends back ("hi sis", "High five", or "disconnect").
        while True:
s = socket.socket()
s.connect((host, port))
time.sleep(1)
s.send("ready")
output = s.recv(2048)
print(output)
if "disconnect" in output:
s.close()
if counter == 1:
if "hi sis" in output:
p13 = machine.Pin(13)
pwm13 = machine.PWM(p13)
servo_s = machine.PWM(machine.Pin(13), freq=50)
p14 = machine.Pin(14)
pwm14 = machine.PWM(p14)
servo_b = machine.PWM(machine.Pin(14), freq=50)
p12 = machine.Pin(12)
pwm12 = machine.PWM(p12)
servo_a = machine.PWM(machine.Pin(12), freq=50)
servo_s.duty(30)
servo_b.duty(60)
servo_a.duty(100)
time.sleep(3)
servo_s.duty(50)
servo_a.duty(60)
servo_b.duty(50)
time.sleep(2)
servo_s.duty(70)
servo_a.duty(80)
servo_b.duty(30)
time.sleep(1)
counter = 0
elif "High five" in output:
p13 = machine.Pin(13)
pwm13 = machine.PWM(p13)
servo_s = machine.PWM(machine.Pin(13), freq=50)
p14 = machine.Pin(14)
pwm14 = machine.PWM(p14)
servo_b = machine.PWM(machine.Pin(14), freq=50)
p12 = machine.Pin(12)
pwm12 = machine.PWM(p12)
servo_a = machine.PWM(machine.Pin(12), freq=50)
servo_s.duty(30)
servo_b.duty(60)
servo_a.duty(100)
time.sleep(3)
servo_s.duty(50)
servo_a.duty(80)
servo_b.duty(60)
if adc.read() > 200:
for i in range(3):
led1.high()
time.sleep(0.1)
led1.low()
time.sleep(0.1)
led2.high()
time.sleep(0.1)
led2.low()
time.sleep(0.3)
else:
led1.high()
time.sleep(2)
servo_s.duty(70)
servo_a.duty(80)
servo_b.duty(30)
time.sleep(1)
counter = 0
else:
counter = 1
    except:
        # Ignore connection errors and retry the outer loop.
        pass
|
mit
| 7,458,516,790,863,299,000 | 38.27381 | 71 | 0.35344 | false |
blaze/dask
|
dask/dataframe/tests/test_indexing.py
|
2
|
19852
|
import pandas as pd
import numpy as np
import pytest
import dask
import dask.dataframe as dd
from dask.dataframe._compat import tm, PANDAS_GT_100
from dask.dataframe.indexing import _coerce_loc_index
from dask.dataframe.utils import assert_eq, make_meta, PANDAS_VERSION
dsk = {
("x", 0): pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=[0, 1, 3]),
("x", 1): pd.DataFrame({"a": [4, 5, 6], "b": [3, 2, 1]}, index=[5, 6, 8]),
("x", 2): pd.DataFrame({"a": [7, 8, 9], "b": [0, 0, 0]}, index=[9, 9, 9]),
}
meta = make_meta({"a": "i8", "b": "i8"}, index=pd.Index([], "i8"))
d = dd.DataFrame(dsk, "x", meta, [0, 5, 9, 9])
full = d.compute()
CHECK_FREQ = {}
if dd._compat.PANDAS_GT_110:
CHECK_FREQ["check_freq"] = False
def test_loc():
assert d.loc[3:8].divisions[0] == 3
assert d.loc[3:8].divisions[-1] == 8
assert d.loc[5].divisions == (5, 5)
assert_eq(d.loc[5], full.loc[5:5])
assert_eq(d.loc[3:8], full.loc[3:8])
assert_eq(d.loc[:8], full.loc[:8])
assert_eq(d.loc[3:], full.loc[3:])
assert_eq(d.loc[[5]], full.loc[[5]])
expected_warning = FutureWarning
if not PANDAS_GT_100:
# removed in pandas 1.0
with pytest.warns(expected_warning):
assert_eq(d.loc[[3, 4, 1, 8]], full.loc[[3, 4, 1, 8]])
with pytest.warns(expected_warning):
assert_eq(d.loc[[3, 4, 1, 9]], full.loc[[3, 4, 1, 9]])
with pytest.warns(expected_warning):
assert_eq(d.loc[np.array([3, 4, 1, 9])], full.loc[np.array([3, 4, 1, 9])])
assert_eq(d.a.loc[5], full.a.loc[5:5])
assert_eq(d.a.loc[3:8], full.a.loc[3:8])
assert_eq(d.a.loc[:8], full.a.loc[:8])
assert_eq(d.a.loc[3:], full.a.loc[3:])
assert_eq(d.a.loc[[5]], full.a.loc[[5]])
if not PANDAS_GT_100:
# removed in pandas 1.0
with pytest.warns(expected_warning):
assert_eq(d.a.loc[[3, 4, 1, 8]], full.a.loc[[3, 4, 1, 8]])
with pytest.warns(expected_warning):
assert_eq(d.a.loc[[3, 4, 1, 9]], full.a.loc[[3, 4, 1, 9]])
with pytest.warns(expected_warning):
assert_eq(
d.a.loc[np.array([3, 4, 1, 9])], full.a.loc[np.array([3, 4, 1, 9])]
)
assert_eq(d.a.loc[[]], full.a.loc[[]])
assert_eq(d.a.loc[np.array([])], full.a.loc[np.array([])])
pytest.raises(KeyError, lambda: d.loc[1000])
assert_eq(d.loc[1000:], full.loc[1000:])
assert_eq(d.loc[-2000:-1000], full.loc[-2000:-1000])
assert sorted(d.loc[5].dask) == sorted(d.loc[5].dask)
assert sorted(d.loc[5].dask) != sorted(d.loc[6].dask)
def test_loc_non_informative_index():
df = pd.DataFrame({"x": [1, 2, 3, 4]}, index=[10, 20, 30, 40])
ddf = dd.from_pandas(df, npartitions=2, sort=True)
ddf.divisions = (None,) * 3
assert not ddf.known_divisions
ddf.loc[20:30].compute(scheduler="sync")
assert_eq(ddf.loc[20:30], df.loc[20:30])
df = pd.DataFrame({"x": [1, 2, 3, 4]}, index=[10, 20, 20, 40])
ddf = dd.from_pandas(df, npartitions=2, sort=True)
assert_eq(ddf.loc[20], df.loc[20:20])
def test_loc_with_text_dates():
A = dd._compat.makeTimeSeries().iloc[:5]
B = dd._compat.makeTimeSeries().iloc[5:]
s = dd.Series(
{("df", 0): A, ("df", 1): B},
"df",
A,
[A.index.min(), B.index.min(), B.index.max()],
)
assert s.loc["2000":"2010"].divisions == s.divisions
assert_eq(s.loc["2000":"2010"], s)
assert len(s.loc["2000-01-03":"2000-01-05"].compute()) == 3
def test_loc_with_series():
assert_eq(d.loc[d.a % 2 == 0], full.loc[full.a % 2 == 0])
assert sorted(d.loc[d.a % 2].dask) == sorted(d.loc[d.a % 2].dask)
assert sorted(d.loc[d.a % 2].dask) != sorted(d.loc[d.a % 3].dask)
def test_loc_with_array():
assert_eq(d.loc[(d.a % 2 == 0).values], full.loc[(full.a % 2 == 0).values])
assert sorted(d.loc[(d.a % 2).values].dask) == sorted(d.loc[(d.a % 2).values].dask)
assert sorted(d.loc[(d.a % 2).values].dask) != sorted(d.loc[(d.a % 3).values].dask)
def test_loc_with_function():
assert_eq(d.loc[lambda df: df["a"] > 3, :], full.loc[lambda df: df["a"] > 3, :])
def _col_loc_fun(_df):
return _df.columns.str.contains("b")
assert_eq(d.loc[:, _col_loc_fun], full.loc[:, _col_loc_fun])
def test_loc_with_array_different_partition():
df = pd.DataFrame(
np.random.randn(20, 5),
index=list("abcdefghijklmnopqrst"),
columns=list("ABCDE"),
)
ddf = dd.from_pandas(df, 3)
assert_eq(ddf.loc[(ddf.A > 0).values], df.loc[(df.A > 0).values])
with pytest.raises(ValueError):
ddf.loc[(ddf.A > 0).repartition(["a", "g", "k", "o", "t"]).values]
def test_loc_with_series_different_partition():
df = pd.DataFrame(
np.random.randn(20, 5),
index=list("abcdefghijklmnopqrst"),
columns=list("ABCDE"),
)
ddf = dd.from_pandas(df, 3)
assert_eq(ddf.loc[ddf.A > 0], df.loc[df.A > 0])
assert_eq(
ddf.loc[(ddf.A > 0).repartition(["a", "g", "k", "o", "t"])], df.loc[df.A > 0]
)
def test_loc2d():
# index indexer is always regarded as slice for duplicated values
assert_eq(d.loc[5, "a"], full.loc[5:5, "a"])
# assert_eq(d.loc[[5], 'a'], full.loc[[5], 'a'])
assert_eq(d.loc[5, ["a"]], full.loc[5:5, ["a"]])
# assert_eq(d.loc[[5], ['a']], full.loc[[5], ['a']])
assert_eq(d.loc[3:8, "a"], full.loc[3:8, "a"])
assert_eq(d.loc[:8, "a"], full.loc[:8, "a"])
assert_eq(d.loc[3:, "a"], full.loc[3:, "a"])
assert_eq(d.loc[[8], "a"], full.loc[[8], "a"])
assert_eq(d.loc[3:8, ["a"]], full.loc[3:8, ["a"]])
assert_eq(d.loc[:8, ["a"]], full.loc[:8, ["a"]])
assert_eq(d.loc[3:, ["a"]], full.loc[3:, ["a"]])
# 3d
with pytest.raises(pd.core.indexing.IndexingError):
d.loc[3, 3, 3]
# Series should raise
with pytest.raises(pd.core.indexing.IndexingError):
d.a.loc[3, 3]
with pytest.raises(pd.core.indexing.IndexingError):
d.a.loc[3:, 3]
with pytest.raises(pd.core.indexing.IndexingError):
d.a.loc[d.a % 2 == 0, 3]
@pytest.mark.skipif(PANDAS_GT_100, reason="Removed in pandas 1.0")
def test_loc2d_some_missing():
with pytest.warns(FutureWarning):
assert_eq(d.loc[[3, 4, 3], ["a"]], full.loc[[3, 4, 3], ["a"]])
def test_loc2d_with_known_divisions():
df = pd.DataFrame(
np.random.randn(20, 5),
index=list("abcdefghijklmnopqrst"),
columns=list("ABCDE"),
)
ddf = dd.from_pandas(df, 3)
assert_eq(ddf.loc["a", "A"], df.loc[["a"], "A"])
assert_eq(ddf.loc["a", ["A"]], df.loc[["a"], ["A"]])
assert_eq(ddf.loc["a":"o", "A"], df.loc["a":"o", "A"])
assert_eq(ddf.loc["a":"o", ["A"]], df.loc["a":"o", ["A"]])
assert_eq(ddf.loc[["n"], ["A"]], df.loc[["n"], ["A"]])
assert_eq(ddf.loc[["a", "c", "n"], ["A"]], df.loc[["a", "c", "n"], ["A"]])
assert_eq(ddf.loc[["t", "b"], ["A"]], df.loc[["t", "b"], ["A"]])
assert_eq(
ddf.loc[["r", "r", "c", "g", "h"], ["A"]],
df.loc[["r", "r", "c", "g", "h"], ["A"]],
)
def test_loc2d_with_unknown_divisions():
df = pd.DataFrame(
np.random.randn(20, 5),
index=list("abcdefghijklmnopqrst"),
columns=list("ABCDE"),
)
ddf = dd.from_pandas(df, 3)
ddf.divisions = (None,) * len(ddf.divisions)
assert ddf.known_divisions is False
assert_eq(ddf.loc["a", "A"], df.loc[["a"], "A"])
assert_eq(ddf.loc["a", ["A"]], df.loc[["a"], ["A"]])
assert_eq(ddf.loc["a":"o", "A"], df.loc["a":"o", "A"])
assert_eq(ddf.loc["a":"o", ["A"]], df.loc["a":"o", ["A"]])
def test_loc2d_duplicated_columns():
df = pd.DataFrame(
np.random.randn(20, 5),
index=list("abcdefghijklmnopqrst"),
columns=list("AABCD"),
)
ddf = dd.from_pandas(df, 3)
assert_eq(ddf.loc["a", "A"], df.loc[["a"], "A"])
assert_eq(ddf.loc["a", ["A"]], df.loc[["a"], ["A"]])
assert_eq(ddf.loc["j", "B"], df.loc[["j"], "B"])
assert_eq(ddf.loc["j", ["B"]], df.loc[["j"], ["B"]])
assert_eq(ddf.loc["a":"o", "A"], df.loc["a":"o", "A"])
assert_eq(ddf.loc["a":"o", ["A"]], df.loc["a":"o", ["A"]])
assert_eq(ddf.loc["j":"q", "B"], df.loc["j":"q", "B"])
assert_eq(ddf.loc["j":"q", ["B"]], df.loc["j":"q", ["B"]])
assert_eq(ddf.loc["a":"o", "B":"D"], df.loc["a":"o", "B":"D"])
assert_eq(ddf.loc["a":"o", "B":"D"], df.loc["a":"o", "B":"D"])
assert_eq(ddf.loc["j":"q", "B":"A"], df.loc["j":"q", "B":"A"])
assert_eq(ddf.loc["j":"q", "B":"A"], df.loc["j":"q", "B":"A"])
assert_eq(ddf.loc[ddf.B > 0, "B"], df.loc[df.B > 0, "B"])
assert_eq(ddf.loc[ddf.B > 0, ["A", "C"]], df.loc[df.B > 0, ["A", "C"]])
def test_getitem():
df = pd.DataFrame(
{
"A": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"B": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"C": [True, False, True] * 3,
},
columns=list("ABC"),
)
ddf = dd.from_pandas(df, 2)
assert_eq(ddf["A"], df["A"])
# check cache consistency
tm.assert_series_equal(ddf["A"]._meta, ddf._meta["A"])
assert_eq(ddf[["A", "B"]], df[["A", "B"]])
tm.assert_frame_equal(ddf[["A", "B"]]._meta, ddf._meta[["A", "B"]])
assert_eq(ddf[ddf.C], df[df.C])
tm.assert_series_equal(ddf.C._meta, ddf._meta.C)
assert_eq(ddf[ddf.C.repartition([0, 2, 5, 8])], df[df.C])
pytest.raises(KeyError, lambda: df["X"])
pytest.raises(KeyError, lambda: df[["A", "X"]])
pytest.raises(AttributeError, lambda: df.X)
# not str/unicode
df = pd.DataFrame(np.random.randn(10, 5))
ddf = dd.from_pandas(df, 2)
assert_eq(ddf[0], df[0])
assert_eq(ddf[[1, 2]], df[[1, 2]])
pytest.raises(KeyError, lambda: df[8])
pytest.raises(KeyError, lambda: df[[1, 8]])
def test_getitem_slice():
df = pd.DataFrame(
{
"A": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"B": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"C": [True, False, True] * 3,
},
index=list("abcdefghi"),
)
ddf = dd.from_pandas(df, 3)
assert_eq(ddf["a":"e"], df["a":"e"])
assert_eq(ddf["a":"b"], df["a":"b"])
assert_eq(ddf["f":], df["f":])
def test_getitem_integer_slice():
df = pd.DataFrame({"A": range(6)})
ddf = dd.from_pandas(df, 2)
# integer slicing is iloc based
with pytest.raises(NotImplementedError):
ddf[1:3]
df = pd.DataFrame({"A": range(6)}, index=[1.0, 2.0, 3.0, 5.0, 10.0, 11.0])
ddf = dd.from_pandas(df, 2)
# except for float dtype indexes
assert_eq(ddf[2:8], df[2:8])
assert_eq(ddf[2:], df[2:])
assert_eq(ddf[:8], df[:8])
def test_loc_on_numpy_datetimes():
df = pd.DataFrame(
{"x": [1, 2, 3]}, index=list(map(np.datetime64, ["2014", "2015", "2016"]))
)
a = dd.from_pandas(df, 2)
a.divisions = list(map(np.datetime64, a.divisions))
assert_eq(a.loc["2014":"2015"], a.loc["2014":"2015"])
def test_loc_on_pandas_datetimes():
df = pd.DataFrame(
{"x": [1, 2, 3]}, index=list(map(pd.Timestamp, ["2014", "2015", "2016"]))
)
a = dd.from_pandas(df, 2)
a.divisions = list(map(pd.Timestamp, a.divisions))
assert_eq(a.loc["2014":"2015"], a.loc["2014":"2015"])
def test_loc_datetime_no_freq():
# https://github.com/dask/dask/issues/2389
datetime_index = pd.date_range("2016-01-01", "2016-01-31", freq="12h")
datetime_index.freq = None # FORGET FREQUENCY
df = pd.DataFrame({"num": range(len(datetime_index))}, index=datetime_index)
ddf = dd.from_pandas(df, npartitions=1)
slice_ = slice("2016-01-03", "2016-01-05")
result = ddf.loc[slice_, :]
expected = df.loc[slice_, :]
assert_eq(result, expected)
def test_coerce_loc_index():
for t in [pd.Timestamp, np.datetime64]:
assert isinstance(_coerce_loc_index([t("2014")], "2014"), t)
def test_loc_timestamp_str():
df = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="H", periods=100),
)
ddf = dd.from_pandas(df, 10)
# partial string slice
assert_eq(df.loc["2011-01-02"], ddf.loc["2011-01-02"])
assert_eq(df.loc["2011-01-02":"2011-01-10"], ddf.loc["2011-01-02":"2011-01-10"])
# same reso, dask result is always DataFrame
assert_eq(
df.loc["2011-01-02 10:00"].to_frame().T,
ddf.loc["2011-01-02 10:00"],
**CHECK_FREQ
)
# series
assert_eq(df.A.loc["2011-01-02"], ddf.A.loc["2011-01-02"], **CHECK_FREQ)
assert_eq(
df.A.loc["2011-01-02":"2011-01-10"],
ddf.A.loc["2011-01-02":"2011-01-10"],
**CHECK_FREQ
)
# slice with timestamp (dask result must be DataFrame)
assert_eq(
df.loc[pd.Timestamp("2011-01-02")].to_frame().T,
ddf.loc[pd.Timestamp("2011-01-02")],
**CHECK_FREQ
)
assert_eq(
df.loc[pd.Timestamp("2011-01-02") : pd.Timestamp("2011-01-10")],
ddf.loc[pd.Timestamp("2011-01-02") : pd.Timestamp("2011-01-10")],
**CHECK_FREQ
)
assert_eq(
df.loc[pd.Timestamp("2011-01-02 10:00")].to_frame().T,
ddf.loc[pd.Timestamp("2011-01-02 10:00")],
**CHECK_FREQ
)
df = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="M", periods=100),
)
ddf = dd.from_pandas(df, 50)
assert_eq(df.loc["2011-01"], ddf.loc["2011-01"])
assert_eq(df.loc["2011"], ddf.loc["2011"])
assert_eq(df.loc["2011-01":"2012-05"], ddf.loc["2011-01":"2012-05"])
assert_eq(df.loc["2011":"2015"], ddf.loc["2011":"2015"])
# series
assert_eq(df.B.loc["2011-01"], ddf.B.loc["2011-01"])
assert_eq(df.B.loc["2011"], ddf.B.loc["2011"])
assert_eq(df.B.loc["2011-01":"2012-05"], ddf.B.loc["2011-01":"2012-05"])
assert_eq(df.B.loc["2011":"2015"], ddf.B.loc["2011":"2015"])
def test_getitem_timestamp_str():
df = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="H", periods=100),
)
ddf = dd.from_pandas(df, 10)
# partial string slice
assert_eq(df["2011-01-02"], ddf["2011-01-02"])
assert_eq(df["2011-01-02":"2011-01-10"], df["2011-01-02":"2011-01-10"])
df = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="D", periods=100),
)
ddf = dd.from_pandas(df, 50)
assert_eq(df["2011-01"], ddf["2011-01"])
assert_eq(df["2011"], ddf["2011"])
assert_eq(df["2011-01":"2012-05"], ddf["2011-01":"2012-05"])
assert_eq(df["2011":"2015"], ddf["2011":"2015"])
def test_loc_period_str():
# .loc with PeriodIndex doesn't support partial string indexing
# https://github.com/pydata/pandas/issues/13429
pass
def test_getitem_period_str():
df = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.period_range("2011-01-01", freq="H", periods=100),
)
ddf = dd.from_pandas(df, 10)
# partial string slice
assert_eq(df["2011-01-02"], ddf["2011-01-02"])
assert_eq(df["2011-01-02":"2011-01-10"], df["2011-01-02":"2011-01-10"])
# same reso, dask result is always DataFrame
df = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.period_range("2011-01-01", freq="D", periods=100),
)
ddf = dd.from_pandas(df, 50)
assert_eq(df["2011-01"], ddf["2011-01"])
assert_eq(df["2011"], ddf["2011"])
assert_eq(df["2011-01":"2012-05"], ddf["2011-01":"2012-05"])
assert_eq(df["2011":"2015"], ddf["2011":"2015"])
def test_to_series():
# Test for time index
df = pd.DataFrame(
{"A": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="H", periods=100),
)
ddf = dd.from_pandas(df, 10)
assert_eq(df.index.to_series(), ddf.index.to_series())
# Test for numerical index
df = pd.DataFrame({"A": np.random.randn(100)}, index=range(100))
ddf = dd.from_pandas(df, 10)
assert_eq(df.index.to_series(), ddf.index.to_series())
def test_to_frame():
# Test for time index
df = pd.DataFrame(
{"A": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="H", periods=100),
)
ddf = dd.from_pandas(df, 10)
assert_eq(df.index.to_frame(), ddf.index.to_frame())
# Test for numerical index
df = pd.DataFrame({"A": np.random.randn(100)}, index=range(100))
ddf = dd.from_pandas(df, 10)
assert_eq(df.index.to_frame(), ddf.index.to_frame())
@pytest.mark.skipif(PANDAS_VERSION < "0.24.0", reason="No renaming for index")
def test_to_frame_name():
# Test for time index
df = pd.DataFrame(
{"A": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="H", periods=100),
)
ddf = dd.from_pandas(df, 10)
assert_eq(df.index.to_frame(name="foo"), ddf.index.to_frame(name="foo"))
# Test for numerical index
df = pd.DataFrame({"A": np.random.randn(100)}, index=range(100))
ddf = dd.from_pandas(df, 10)
assert_eq(df.index.to_frame(name="bar"), ddf.index.to_frame(name="bar"))
@pytest.mark.parametrize("indexer", [0, [0], [0, 1], [1, 0], [False, True, True]])
def test_iloc(indexer):
df = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
ddf = dd.from_pandas(df, 2)
result = ddf.iloc[:, indexer]
expected = df.iloc[:, indexer]
assert_eq(result, expected)
def test_iloc_series():
s = pd.Series([1, 2, 3])
ds = dd.from_pandas(s, 2)
with pytest.raises(AttributeError):
ds.iloc[:]
def test_iloc_raises():
df = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
ddf = dd.from_pandas(df, 2)
with pytest.raises(NotImplementedError):
ddf.iloc[[0, 1], :]
with pytest.raises(NotImplementedError):
ddf.iloc[[0, 1], [0, 1]]
with pytest.raises(ValueError):
ddf.iloc[[0, 1], [0, 1], [1, 2]]
with pytest.raises(IndexError):
ddf.iloc[:, [5, 6]]
def test_iloc_duplicate_columns():
df = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
ddf = dd.from_pandas(df, 2)
df.columns = ["A", "A", "C"]
ddf.columns = ["A", "A", "C"]
selection = ddf.iloc[:, 2]
# Check that `iloc` is called instead of getitem
assert any([key.startswith("iloc") for key in selection.dask.layers.keys()])
select_first = ddf.iloc[:, 1]
assert_eq(select_first, df.iloc[:, 1])
select_zeroth = ddf.iloc[:, 0]
assert_eq(select_zeroth, df.iloc[:, 0])
select_list_cols = ddf.iloc[:, [0, 2]]
assert_eq(select_list_cols, df.iloc[:, [0, 2]])
select_negative = ddf.iloc[:, -1:-3:-1]
assert_eq(select_negative, df.iloc[:, -1:-3:-1])
def test_iloc_dispatch_to_getitem():
df = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
ddf = dd.from_pandas(df, 2)
selection = ddf.iloc[:, 2]
assert all([not key.startswith("iloc") for key in selection.dask.layers.keys()])
assert any([key.startswith("getitem") for key in selection.dask.layers.keys()])
select_first = ddf.iloc[:, 1]
assert_eq(select_first, df.iloc[:, 1])
select_zeroth = ddf.iloc[:, 0]
assert_eq(select_zeroth, df.iloc[:, 0])
select_list_cols = ddf.iloc[:, [0, 2]]
assert_eq(select_list_cols, df.iloc[:, [0, 2]])
select_negative = ddf.iloc[:, -1:-3:-1]
assert_eq(select_negative, df.iloc[:, -1:-3:-1])
def test_iloc_out_of_order_selection():
df = pd.DataFrame({"A": [1] * 100, "B": [2] * 100, "C": [3] * 100, "D": [4] * 100})
ddf = dd.from_pandas(df, 2)
ddf = ddf[["C", "A", "B"]]
a = ddf.iloc[:, 0]
b = ddf.iloc[:, 1]
c = ddf.iloc[:, 2]
assert a.name == "C"
assert b.name == "A"
assert c.name == "B"
a1, b1, c1 = dask.compute(a, b, c)
assert a1.name == "C"
assert b1.name == "A"
assert c1.name == "B"
|
bsd-3-clause
| -2,490,895,421,420,393,500 | 30.511111 | 87 | 0.545487 | false |
foursquare/pants
|
contrib/scrooge/tests/python/pants_test/contrib/scrooge/tasks/test_scrooge_gen_integration.py
|
1
|
3904
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class ScroogeGenTest(PantsRunIntegrationTest):
@classmethod
def hermetic(cls):
return True
def run_pants(self, command, config=None, stdin_data=None, extra_env=None, **kwargs):
full_config = {
'GLOBAL': {
'pythonpath': ["%(buildroot)s/contrib/scrooge/src/python"],
'backend_packages': ["pants.backend.codegen", "pants.backend.jvm", "pants.contrib.scrooge"]
},
'scala': { 'version': '2.11' },
'gen.scrooge': {
'service_deps': {
'java': [
'3rdparty:slf4j-api',
'3rdparty:thrift-0.6.1',
'3rdparty/jvm/com/twitter:finagle-thrift',
'3rdparty/jvm/com/twitter:scrooge-core',
],
'scala': [
'3rdparty:thrift-0.6.1',
'3rdparty/jvm/com/twitter:finagle-thrift',
'3rdparty/jvm/com/twitter:scrooge-core',
],
},
'service_exports': {
'java': [
'3rdparty:thrift-0.6.1',
],
'scala': [
'3rdparty:thrift-0.6.1',
'3rdparty/jvm/com/twitter:finagle-thrift',
'3rdparty/jvm/com/twitter:scrooge-core',
]
},
'structs_deps': {
'java': [
'3rdparty:thrift-0.6.1',
'3rdparty/jvm/com/twitter:scrooge-core',
],
'scala': [
'3rdparty:thrift-0.6.1',
'3rdparty/jvm/com/twitter:scrooge-core',
],
},
'structs_exports': {
'java': [
'3rdparty:thrift-0.6.1',
'3rdparty/jvm/com/twitter:scrooge-core',
],
'scala': [
'3rdparty:thrift-0.6.1',
'3rdparty/jvm/com/twitter:scrooge-core',
],
}
}
}
if config:
for scope, scoped_cfgs in config.items():
updated = full_config.get(scope, {})
updated.update(scoped_cfgs)
full_config[scope] = updated
return super(ScroogeGenTest, self).run_pants(command, full_config, stdin_data, extra_env,
**kwargs)
@staticmethod
def thrift_test_target(name):
return 'contrib/scrooge/tests/thrift/org/pantsbuild/contrib/scrooge/scrooge_gen:' + name
def test_good(self):
# scrooge_gen should pass with correct thrift files.
cmd = ['gen', self.thrift_test_target('good-thrift')]
pants_run = self.run_pants(cmd)
self.assert_success(pants_run)
def test_exports_of_thrift(self):
# Compiling against a thrift service with strict_deps=True should work
# because the necessary transitive dependencies will be exported.
cmd = ['compile', 'contrib/scrooge/tests/scala/org/pantsbuild/contrib/scrooge/scrooge_gen']
pants_run = self.run_pants(cmd)
self.assert_success(pants_run)
def test_namespace_map(self):
# scrooge_gen should pass with namespace_map specified
cmd = ['gen', self.thrift_test_target('namespace-map-thrift')]
pants_run = self.run_pants(cmd)
self.assert_success(pants_run)
def test_default_java_namespace(self):
# scrooge_gen should pass with default_java_namespace specified
cmd = ['gen', self.thrift_test_target('default-java-namespace-thrift')]
pants_run = self.run_pants(cmd)
self.assert_success(pants_run)
def test_include_paths(self):
# scrooge_gen should pass with include_paths specified
cmd = ['gen', self.thrift_test_target('include-paths-thrift')]
pants_run = self.run_pants(cmd)
self.assert_success(pants_run)
|
apache-2.0
| -673,519,894,424,633,000 | 34.490909 | 99 | 0.587859 | false |
opendatakosovo/bpo
|
app/mod_api/views.py
|
1
|
4818
|
from bson import json_util
from flask import Blueprint, render_template, request
from flask import Response
from datetime import datetime
from app import mongo
from app import utils
import json
mod_api = Blueprint('api', __name__, url_prefix='/api')
@mod_api.route('/', methods=['GET'])
def index():
''' Renders the App index page.
:return:
'''
return render_template('mod_importer/index.html')
@mod_api.route('/search', methods=['POST'])
def search():
params = request.json
# Format date
if 'date' in params:
params['to_date'] = datetime.strptime(params['date'].split('---')[1], '%m-%d-%Y')
params['from_date'] = datetime.strptime(params['date'].split('---')[0], '%m-%d-%Y')
result = {}
result['stats'] = utils.get_stats(params)
result['monthly-stats'] = utils.get_monthly_incidents_stats(params)
result['quarterly-stats'] = utils.get_quarterly_incidents_stats(params)
result['rank-stats'] = utils.get_rank_stats(params)
result['incident-stats'] = utils.get_incidents_stats(params)
result['violence-types'] = utils.get_violence_types(params)
result['daily-stats'] = utils.get_incident_types_by_time(params)
result['top-3'] = utils.get_top_3_stats(params)
result['map-victims-count'] = utils.get_map_victims_count(params)
result['census'] = utils.get_census_info(params)
result['raw-incident-stats'] = utils.get_raw_incidents(params)
result['rank-download-stats'] = utils.get_download_stats(params)
resp = Response(
response=json_util.dumps(result),
mimetype='application/json')
return resp
@mod_api.route('/get_total_victims_number/<string:type>/<string:date>/<string:violence_type>/<string:name>', methods=['GET'])
def get_victims(type, date=None, violence_type=None, name=None):
" Get incidents number based on given params."
if violence_type:
violence_type = violence_type.replace('-', '/')
if date:
from_date = datetime.strptime(date.split('---')[0], '%m-%d-%Y')
to_date = datetime.strptime(date.split('---')[1], '%m-%d-%Y')
match = None
group = None
if name != 'Bangladesh':
match = {
"$match": {
type: {
"$nin": [
""
],
"$in": [
name
]
},
'violence_type': {
"$in": [
str(violence_type)
]
},
"incident_date": {"$gte": from_date, "$lte": to_date}
}
}
else:
match = {
"$match": {
type: {
"$nin": [
""
]
},
"incident_date": {"$gte": from_date, "$lte": to_date}
}
}
if type == 'division':
group = {
"$group": {
"_id": {
'division': '$district'
},
"incidents": {
"$sum": 1
}
}
}
else:
group = {
"$group": {
"_id": {
type: '$' + type
},
"incidents": {
"$sum": 1
}
}
}
sort = {
"$sort": {
"incidents": -1
}
}
project = {
"$project": {
"_id": 0,
type: "$_id." + type,
"incidents": "$incidents"
}
}
aggregation = [match, group, sort, project]
result = mongo.db.mgr.aggregate(aggregation)
resp = Response(
response=json_util.dumps(result['result']),
mimetype='application/json')
return resp
@mod_api.route('/<string:dataset>/get/violence-types', methods=['GET', 'POST'])
def get_violence_types(dataset):
"Get all the violence types based on the given dataset."
violence_types = mongo.db[dataset].distinct('violence_type')
resp = Response(
response=json_util.dumps(violence_types),
mimetype='application/json')
return resp
@mod_api.route('/census/<string:name>/<int:level>', methods=['GET', 'POST'])
def get_census_info(name, level):
"Get census info based on the given Division, District, Upazila."
census_info = None
if level == 0:
census_info = mongo.db.census.find_one({"division": name})
elif level == 1:
census_info = mongo.db.census.find_one({"district": name})
elif level == 2:
census_info = mongo.db.census.find_one({"upazila": name})
resp = Response(
response=json_util.dumps(census_info),
mimetype='application/json')
return resp
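# --- Illustrative request sketch (editor addition) ---
# An example JSON body for POST /api/search. Only the 'date' format
# (mm-dd-YYYY---mm-dd-YYYY) is taken from the parsing code above; the other
# keys are assumptions about what the utils.* helpers may filter on.
#
#   curl -X POST http://localhost:5000/api/search \
#        -H 'Content-Type: application/json' \
#        -d '{"date": "01-01-2016---03-31-2016", "division": "Dhaka"}'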
|
cc0-1.0
| -4,381,790,031,525,156,000 | 29.5 | 125 | 0.512661 | false |