text (string, length 6–947k) | repo_name (string, length 5–100) | path (string, length 4–231) | language (1 class) | license (15 classes) | size (int64, 6–947k) | score (float64, 0–0.34)
---|---|---|---|---|---|---
# coding:utf-8
import logging

import numpy as np
from scipy.linalg import svd

from mla.base import BaseEstimator

np.random.seed(1000)


class PCA(BaseEstimator):
    y_required = False

    def __init__(self, n_components, solver="svd"):
        """Principal component analysis (PCA) implementation.

        Transforms a dataset of possibly correlated values into n linearly
        uncorrelated components. The components are ordered such that the first
        has the largest possible variance and each following component has the
        largest possible variance given the previous components. This causes
        the early components to contain most of the variability in the dataset.

        Parameters
        ----------
        n_components : int
        solver : str, default 'svd'
            {'svd', 'eigen'}
        """
        self.solver = solver
        self.n_components = n_components
        self.components = None
        self.mean = None

    def fit(self, X, y=None):
        self.mean = np.mean(X, axis=0)
        self._decompose(X)

    def _decompose(self, X):
        # Mean centering
        X = X.copy()
        X -= self.mean

        if self.solver == "svd":
            _, s, Vh = svd(X, full_matrices=True)
        elif self.solver == "eigen":
            s, Vh = np.linalg.eig(np.cov(X.T))
            Vh = Vh.T
            # np.linalg.eig does not order its output: sort components by
            # descending eigenvalue so the leading rows carry the most variance.
            order = np.argsort(s)[::-1]
            s, Vh = s[order], Vh[order]

        s_squared = s ** 2
        variance_ratio = s_squared / s_squared.sum()
        logging.info("Explained variance ratio: %s" % (variance_ratio[0:self.n_components]))

        self.components = Vh[0:self.n_components]

    def transform(self, X):
        X = X.copy()
        X -= self.mean
        return np.dot(X, self.components.T)

    def _predict(self, X=None):
        return self.transform(X)
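
# --- Usage sketch (not part of the repository file) ---
# A minimal example of how the estimator above might be exercised; it only
# relies on names defined in this module (PCA, np).
if __name__ == "__main__":
    X_demo = np.random.randn(100, 5)           # toy data: 100 samples x 5 features
    pca = PCA(n_components=2, solver="svd")
    pca.fit(X_demo)
    X_reduced = pca.transform(X_demo)           # projection onto the top 2 components
    print(X_reduced.shape)                      # -> (100, 2)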
| rushter/MLAlgorithms | mla/pca.py | Python | mit | 1,758 | 0.000569 |
"""
Add user scren name to whitelist if it is not to be unfollowed
"""
whitelist = [
]
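
# --- Illustrative sketch (not part of the repository file) ---
# One way a caller might consult the whitelist before unfollowing accounts;
# `candidates` is a hypothetical list of screen names under consideration.
def filter_unfollow_candidates(candidates):
    """Return only the screen names that are safe to unfollow."""
    return [name for name in candidates if name not in whitelist]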
| kimgea/simple_twitter_functions | twitter_functions/twitter/whitelisted_users.py | Python | mit | 119 | 0.016807 |
#!/usr/bin/python
# parsing state representing a subgraph
# initialized with dependency graph
#
from __future__ import absolute_import
import copy,sys,re
import cPickle
from parser import *
from common.util import *
from constants import *
from common.SpanGraph import SpanGraph
from common.AMRGraph import *
import numpy as np
class ActionError(Exception):
pass
class ActionTable(dict):
'''to do'''
def add_action(self,action_name):
key = len(self.keys())+1
self[key] = action_name
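# Illustrative behaviour (comment only): add_action assigns consecutive integer
# keys, so after add_action("NEXT1") and add_action("MERGE") the table holds
# {1: "NEXT1", 2: "MERGE"}.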
class GraphState(object):
"""
Starting from the dependency graph, each state represents a subgraph in the parsing process,
indexed by the current node being handled
"""
sent = None
#abt_tokens = None
deptree = None
action_table = None
#new_actions = None
sentID = 0
gold_graph = None
model = None
verbose = None
def __init__(self,sigma,A):
self.sigma = sigma
self.idx = self.sigma.top()
self.cidx = None
self.beta = None
#self.beta = Buffer(A.nodes[self.idx].children[:]) if self.idx != -1 else None
#self.cidx = self.beta.top()
#if self.beta:
# self.cidx = self.beta.top()
#else:
# self.cidx = None
self.A = A
self.action_history = []
#self.left_label_set = set([])
#self._init_atomics()
@staticmethod
def init_state(instance,verbose=0):
depGraph = SpanGraph.init_dep_graph(instance,instance.tokens)
#depGraph.pre_merge_netag(instance)
seq = []
#if instance.sentID == 104:
# import pdb
# pdb.set_trace()
for r in sorted(depGraph.multi_roots,reverse=True): seq += depGraph.postorder(root=r)
#seq = uniqify(seq)
seq.append(-1)
sigma = Buffer(seq)
sigma.push(START_ID)
GraphState.text = instance.text
GraphState.sent = instance.tokens
#GraphState.abt_tokens = {}
GraphState.gold_graph = instance.gold_graph
if GraphState.gold_graph: GraphState.gold_graph.abt_node_table = {}
GraphState.deptree = depGraph
GraphState.sentID = instance.comment['id'] if instance.comment and 'id' in instance.comment else GraphState.sentID + 1
GraphState.verbose = verbose
if verbose > 1:
print >> sys.stderr,"Sentence ID:%s, initial sigma:%s" % (GraphState.sentID,sigma)
return GraphState(sigma,copy.deepcopy(depGraph))
@staticmethod
def init_action_table(actions):
actionTable = ActionTable()
for act_type,act_str in actions:
actionTable[act_type] = act_str
#GraphState.new_actions = set()
GraphState.action_table = actionTable
def _init_atomics(self):
"""
atomic features for the initial state
"""
# first parent of current node
sp1 = GraphState.sent[self.A.nodes[self.idx].parents[0]] if self.A.nodes[self.idx].parents else NOT_ASSIGNED
# immediate left sibling, immediate right sibling and second right sibling
if sp1 != NOT_ASSIGNED and len(self.A.nodes[sp1['id']].children) > 1:
children = self.A.nodes[sp1['id']].children
idx_order = sorted(children).index(self.idx)
slsb = GraphState.sent[children[idx_order-1]] if idx_order > 0 else NOT_ASSIGNED
srsb = GraphState.sent[children[idx_order+1]] if idx_order < len(children)-1 else NOT_ASSIGNED
sr2sb = GraphState.sent[children[idx_order+2]] if idx_order < len(children)-2 else NOT_ASSIGNED
else:
slsb = EMPTY
srsb = EMPTY
sr2sb = EMPTY
'''
# left first parent of current node
slp1 = GraphState.sent[self.A.nodes[self.idx].parents[0]] if self.A.nodes[self.idx].parents and self.A.nodes[self.idx].parents[0] < self.idx else NOT_ASSIGNED
# right last child of current child
brc1 = GraphState.sent[self.deptree.nodes[self.cidx].children[-1]] if self.cidx and self.A.nodes[self.cidx].children and self.A.nodes[self.cidx].children[-1] > self.cidx else NOT_ASSIGNED
# left first parent of current child
blp1 = GraphState.sent[self.A.nodes[self.cidx].parents[0]] if self.cidx and self.A.nodes[self.cidx].parents and self.A.nodes[self.cidx].parents[0] < self.cidx else NOT_ASSIGNED
'''
self.atomics = [{'id':tok['id'],
'form':tok['form'],
'lemma':tok['lemma'],
'pos':tok['pos'],
'ne':tok['ne'],
'rel':tok['rel'] if 'rel' in tok else EMPTY,
'sp1':sp1,
'slsb':slsb,
'srsb':srsb,
'sr2sb':sr2sb
}
for tok in GraphState.sent] # atomic features for current state
def pcopy(self):
return cPickle.loads(cPickle.dumps(self,-1))
def is_terminal(self):
"""done traverse the graph"""
return self.idx == -1
def is_permissible(self,action):
#TODO
return True
def is_possible_align(self,currentIdx,goldIdx,ref_graph):
'''
tmp_state = self.pcopy()
oracle = __import__("oracle").DetOracleSC()
next_action,label = oracle.give_ref_action(tmp_state,ref_graph)
while tmp_state.beta:
next_action['edge_label'] = label
tmp_state = tmp_state.apply(next_action)
next_action,label = oracle.give_ref_action(tmp_state,ref_graph)
'''
#ref_children = [ref_graph.abt_node_table[c] if c in ref_graph.abt_node_table else c for c in ref_graph.nodes[goldIdx].children]
#return len(set(self.A.nodes[currentIdx].children) & set(ref_children)) > 1 or self.A.nodes[currentIdx].words[0][0].lower() == goldIdx
if self.A.nodes[currentIdx].words[0].lower() in prep_list:
return False
return True
def get_current_argset(self):
if self.idx == START_ID:
return set([])
currentIdx = self.idx
currentNode = self.get_current_node()
currentGraph = self.A
# record the core arguments current node(predicate) have
return set(currentGraph.get_edge_label(currentIdx,c) for c in currentNode.children if currentGraph.get_edge_label(currentIdx,c).startswith('ARG'))
def get_possible_actions(self,train):
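# (Descriptive comment) Candidate actions fall into two regimes: while the child
# buffer beta is non-empty, edge-level actions are proposed (NEXT1, INFER, SWAP,
# REATTACH, MERGE, REPLACEHEAD, REENTRANCE); once beta is exhausted, node-level
# actions are proposed (DELETENODE and NEXT2 with candidate concept tags).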
if self.idx == START_ID:
return [{'type':NEXT2}]
actions = []
currentIdx = self.idx
currentChildIdx = self.cidx
currentNode = self.get_current_node()
currentChild = self.get_current_child()
currentGraph = self.A
token_label_set = GraphState.model.token_label_set
token_to_concept_table = GraphState.model.token_to_concept_table
tag_codebook = GraphState.model.tag_codebook
if isinstance(currentIdx,int):
current_tok_lemma = ','.join(tok['lemma'] for tok in GraphState.sent if tok['id'] in range(currentNode.start,currentNode.end))
current_tok_form = ','.join(tok['form'] for tok in GraphState.sent if tok['id'] in range(currentNode.start,currentNode.end))
current_tok_ne = GraphState.sent[currentIdx]['ne']
else:
current_tok_form = ABT_TOKEN['form']
current_tok_lemma = ABT_TOKEN['lemma'] #if currentIdx != START_ID else START_TOKEN['lemma']
current_tok_ne = ABT_TOKEN['ne'] #if currentIdx != START_ID else START_TOKEN['ne']
#if self.action_history and self.action_history[-1]['type'] in [REPLACEHEAD,NEXT2,DELETENODE] and currentNode.num_parent_infer_in_chain < 3 and currentNode.num_parent_infer == 0:
#actions.extend([{'type':INFER,'tag':z} for z in tag_codebook['ABTTag'].labels()])
if currentChildIdx: # beta not empty
#all_candidate_edge_labels = GraphState.model.rel_codebook.labels()
#if current_tok_lemma in token_label_set:
# all_candidate_edge_labels.extend(list(token_label_set[current_tok_lemma]))
#elif current_tok_ne not in ['O','NUMBER']:
# all_candidate_edge_labels.extend(list(token_label_set[current_tok_ne]))
#all_candidate_tags.extend(GraphState.model.tag_codebook['ETag'].labels())
#else:
# all_candidate_tags.append(current_tok_lemma) # for decoding
if currentChildIdx == START_ID:
if currentNode.num_parent_infer_in_chain < 3 and currentNode.num_parent_infer == 0:
actions = [{'type':NEXT1},{'type':INFER}]
else:
actions = [{'type':NEXT1}]
return actions
if currentIdx != 0: # not root
if not currentChild.SWAPPED:
#actions.extend([{'type':MERGE},{'type':REPLACEHEAD}])
##actions.extend([{'type':NEXT1,'edge_label':y} for y in all_candidate_edge_labels])
#actions.append({'type':NEXT1})
#else:
#actions.extend([{'type':MERGE},{'type':REPLACEHEAD},{'type':SWAP}])
#actions.append({'type':NEXT1})
##if len(currentChild.parents) > 1:
##actions.append({'type':REATTACH,'parent_to_attach':None}) # this equals delete edge
actions.append({'type':SWAP})
actions.extend([{'type':REATTACH,'parent_to_attach':p} for p in currentGraph.get_possible_parent_constrained(currentIdx,currentChildIdx)])
#actions.extend([{'type':NEXT1,'edge_label':y} for y in all_candidate_edge_labels])
if isinstance(currentIdx,int) and isinstance(currentChildIdx,int):
actions.append({'type':MERGE})
actions.extend([{'type':NEXT1},{'type':REPLACEHEAD}])
actions.extend({'type':REENTRANCE,'parent_to_add':x} for x in currentGraph.get_possible_reentrance_constrained(currentIdx,currentChildIdx))
else:
actions.extend([{'type':NEXT1}])
#if len(currentChild.parents) > 1:
#actions.append({'type':REATTACH,'parent_to_attach':None}) # this equals delete edge
actions.extend([{'type':REATTACH,'parent_to_attach':p} for p in currentGraph.get_possible_parent_constrained(currentIdx,currentChildIdx)])
#actions.extend({'type':ADDCHILD,'child_type':x} for x in currentGraph.get_possible_children_unconstrained(currentIdx))
else:
all_candidate_tags = []
# MOD
if current_tok_lemma in token_to_concept_table:
all_candidate_tags.extend(list(token_to_concept_table[current_tok_lemma]))
#all_candidate_tags.append(current_tok_lemma.lower())
elif isinstance(currentIdx,int) and (current_tok_ne not in ['O','NUMBER'] or currentNode.end - currentNode.start > 1):
all_candidate_tags.extend(list(token_to_concept_table[current_tok_ne]))
#all_candidate_tags.append(current_tok_lemma.lower())
#all_candidate_tags.extend(GraphState.model.tag_codebook['ETag'].labels())
elif current_tok_lemma == ABT_TOKEN['lemma']:
#all_candidate_tags.extend(tag_codebook['ABTTag'].labels())
pass
#all_candidate_tags.extend(currentGraph.nodes[currentIdx].tag)
else:
all_candidate_tags.append(current_tok_lemma.lower()) # for decoding
if isinstance(currentIdx,int) and 'frmset' in GraphState.sent[currentIdx] \
and GraphState.sent[currentIdx]['frmset'] not in all_candidate_tags:
all_candidate_tags.append(GraphState.sent[currentIdx]['frmset'])
if not currentNode.children and currentIdx != 0:
actions.append({'type':DELETENODE})
actions.append({'type':NEXT2})
actions.extend({'type':NEXT2,'tag':z} for z in all_candidate_tags)
return actions
def get_node_context(self,idx):
# first parent of current node
if self.A.nodes[idx].parents:
p1 = GraphState.sent[self.A.nodes[idx].parents[0]] if isinstance(self.A.nodes[idx].parents[0],int) else ABT_TOKEN
p1_brown_repr = BROWN_CLUSTER[p1['form']]
p1['brown4'] = p1_brown_repr[:4] if len(p1_brown_repr) > 3 else p1_brown_repr
p1['brown6'] = p1_brown_repr[:6] if len(p1_brown_repr) > 5 else p1_brown_repr
p1['brown10'] = p1_brown_repr[:10] if len(p1_brown_repr) > 9 else p1_brown_repr
p1['brown20'] = p1_brown_repr[:20] if len(p1_brown_repr) > 19 else p1_brown_repr
else:
p1 = NOT_ASSIGNED
if isinstance(idx,int):
prs1 = GraphState.sent[idx-1] if idx > 0 else NOT_ASSIGNED
prs2 = GraphState.sent[idx-2] if idx > 1 else NOT_ASSIGNED
else:
prs1 = ABT_TOKEN
prs2 = ABT_TOKEN
# immediate left sibling, immediate right sibling and second right sibling
if p1 != NOT_ASSIGNED and len(self.A.nodes[self.A.nodes[idx].parents[0]].children) > 1:
children = self.A.nodes[self.A.nodes[idx].parents[0]].children
idx_order = sorted(children).index(idx)
if idx_order > 0:
lsb = GraphState.sent[children[idx_order-1]] if isinstance(children[idx_order-1],int) else ABT_TOKEN
else:
lsb = NOT_ASSIGNED
if idx_order < len(children)-1:
rsb = GraphState.sent[children[idx_order+1]] if isinstance(children[idx_order+1],int) else ABT_TOKEN
else:
rsb = NOT_ASSIGNED
if idx_order < len(children)-2:
r2sb = GraphState.sent[children[idx_order+2]] if isinstance(children[idx_order+2],int) else ABT_TOKEN
else:
r2sb = NOT_ASSIGNED
else:
lsb = EMPTY
rsb = EMPTY
r2sb = EMPTY
return prs2,prs1,p1,lsb,rsb,r2sb
def get_feature_context_window(self,action):
"""context window for current node and its child"""
def isprep(token):
return token['pos'] == 'IN' and token['rel'] == 'prep'
def delta_func(tag_to_predict,tok_form):
if isinstance(tag_to_predict,(ConstTag,ETag)):
return 'ECTag'
else:
tok_form = tok_form.lower()
tag_lemma = tag_to_predict.split('-')[0]
if tag_lemma == tok_form:
return '1'
i=0
slength = len(tag_lemma) if len(tag_lemma) < len(tok_form) else len(tok_form)
while i < slength and tag_lemma[i] == tok_form[i]:
i += 1
if i == 0:
return '0'
elif tok_form[i:]:
return tok_form[i:]
elif tag_lemma[i:]:
return tag_lemma[i:]
else:
assert False
s0_atomics = GraphState.sent[self.idx].copy() if isinstance(self.idx,int) else ABT_TOKEN#GraphState.abt_tokens[self.idx]
s0_brown_repr = BROWN_CLUSTER[s0_atomics['form']]
s0_atomics['brown4'] = s0_brown_repr[:4] if len(s0_brown_repr) > 3 else s0_brown_repr
s0_atomics['brown6'] = s0_brown_repr[:6] if len(s0_brown_repr) > 5 else s0_brown_repr
s0_atomics['brown8'] = s0_brown_repr[:8] if len(s0_brown_repr) > 7 else s0_brown_repr
s0_atomics['brown10'] = s0_brown_repr[:10] if len(s0_brown_repr) > 9 else s0_brown_repr
s0_atomics['brown20'] = s0_brown_repr[:20] if len(s0_brown_repr) > 19 else s0_brown_repr
#s0_atomics['pfx'] = s0_atomics['form'][:4] if len(s0_atomics['form']) > 3 else s0_atomics['form']
sprs2,sprs1,sp1,slsb,srsb,sr2sb=self.get_node_context(self.idx)
s0_atomics['prs1']=sprs1
s0_atomics['prs2']=sprs2
s0_atomics['p1']=sp1
s0_atomics['lsb']=slsb
s0_atomics['rsb']=srsb
s0_atomics['r2sb']=sr2sb
s0_atomics['len']=self.A.nodes[self.idx].end - self.A.nodes[self.idx].start if isinstance(self.idx,int) else NOT_ASSIGNED
#s0_atomics['cap']=s0_atomics['form'].istitle()
s0_atomics['dch']=sorted([GraphState.sent[j]['form'].lower() if isinstance(j,int) else ABT_FORM for j in self.A.nodes[self.idx].del_child])
s0_atomics['reph']=sorted([GraphState.sent[j]['form'].lower() if isinstance(j,int) else ABT_FORM for j in self.A.nodes[self.idx].rep_parent])
#s0_atomics['nech'] = len(set(GraphState.sent[j]['ne'] if isinstance(j,int) else ABT_NE for j in self.A.nodes[self.idx].children) & INFER_NETAG) > 0
#s0_atomics['isnom'] = s0_atomics['lemma'] in NOMLIST
core_args = set([self.A.get_edge_label(self.idx,child) for child in self.A.nodes[self.idx].children if self.A.get_edge_label(self.idx,child).startswith('ARG') and child != self.cidx])
s0_atomics['lsl']=str(sorted(core_args)) # core argument
s0_atomics['arg0']='ARG0' in core_args
s0_atomics['arg1']='ARG1' in core_args
s0_atomics['arg2']='ARG2' in core_args
# prop feature
s0_atomics['frmset']=GraphState.sent[self.idx]['frmset'] if isinstance(self.idx,int) and 'frmset' in GraphState.sent[self.idx] else NOT_ASSIGNED
# mod here
# next2 specific features
if not self.cidx:
if 'tag' in action: # next2
tag_to_predict = action['tag']
s0_atomics['eqfrmset'] = s0_atomics['frmset'] == tag_to_predict if s0_atomics['frmset'] is not NOT_ASSIGNED else NOT_ASSIGNED
s0_atomics['txv'] = len(tag_to_predict.split('-'))==2
s0_atomics['txn'] = isinstance(tag_to_predict,ETag)
s0_atomics['txdelta'] = delta_func(tag_to_predict,s0_atomics['form'])
else:
s0_atomics['txv'] = NOT_ASSIGNED
s0_atomics['txn'] = NOT_ASSIGNED
s0_atomics['txdelta'] = NOT_ASSIGNED
s0_atomics['eqfrmset'] = NOT_ASSIGNED
s0_atomics['isleaf'] = len(self.A.nodes[self.idx].children) == 0
else:
s0_atomics['txv'] = NOT_APPLY
s0_atomics['txn'] = NOT_APPLY
s0_atomics['txdelta'] = NOT_APPLY
s0_atomics['eqfrmset'] = NOT_APPLY
s0_atomics['isleaf'] = NOT_APPLY
s0_args = None
s0_prds = None
if isinstance(self.idx,int) and GraphState.sent[self.idx].get('args',{}):
s0_args = GraphState.sent[self.idx]['args']
if isinstance(self.idx,int) and GraphState.sent[self.idx].get('pred',{}):
s0_prds = GraphState.sent[self.idx]['pred']
if self.cidx and self.cidx != START_ID:
b0_atomics = GraphState.sent[self.cidx].copy() if isinstance(self.cidx,int) else ABT_TOKEN #GraphState.abt_tokens[self.cidx]
b0_brown_repr = BROWN_CLUSTER[b0_atomics['form']]
b0_atomics['brown4'] = b0_brown_repr[:4] if len(b0_brown_repr) > 3 else b0_brown_repr
b0_atomics['brown6'] = b0_brown_repr[:6] if len(b0_brown_repr) > 5 else b0_brown_repr
b0_atomics['brown8'] = b0_brown_repr[:8] if len(b0_brown_repr) > 7 else b0_brown_repr
b0_atomics['brown10'] = b0_brown_repr[:10] if len(b0_brown_repr) > 9 else b0_brown_repr
b0_atomics['brown20'] = b0_brown_repr[:20] if len(b0_brown_repr) > 19 else b0_brown_repr
b0_atomics['concept'] = self.A.nodes[self.cidx].tag
bprs2,bprs1,bp1,blsb,brsb,br2sb = self.get_node_context(self.cidx)
b0_atomics['prs1']=bprs1
b0_atomics['prs2']=bprs2
b0_atomics['p1']=bp1
b0_atomics['lsb']=blsb
b0_atomics['rsb']=brsb
b0_atomics['r2sb']=br2sb
b0_atomics['nswp']=self.A.nodes[self.cidx].num_swap
b0_atomics['reph']=sorted([GraphState.sent[rp]['form'] if isinstance(rp,int) else ABT_FORM for rp in self.A.nodes[self.cidx].rep_parent])
b0_atomics['len']=self.A.nodes[self.cidx].end - self.A.nodes[self.cidx].start if isinstance(self.cidx,int) else NOT_ASSIGNED
b0_atomics['dch']=sorted([GraphState.sent[j]['form'].lower() if isinstance(j,int) else ABT_FORM for j in self.A.nodes[self.cidx].del_child])
b0_atomics['eqne']=(s0_atomics['ne']==b0_atomics['ne'] and b0_atomics['ne'] in PRE_MERGE_NETAG)
b0_atomics['isne']=b0_atomics['ne'] in PRE_MERGE_NETAG
b0_atomics['hastrace'] = len(self.A.nodes[self.cidx].incoming_traces) > 0
# prop feature
b0_atomics['isarg']=self.cidx in s0_args if s0_args else NOT_ASSIGNED
b0_atomics['arglabel']=s0_args[self.cidx] if b0_atomics['isarg'] else NOT_ASSIGNED
b0_atomics['isprd']=self.cidx in s0_prds if s0_prds else NOT_ASSIGNED
b0_atomics['prdlabel']=s0_prds[self.cidx] if b0_atomics['isprd'] else NOT_ASSIGNED
if isinstance(self.cidx,int) and isinstance(self.idx,int):
path,direction = GraphState.deptree.get_path(self.cidx,self.idx)
if self.A.nodes[self.idx].end - self.A.nodes[self.idx].start > 1:
path_pos_str = [(GraphState.sent[i]['pos'],GraphState.sent[i]['rel']) for i in path[1:-1] if i not in range(self.A.nodes[self.idx].start,self.A.nodes[self.idx].end)]
path_x_str_pp = [('X','X') if not isprep(GraphState.sent[i]) else GraphState.sent[i]['form'] for i in path[1:-1] if i not in range(self.A.nodes[self.idx].start,self.A.nodes[self.idx].end)]
else:
path_pos_str = [(GraphState.sent[i]['pos'],GraphState.sent[i]['rel']) for i in path[1:-1]]
path_x_str_pp = [('X','X') if not isprep(GraphState.sent[i]) else GraphState.sent[i]['form'] for i in path[1:-1]]
path_pos_str.insert(0,GraphState.sent[path[0]]['rel'])
path_pos_str.append(GraphState.sent[path[-1]]['rel'])
path_x_str_pp.insert(0,GraphState.sent[path[0]]['rel'])
path_x_str_pp.append(GraphState.sent[path[-1]]['rel'])
b0_atomics['pathp'] = path_pos_str
b0_atomics['pathprep'] = path_x_str_pp
b0_atomics['pathpwd'] = str(path_pos_str) + direction
b0_atomics['pathprepwd'] = str(path_x_str_pp) + direction
else:
b0_atomics['pathp'] = EMPTY
b0_atomics['pathprep'] = EMPTY
b0_atomics['pathpwd'] = EMPTY
b0_atomics['pathprepwd'] = EMPTY
b0_atomics['apathx'] = EMPTY
b0_atomics['apathp'] = EMPTY
b0_atomics['apathprep'] = EMPTY
b0_atomics['apathxwd'] = EMPTY
b0_atomics['apathpwd'] = EMPTY
b0_atomics['apathprepwd'] = EMPTY
else:
b0_atomics = EMPTY
if action['type'] in [REATTACH,REENTRANCE]:
#child_to_add = action['child_to_add']
if action['type'] == REATTACH:
parent_to_attach = action['parent_to_attach']
else:
parent_to_attach = action['parent_to_add']
if parent_to_attach is not None:
a0_atomics = GraphState.sent[parent_to_attach].copy() if isinstance(parent_to_attach,int) else ABT_TOKEN #GraphState.abt_tokens[parent_to_attach]
a0_brown_repr = BROWN_CLUSTER[a0_atomics['form']]
a0_atomics['brown4'] = a0_brown_repr[:4] if len(a0_brown_repr) > 3 else a0_brown_repr
a0_atomics['brown6'] = a0_brown_repr[:6] if len(a0_brown_repr) > 5 else a0_brown_repr
a0_atomics['brown8'] = a0_brown_repr[:8] if len(a0_brown_repr) > 7 else a0_brown_repr
a0_atomics['brown10'] = a0_brown_repr[:10] if len(a0_brown_repr) > 9 else a0_brown_repr
a0_atomics['brown20'] = a0_brown_repr[:20] if len(a0_brown_repr) > 19 else a0_brown_repr
a0_atomics['concept'] = self.A.nodes[parent_to_attach].tag
aprs2,aprs1,ap1,alsb,arsb,ar2sb = self.get_node_context(parent_to_attach)
a0_atomics['p1']=ap1
a0_atomics['lsb']=alsb
a0_atomics['rsb']=arsb
a0_atomics['r2sb']=ar2sb
a0_atomics['nswp']=self.A.nodes[parent_to_attach].num_swap
a0_atomics['isne']=a0_atomics['ne'] is not 'O'
itr = list(self.A.nodes[self.cidx].incoming_traces)
tr = [t for r,t in itr]
a0_atomics['istrace'] = parent_to_attach in tr if len(tr) > 0 else EMPTY
a0_atomics['rtr'] = itr[tr.index(parent_to_attach)][0] if parent_to_attach in tr else EMPTY
a0_atomics['hasnsubj'] = b0_atomics['rel'] in set(GraphState.sent[c]['rel'] for c in self.A.nodes[parent_to_attach].children if isinstance(c,int))
#a0_atomics['iscycle'] = parent_to_attach in self.A.nodes[self.cidx].children or parent_to_attach in self.A.nodes[self.cidx].parents
# prop feature
b0_prds = None
b0_args = None
if isinstance(self.cidx,int) and GraphState.sent[self.cidx].get('pred',{}):
b0_prds = GraphState.sent[self.cidx]['pred']
if isinstance(self.cidx,int) and GraphState.sent[self.cidx].get('args',{}):
b0_args = GraphState.sent[self.cidx]['args']
a0_atomics['isprd']=parent_to_attach in b0_prds if b0_prds else NOT_ASSIGNED
a0_atomics['prdlabel']=b0_prds[parent_to_attach] if a0_atomics['isprd'] else NOT_ASSIGNED
a0_atomics['isarg']=parent_to_attach in b0_args if b0_args else NOT_ASSIGNED
a0_atomics['arglabel']=b0_args[parent_to_attach] if a0_atomics['isarg'] else NOT_ASSIGNED
if isinstance(self.cidx,int) and isinstance(parent_to_attach,int):
path,direction = GraphState.deptree.get_path(self.cidx,parent_to_attach)
#path_x_str=[(GraphState.sent[i]['pos'],GraphState.sent[i]['rel']) for i in path[1:-1]]
if self.A.nodes[parent_to_attach].end - self.A.nodes[parent_to_attach].start > 1:
apath_x_str = [('X','X') for i in path[1:-1] if i not in range(self.A.nodes[parent_to_attach].start,self.A.nodes[parent_to_attach].end)]
apath_pos_str = [(GraphState.sent[i]['pos'],GraphState.sent[i]['rel']) for i in path[1:-1] if i not in range(self.A.nodes[parent_to_attach].start,self.A.nodes[parent_to_attach].end)]
apath_pos_str_pp = [(GraphState.sent[i]['pos'],GraphState.sent[i]['rel']) if not isprep(GraphState.sent[i]) else GraphState.sent[i]['form'] for i in path[1:-1] if i not in range(self.A.nodes[parent_to_attach].start,self.A.nodes[parent_to_attach].end)]
else:
apath_x_str = [('X','X') for i in path[1:-1]]
apath_pos_str = [(GraphState.sent[i]['pos'],GraphState.sent[i]['rel']) for i in path[1:-1]]
apath_pos_str_pp = [(GraphState.sent[i]['pos'],GraphState.sent[i]['rel']) if not isprep(GraphState.sent[i]) else GraphState.sent[i]['form'] for i in path[1:-1]]
apath_x_str.insert(0,GraphState.sent[path[0]]['rel'])
apath_x_str.append(GraphState.sent[path[-1]]['rel'])
apath_pos_str.insert(0,GraphState.sent[path[0]]['rel'])
apath_pos_str.append(GraphState.sent[path[-1]]['rel'])
apath_pos_str_pp.insert(0,GraphState.sent[path[0]]['rel'])
apath_pos_str_pp.append(GraphState.sent[path[-1]]['rel'])
#path_label_str = [GraphState.sent[i]['rel'] for i in path] # dependency label
#path_lemma_str.insert(0,GraphState.sent[path[0]]['rel'])
#path_lemma_str.append(GraphState.sent[path[-1]]['rel'])
b0_atomics['apathx'] = apath_x_str
b0_atomics['apathp'] = apath_pos_str
b0_atomics['apathprep'] = apath_pos_str_pp
b0_atomics['apathxwd'] = str(apath_x_str) + direction
b0_atomics['apathpwd'] = str(apath_pos_str) + direction
b0_atomics['apathprepwd'] = str(apath_pos_str_pp) + direction
#a0_atomics['pathl'] = path_label_str
else:
b0_atomics['pathp'] = EMPTY
b0_atomics['pathprep'] = EMPTY
b0_atomics['pathpwd'] = EMPTY
b0_atomics['pathprepwd'] = EMPTY
b0_atomics['apathx'] = EMPTY
b0_atomics['apathp'] = EMPTY
b0_atomics['apathprep'] = EMPTY
b0_atomics['apathxwd'] = EMPTY
b0_atomics['apathpwd'] = EMPTY
b0_atomics['apathprepwd'] = EMPTY
else:
a0_atomics = EMPTY
else:
a0_atomics = EMPTY
#a0_atomics = s0_atomics
'''
if action['type'] == REENTRANCE:
parent_to_add = action['parent_to_add']
itr = list(self.A.nodes[self.cidx].incoming_traces)
tr = [t for r,t in itr]
a0_atomics['istrace'] = parent_to_add in tr if len(tr) > 0 else EMPTY
#a0_atomics['rtr'] = itr[tr.index(parent_to_add)][0] if parent_to_add in tr else EMPTY
else:
a0_atomics = EMPTY
'''
if self.cidx == START_ID:
s0_atomics['nech'] = len(set(GraphState.sent[j]['ne'] if isinstance(j,int) else ABT_NE for j in self.A.nodes[self.idx].children) & INFER_NETAG) > 0
s0_atomics['isnom'] = s0_atomics['lemma'].lower() in NOMLIST
s0_atomics['concept']=self.A.nodes[self.idx].tag
if self.A.nodes[self.idx].children:
c1 = self.A.nodes[self.idx].children[0]
s0_atomics['c1lemma'] = GraphState.sent[c1]['lemma'].lower() if isinstance(c1,int) else ABT_LEMMA
s0_atomics['c1dl'] = GraphState.sent[c1]['rel'] if isinstance(c1,int) else ABT_LEMMA
else:
s0_atomics['c1lemma'] = EMPTY
s0_atomics['c1dl'] = EMPTY
else:
s0_atomics['c1lemma'] = NOT_APPLY#EMPTY
s0_atomics['concept'] = NOT_APPLY#EMPTY
s0_atomics['nech'] = NOT_APPLY#EMPTY
s0_atomics['isnom'] = NOT_APPLY#EMPTY
s0_atomics['c1dl'] = NOT_APPLY#EMPTY
'''
if action['type'] == REENTRANCE and 'parent_to_add' in action: # reattach
#child_to_add = action['child_to_add']
parent_to_add = action['parent_to_add']
r0_atomics = GraphState.sent[parent_to_add]
rprs2,rprs1,rp1,rlsb,rrsb,rr2sb = self.get_node_context(parent_to_add)
r0_atomics['p1']=rp1
r0_atomics['lsb']=rlsb
r0_atomics['rsb']=rrsb
r0_atomics['r2sb']=rr2sb
r0_atomics['nswp']=self.A.nodes[parent_to_add].num_swap
r0_atomics['isne']=r0_atomics['ne'] is not 'O'
#path,direction = self.A.get_path(self.cidx,parent_to_attach)
path,direction = GraphState.deptree.get_path(self.cidx,parent_to_attach)
#path_x_str=[(GraphState.sent[i]['pos'],GraphState.sent[i]['rel']) for i in path[1:-1]]
if self.A.nodes[parent_to_attach].end - self.A.nodes[parent_to_attach].start > 1:
apath_x_str = [('X','X') for i in path[1:-1] if i not in range(self.A.nodes[parent_to_attach].start,self.A.nodes[parent_to_attach].end)]
apath_pos_str = [(GraphState.sent[i]['pos'],GraphState.sent[i]['rel']) for i in path[1:-1] if i not in range(self.A.nodes[parent_to_attach].start,self.A.nodes[parent_to_attach].end)]
apath_pos_str_pp = [(GraphState.sent[i]['pos'],GraphState.sent[i]['rel']) if not isprep(GraphState.sent[i]) else GraphState.sent[i]['form'] for i in path[1:-1] if i not in range(self.A.nodes[parent_to_attach].start,self.A.nodes[parent_to_attach].end)]
else:
apath_x_str = [('X','X') for i in path[1:-1]]
apath_pos_str = [(GraphState.sent[i]['pos'],GraphState.sent[i]['rel']) for i in path[1:-1]]
apath_pos_str_pp = [(GraphState.sent[i]['pos'],GraphState.sent[i]['rel']) if not isprep(GraphState.sent[i]) else GraphState.sent[i]['form'] for i in path[1:-1]]
apath_x_str.insert(0,GraphState.sent[path[0]]['rel'])
apath_x_str.append(GraphState.sent[path[-1]]['rel'])
apath_pos_str.insert(0,GraphState.sent[path[0]]['rel'])
apath_pos_str.append(GraphState.sent[path[-1]]['rel'])
apath_pos_str_pp.insert(0,GraphState.sent[path[0]]['rel'])
apath_pos_str_pp.append(GraphState.sent[path[-1]]['rel'])
#path_label_str = [GraphState.sent[i]['rel'] for i in path] # dependency label
#path_lemma_str.insert(0,GraphState.sent[path[0]]['rel'])
#path_lemma_str.append(GraphState.sent[path[-1]]['rel'])
b0_atomics['apathx'] = apath_x_str
b0_atomics['apathp'] = apath_pos_str
b0_atomics['apathprep'] = apath_pos_str_pp
b0_atomics['apathxwd'] = str(apath_x_str) + direction
b0_atomics['apathpwd'] = str(apath_pos_str) + direction
b0_atomics['apathprepwd'] = str(apath_pos_str_pp) + direction
#a0_atomics['pathl'] = path_label_str
else:
a0_atomics = EMPTY
'''
return (s0_atomics,b0_atomics,a0_atomics)
def get_gold_edge_graph(self):
gold_edge_graph = copy.deepcopy(self.A)
parsed_tuples = gold_edge_graph.tuples()
gold_tuples = self.gold_graph.tuples()
for t_tuple in parsed_tuples:
if t_tuple in gold_tuples:
gold_arc_label = self.gold_graph.get_edge_label(t_tuple[0],t_tuple[1])
gold_edge_graph.set_edge_label(t_tuple[0],t_tuple[1],gold_arc_label)
return gold_edge_graph
def get_gold_tag_graph(self):
gold_tag_graph = copy.deepcopy(self.A)
for nid in gold_tag_graph.nodes.keys()[:]:
if nid in self.gold_graph.nodes:
gold_tag_label = self.gold_graph.get_node_tag(nid)
gold_tag_graph.set_node_tag(nid,gold_tag_label)
return gold_tag_graph
def get_gold_label_graph(self):
gold_label_graph = copy.deepcopy(self.A)
parsed_tuples = gold_label_graph.tuples()
gold_tuples = self.gold_graph.tuples()
for t_tuple in parsed_tuples:
if t_tuple in gold_tuples:
gold_arc_label = self.gold_graph.get_edge_label(t_tuple[0],t_tuple[1])
gold_label_graph.set_edge_label(t_tuple[0],t_tuple[1],gold_arc_label)
gold_tag_label1 = self.gold_graph.get_node_tag(t_tuple[0])
gold_label_graph.set_node_tag(t_tuple[0],gold_tag_label1)
gold_tag_label2 = self.gold_graph.get_node_tag(t_tuple[1])
gold_label_graph.set_node_tag(t_tuple[1],gold_tag_label2)
return gold_label_graph
def evaluate(self):
num_correct_arcs = .0
num_correct_labeled_arcs = .0
parsed_tuples = self.A.tuples()
if self.verbose > 1:
print >> sys.stderr, 'Parsed tuples:'+str(parsed_tuples)
num_parsed_arcs = len(parsed_tuples)
gold_tuples = self.gold_graph.tuples()
num_gold_arcs = len(gold_tuples)
num_correct_tags = .0
num_parsed_tags = .0
num_gold_tags = .0
visited_nodes = set()
for t_tuple in parsed_tuples:
p,c = t_tuple
p_p,c_p = p,c
if p in self.A.abt_node_table: p = self.A.abt_node_table[p]
if c in self.A.abt_node_table: c = self.A.abt_node_table[c]
if p_p not in visited_nodes:
visited_nodes.add(p_p)
p_tag = self.A.get_node_tag(p_p)
if p in self.gold_graph.nodes:
g_p_tag = self.gold_graph.get_node_tag(p)
if p_tag == g_p_tag:# and not (isinstance(g_p_tag,(ETag,ConstTag)) or re.match('\w+-\d+',g_p_tag)): #and isinstance(g_p_tag,(ETag,ConstTag)):
num_correct_tags += 1.0
else:
self.A.nodes_error_table[p_p]=NODE_TYPE_ERROR
else:
self.A.nodes_error_table[p_p]=NODE_MATCH_ERROR
# if p_tag == NULL_TAG:
# num_correct_tags += 1.0
if c_p not in visited_nodes:
visited_nodes.add(c_p)
c_tag = self.A.get_node_tag(c_p)
if c in self.gold_graph.nodes:
g_c_tag = self.gold_graph.get_node_tag(c)
if c_tag == g_c_tag:# and not (isinstance(g_c_tag,(ETag,ConstTag)) or re.match('\w+-\d+',g_c_tag)): #and isinstance(g_c_tag,(ETag,ConstTag)):
num_correct_tags += 1.0
else:
self.A.nodes_error_table[c_p]=NODE_TYPE_ERROR
else:
self.A.nodes_error_table[c_p]=NODE_MATCH_ERROR
#else:
# if c_tag == NULL_TAG:
# num_correct_tags += 1.0
if (p,c) in gold_tuples:
num_correct_arcs += 1.0
parsed_arc_label = self.A.get_edge_label(p_p,c_p)
gold_arc_label = self.gold_graph.get_edge_label(p,c)
if parsed_arc_label == gold_arc_label:
num_correct_labeled_arcs += 1.0
else:
self.A.edges_error_table[(p_p,c_p)]=EDGE_TYPE_ERROR
else:
self.A.edges_error_table[(p_p,c_p)]=EDGE_MATCH_ERROR
#num_parsed_tags = len([i for i in visited_nodes if re.match('\w+-\d+',self.A.get_node_tag(i))])
#num_gold_tags = len([j for j in self.gold_graph.nodes if re.match('\w+-\d+',self.gold_graph.get_node_tag(j))])
#num_parsed_tags = len([i for i in visited_nodes if isinstance(self.A.get_node_tag(i),(ETag,ConstTag))])
#num_gold_tags = len([j for j in self.gold_graph.nodes if isinstance(self.gold_graph.get_node_tag(j),(ETag,ConstTag))])
#num_parsed_tags = len([i for i in visited_nodes if not (isinstance(self.A.get_node_tag(i),(ETag,ConstTag)) or re.match('\w+-\d+',self.A.get_node_tag(i)))])
#num_gold_tags = len([j for j in self.gold_graph.nodes if not (isinstance(self.gold_graph.get_node_tag(j),(ETag,ConstTag)) or re.match('\w+-\d+',self.gold_graph.get_node_tag(j)))])
num_parsed_tags = len(visited_nodes)
num_gold_tags = len(self.gold_graph.nodes)
return num_correct_labeled_arcs,num_correct_arcs,num_parsed_arcs,num_gold_arcs,num_correct_tags,num_parsed_tags,num_gold_tags
'''
def evaluate_actions(self,gold_state):
gold_act_seq = gold_state.action_history
parsed_act_seq = self.action_history
confusion_matrix = np.zeros(shape=(len(GraphState.action_table),len(GraphState.action_table)))
edge_label_count = defaultdict(float)
# chop out the longer one
common_step = len(gold_act_seq) if len(gold_act_seq) <= len(parsed_act_seq) else len(parsed_act_seq)
for i in range(common_step):
g_act = gold_act_seq[i]
p_act = parsed_act_seq[i]
confusion_matrix[g_act['type'],p_act['type']]+=1
if g_act['type'] == p_act['type'] and g_act['type'] in ACTION_WITH_EDGE:
if g_act == p_act:
edge_label_count[g_act['type']]+=1.0
#for j in range(confusion_matrix.shape[0]):
# if j in ACTION_WITH_EDGE:
# confusion_matrix[j,j] = edge_label_count[j]/confusion_matrix[j,j] if confusion_matrix[j,j] != 0.0 else 0.0
return confusion_matrix
'''
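# (Descriptive comment) get_score below: an action's score is the sum of the
# weight rows indexed by its active features, i.e. a dot product between a
# binary feature vector and the weight matrix; `train` selects the raw weights,
# otherwise the averaged weights (presumably an averaged perceptron) are used.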
def get_score(self,act_type,feature,train=True):
act_idx = GraphState.model.class_codebook.get_index(act_type)
#if GraphState.model.weight[act_idx].shape[0] <= GraphState.model.feature_codebook[act_idx].size():
# GraphState.model.reshape_weight(act_idx)
weight = GraphState.model.weight[act_idx] if train else GraphState.model.avg_weight[act_idx]
feat_idx = map(GraphState.model.feature_codebook[act_idx].get_index,feature)
return np.sum(weight[ [i for i in feat_idx if i is not None] ],axis = 0)
def make_feat(self,action):
feat = GraphState.model.feats_generator(self,action)
return feat
def get_current_node(self):
return self.A.nodes[self.idx]
def get_current_child(self):
if self.cidx and self.cidx in self.A.nodes:
return self.A.nodes[self.cidx]
else:
return None
def apply(self,action):
action_type = action['type']
other_params = dict([(k,v) for k,v in action.items() if k!='type' and v is not None])
self.action_history.append(action)
return getattr(self,GraphState.action_table[action_type])(**other_params)
def next1(self, edge_label=None):
newstate = self.pcopy()
if edge_label and edge_label is not START_EDGE:newstate.A.set_edge_label(newstate.idx,newstate.cidx,edge_label)
newstate.beta.pop()
newstate.cidx = newstate.beta.top() if newstate.beta else None
#newstate.action_history.append(NEXT1)
return newstate
def next2(self, tag=None):
newstate = self.pcopy()
if tag: newstate.A.set_node_tag(newstate.idx,tag)
newstate.sigma.pop()
newstate.idx = newstate.sigma.top()
newstate.beta = Buffer(newstate.A.nodes[newstate.idx].children) if newstate.idx != -1 else None
if newstate.beta is not None: newstate.beta.push(START_ID)
newstate.cidx = newstate.beta.top() if newstate.beta else None
#newstate.action_history.append(NEXT2)
return newstate
def delete_node(self):
newstate = self.pcopy()
newstate.A.remove_node(newstate.idx,RECORD=True)
newstate.sigma.pop()
newstate.idx = newstate.sigma.top()
newstate.beta = Buffer(newstate.A.nodes[newstate.idx].children) if newstate.idx != -1 else None
if newstate.beta is not None: newstate.beta.push(START_ID)
newstate.cidx = newstate.beta.top() if newstate.beta else None
#newstate.action_history.append(DELETENODE)
return newstate
def infer(self, tag):
'''
infer abstract node on core noun
'''
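# (Descriptive comment) INFER creates a new abstract (unaligned) concept node
# tagged `tag` -- presumably attached as a parent of the current node by
# new_abt_node -- and re-pushes the current node so that the abstract node sits
# directly beneath it on sigma and is processed next.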
newstate = self.pcopy()
abt_node_index = newstate.A.new_abt_node(newstate.idx,tag)
# add the atomic info from its core noun
#abt_atomics = {}
#abt_atomics['id'] = abt_node_index
#abt_atomics['form'] = ABT_FORM
#abt_atomics['lemma'] = ABT_LEMMA
#abt_atomics['pos'] = GraphState.sent[newstate.idx]['pos'] if isinstance(newstate.idx,int) else GraphState.abt_tokens[newstate.idx]['pos']
#abt_atomics['ne'] = GraphState.sent[newstate.idx]['ne'] if isinstance(newstate.idx,int) else GraphState.abt_tokens[newstate.idx]['ne']
#abt_atomics['rel'] = GraphState.sent[newstate.idx]['rel'] if isinstance(newstate.idx,int) else GraphState.abt_tokens[newstate.idx]['rel']
#GraphState.abt_tokens[abt_node_index] = abt_atomics
tmp = newstate.sigma.pop()
newstate.sigma.push(abt_node_index)
newstate.sigma.push(tmp)
return newstate
'''
infer abstract node on edge pair: may cause feature inconsistency
def infer1(self):
newstate = self.pcopy()
abt_node_index = newstate.A.new_abt_node(newstate.idx)
newstate.A.reattach_node(newstate.idx,newstate.cidx,abt_node_index,NULL_EDGE)
tmp = newstate.sigma.pop()
newstate.sigma.push(abt_node_index)
newstate.sigma.push(tmp)
newstate.beta.pop()
newstate.beta.append(abt_node_index)
newstate.cidx = newstate.beta.top() if newstate.beta else None
return newstate
'''
'''
def delete_edge(self):
newstate = self.pcopy()
newstate.A.remove_edge(newstate.idx,newstate.cidx)
#catm = self.atomics[newstate.cidx]
#cparents = sorted(newstate.A.nodes[self.cidx].parents)
#catm['blp1'] = GraphState.sent[cparents[0]] if cparents and cparents[0] < self.cidx else NOT_ASSIGNED
newstate.beta.pop()
newstate.cidx = newstate.beta.top() if newstate.beta else None
#newstate.action_history.append(DELETEEDGE)
return newstate
'''
def reattach(self,parent_to_attach=None,edge_label=None):
newstate = self.pcopy()
newstate.A.reattach_node(newstate.idx,newstate.cidx,parent_to_attach,edge_label)
newstate.beta.pop()
newstate.cidx = newstate.beta.top() if newstate.beta else None
return newstate
def swap(self,edge_label):
newstate = self.pcopy()
newstate.A.swap_head2(newstate.idx,newstate.cidx,newstate.sigma,edge_label)
newstate._fix_prop_feature(newstate.idx,newstate.cidx)
#newstate.idx = newstate.cidx
tmp = newstate.sigma.pop()
tmp1 = newstate.sigma.pop() if newstate.A.nodes[tmp].num_parent_infer > 0 else None
if newstate.cidx not in newstate.sigma: newstate.sigma.push(newstate.cidx)
if tmp1: newstate.sigma.push(tmp1)
newstate.sigma.push(tmp)
# TODO revisit
#newstate.beta.pop()
newstate.beta = Buffer([c for c in newstate.A.nodes[newstate.idx].children if c != newstate.cidx and c not in newstate.A.nodes[newstate.cidx].parents])
newstate.cidx = newstate.beta.top() if newstate.beta else None
#newstate.action_history.append(SWAP)
return newstate
'''
def change_head(self,goldParent):
newstate = self.pcopy()
newstate.A.remove_edge(newstate.idx,newstate.cidx)
newstate.A.add_edge(goldParent,newstate.cidx)
newstate.A.relativePos(newstate.cidx,goldParent)
'''
def reentrance(self,parent_to_add,edge_label=None):
newstate = self.pcopy()
#delnodes = newstate.A.clear_up(parent_to_add,newstate.cidx)
#for dn in delnodes:
# if dn in newstate.sigma:
# newstate.sigma.remove(dn)
if edge_label:
try:
newstate.A.add_edge(parent_to_add,newstate.cidx,edge_label)
except KeyError:
import pdb
pdb.set_trace()
else:
newstate.A.add_edge(parent_to_add,newstate.cidx)
return newstate
def add_child(self,child_to_add,edge_label=None):
newstate = self.pcopy()
if edge_label:
newstate.A.add_edge(newstate.idx,child_to_add,edge_label)
else:
newstate.A.add_edge(newstate.idx,child_to_add)
#hoffset,voffset = GraphState.deptree.relativePos(newstate.idx,node_to_add)
#atype = GraphState.deptree.relativePos2(newstate.idx,child_to_add)
#self.new_actions.add('add_child_('+str(hoffset)+')_('+str(voffset)+')_'+str(GraphState.sentID))
#self.new_actions.add('add_child_%s_%s'%(atype,str(GraphState.sentID)))
#newstate.action_history.append(ADDCHILD)
return newstate
def _fix_prop_feature(self,idx,cidx):
'''update cidx's prop feature with idx's prop feature'''
if isinstance(idx,int) and isinstance(cidx,int):
ctok = GraphState.sent[cidx]
tok = GraphState.sent[idx]
ctok['pred'] = ctok.get('pred',{})
ctok['pred'].update(dict((k,v) for k,v in tok.get('pred',{}).items() if k!=cidx))
for prd in tok.get('pred',{}).copy():
if prd != cidx:
try:
tmp = GraphState.sent[prd]['args'].pop(idx)
GraphState.sent[idx]['pred'].pop(prd)
except KeyError:
import pdb
pdb.set_trace()
GraphState.sent[prd]['args'][cidx] = tmp
ctok['args'] = ctok.get('args',{})
ctok['args'].update(dict((k,v) for k,v in tok.get('args',{}).items() if k!=cidx))
for arg in tok.get('args',{}).copy():
if arg != cidx:
try:
atmp = GraphState.sent[arg]['pred'].pop(idx)
GraphState.sent[idx]['args'].pop(arg)
except KeyError:
import pdb
pdb.set_trace()
GraphState.sent[arg]['pred'][cidx] = atmp
def replace_head(self):
"""
Use current child to replace current node
"""
newstate = self.pcopy()
newstate.beta = Buffer([c for c in newstate.A.nodes[newstate.idx].children if c != newstate.cidx and c not in newstate.A.nodes[newstate.cidx].parents])
#for old_c in newstate.A.nodes[newstate.cidx].children: newstate.beta.push(old_c)
newstate.A.replace_head(newstate.idx,newstate.cidx)
newstate._fix_prop_feature(newstate.idx,newstate.cidx)
if newstate.idx in newstate.sigma: newstate.sigma.remove(newstate.idx)
if newstate.cidx in newstate.sigma: newstate.sigma.remove(newstate.cidx) # pushing cidx to top
newstate.sigma.push(newstate.cidx)
newstate.A.record_rep_head(newstate.cidx,newstate.idx)
newstate.idx = newstate.cidx
newstate.cidx = newstate.beta.top() if newstate.beta else None
#newstate.action_history.append(REPLACEHEAD)
return newstate
def merge(self):
"""
merge nodes to form entity
"""
newstate = self.pcopy()
tmp1 = newstate.idx
tmp2 = newstate.cidx
#try:
newstate.A.merge_node(tmp1,tmp2)
#except KeyError:
# import pdb
# pdb.set_trace()
if tmp1 < tmp2:
if tmp2 in newstate.sigma:
newstate.sigma.remove(tmp2)
else:
if tmp2 in newstate.sigma: newstate.sigma.remove(tmp2) # pushing tmp2 to the top
newstate.sigma.push(tmp2)
if tmp1 in newstate.sigma: newstate.sigma.remove(tmp1)
newstate.idx = tmp1 if tmp1 < tmp2 else tmp2
newstate.cidx = tmp2 if tmp1 < tmp2 else tmp1
GraphState.sent[newstate.idx]['rel'] = GraphState.sent[tmp1]['rel']
newstate._fix_prop_feature(newstate.cidx,newstate.idx)
#newstate.A.merge_node(newstate.idx,newstate.cidx)
newstate.beta = Buffer(newstate.A.nodes[newstate.idx].children[:])
newstate.cidx = newstate.beta.top() if newstate.beta else None
#newstate.action_history.append(MERGE)
return newstate
@staticmethod
def get_parsed_amr(span_graph):
def unpack_node(node,amr,variable):
node_id = node.start
node_tag = node.tag
#if node.tag is None:
# import pdb
# pdb.set_trace()
core_var = None
tokens_in_span = GraphState.sent[node.start:node.end] if isinstance(node_id,int) else node.words
if isinstance(node_tag,ETag):
foo = amr[variable]
pre_abs_id = None
rel = None
for i,abs_tag in enumerate(node_tag.split('+')):
if i == 0: # node already initialized
if '@' in abs_tag: abs_tag,rel = abs_tag.split('@')
amr.node_to_concepts[variable] = abs_tag
pre_abs_id = variable
elif abs_tag == '-': # negation
abs_id = Polarity(abs_tag)
foo = amr[abs_id]
rel = 'polarity'
amr._add_triple(pre_abs_id,rel,abs_id)
pre_abs_id = abs_id
else:
abs_id = abs_tag[0].lower()
j = 0
while abs_id in amr:
j+=1
abs_id = abs_id[0]+str(j)
foo = amr[abs_id]
amr._add_triple(pre_abs_id,rel,abs_id)
if '@' in abs_tag:
abs_tag,rel = abs_tag.split('@')
else:
rel = None
amr.node_to_concepts[abs_id] = abs_tag
#rel = abs_tag
pre_abs_id = abs_id
last_abs_id = pre_abs_id
last_abs_tag = abs_tag
if last_abs_tag == '-':
return variable,core_var
rel_in_span = 'op' if rel is None else rel
for i,tok in enumerate(tokens_in_span):
foo = amr[tok['form']]
if last_abs_tag == 'name':
amr._add_triple(last_abs_id,'op'+str(i+1),StrLiteral(tok['form']))
elif last_abs_tag == 'date-entity':
date_pattern = [
('d1','^({0}{0}{0}{0})(\-{0}{0})?(\-{0}{0})?$'.format('[0-9]')),
('d2','^({0}{0})({0}{0})({0}{0})$'.format('[0-9]'))
]
date_rule = '|'.join('(?P<%s>%s)'%(p,d) for p,d in date_pattern)
m = re.match(date_rule,tok['form'])
if m:
year,month,day = None,None,None
date_type = m.lastgroup
if date_type == 'd1':
year = m.group(2)
if m.group(3) is not None: month = str(int(m.group(3)[1:]))
if m.group(4) is not None: day = str(int(m.group(4)[1:]))
elif date_type == 'd2':
year = '20'+m.group(6) if int(m.group(6)) <= 20 else '19'+m.group(6) # year range from 1921 till 2020
month = str(int(m.group(7)))
day = str(int(m.group(8)))
else:
#raise ValueError('undefined date pattern')
pass
foo = amr[year]
if year != '0000':
amr._add_triple(last_abs_id,'year',Quantity(year))
if month and month != '0':
foo = amr[month]
amr._add_triple(last_abs_id,'month',Quantity(month))
if day and day != '0':
foo = amr[day]
amr._add_triple(last_abs_id,'day',Quantity(day))
elif last_abs_tag.endswith('-quantity'):
new_id = tok['form'][0].lower()
j = 0
while new_id in amr:
j+=1
new_id = new_id[0]+str(j)
foo = amr[new_id]
amr.node_to_concepts[new_id] = tok['form']
amr._add_triple(last_abs_id,'unit',new_id)
elif last_abs_tag == 'have-org-role-91':
new_id = tok['lemma'][0].lower()
j = 0
while new_id in amr:
j+=1
new_id = new_id[0]+str(j)
foo = amr[new_id]
core_var = new_id
amr.node_to_concepts[new_id] = tok['lemma'].lower()
amr._add_triple(last_abs_id,rel_in_span,new_id)
else:
if re.match('[0-9\-]+',tok['form']):
amr._add_triple(last_abs_id,rel_in_span,Quantity(tok['form']))
else:
new_id = tok['lemma'][0].lower()
j = 0
while new_id in amr:
j+=1
new_id = new_id[0]+str(j)
foo = amr[new_id]
amr.node_to_concepts[new_id] = tok['lemma'].lower()
amr._add_triple(last_abs_id,rel_in_span,new_id)
elif isinstance(node_tag,ConstTag):
foo = amr[node_tag]
variable = node_tag
else:
if r'/' in node_tag:
#import pdb
#pdb.set_trace()
variable = StrLiteral(node_tag)
foo = amr[variable]
else:
foo = amr[variable]
amr.node_to_concepts[variable] = node_tag # concept tag
return variable,core_var
amr = AMR()
span_graph.flipConst()
node_prefix = 'x'
cpvar_cache = {}
for parent,child in span_graph.tuples():
pvar = node_prefix+str(parent)
cvar = node_prefix+str(child)
try:
if parent == 0:
if cvar not in amr:
cvar,ccvar = unpack_node(span_graph.nodes[child],amr,cvar)
cpvar_cache[cvar] = ccvar
if cvar not in amr.roots: amr.roots.append(cvar)
else:
rel_label = span_graph.get_edge_label(parent,child)
if pvar not in amr:
pvar,cpvar = unpack_node(span_graph.nodes[parent],amr,pvar)
cpvar_cache[pvar]=cpvar
if cvar not in amr:
cvar,ccvar = unpack_node(span_graph.nodes[child],amr,cvar)
cpvar_cache[cvar]=ccvar
if cpvar_cache.get(pvar,None) and rel_label == 'mod':
amr._add_triple(cpvar_cache[pvar],rel_label,cvar)
else:
amr._add_triple(pvar,rel_label,cvar)
except ValueError as e:
print e
#print span_graph.graphID
if len(amr.roots) > 1:
foo = amr[FAKE_ROOT_VAR]
amr.node_to_concepts[FAKE_ROOT_VAR] = FAKE_ROOT_CONCEPT
for multi_root in amr.roots:
amr._add_triple(FAKE_ROOT_VAR,FAKE_ROOT_EDGE,multi_root)
amr.roots = [FAKE_ROOT_VAR]
elif len(amr.roots) == 0 and len(amr.keys()) != 0:
foo = amr[FAKE_ROOT_VAR]
amr.node_to_concepts[FAKE_ROOT_VAR] = FAKE_ROOT_CONCEPT
for mlt_root in span_graph.get_multi_roots():
mrvar = node_prefix + str(mlt_root)
if mrvar in amr:
amr._add_triple(FAKE_ROOT_VAR,FAKE_ROOT_EDGE,mrvar)
amr.roots=[FAKE_ROOT_VAR]
elif len(amr.roots) == 1 and amr.roots[0] not in amr.node_to_concepts: # Const tag
foo = amr[FAKE_ROOT_VAR]
amr.node_to_concepts[FAKE_ROOT_VAR] = FAKE_ROOT_CONCEPT
amr._add_triple(FAKE_ROOT_VAR,FAKE_ROOT_EDGE,amr.roots[0])
amr.roots = [FAKE_ROOT_VAR]
elif len(amr.keys()) == 0:
foo = amr[FAKE_ROOT_VAR]
amr.node_to_concepts[FAKE_ROOT_VAR] = FAKE_ROOT_CONCEPT
for mlt_root in span_graph.get_multi_roots():
mrvar = node_prefix + str(mlt_root)
foo = amr[mrvar]
amr.node_to_concepts[mrvar] = span_graph.nodes[mlt_root].tag
amr._add_triple(FAKE_ROOT_VAR,FAKE_ROOT_EDGE,mrvar)
amr.roots=[FAKE_ROOT_VAR]
else:
pass
return amr
def print_config(self, column_len = 80):
output = ''
if self.cidx:
if self.idx == START_ID:
span_g = START_FORM
else:
span_g = ','.join(tok['form'] for tok in GraphState.sent[self.idx:self.A.nodes[self.idx].end]) if isinstance(self.idx,int) else ','.join(self.A.nodes[self.idx].words)
if self.cidx == START_ID:
span_d = START_FORM
else:
span_d = ','.join(tok['form'] for tok in GraphState.sent[self.cidx:self.A.nodes[self.cidx].end]) if isinstance(self.cidx,int) else ','.join(self.A.nodes[self.cidx].words)
output += 'ID:%s %s\nParent:(%s-%s) Child:(%s-%s)'%(str(GraphState.sentID),self.text,\
span_g, self.idx, \
span_d, self.cidx)
else:
'''
if self.action_history and self.action_history[-1] == ADDCHILD: # add child
added_child_idx = self.A.nodes[self.idx].children[-1]
output += 'ID:%s %s\nParent:(%s-%s) add child:(%s-%s)'%(str(GraphState.sentID),self.text,\
','.join(tok['form'] for tok in GraphState.sent[self.idx:self.A.nodes[self.idx].end]), self.idx, \
','.join(tok['form'] for tok in GraphState.sent[added_child_idx:self.A.nodes[added_child_idx].end]), added_child_idx)
else:
'''
if self.idx == START_ID:
span_g = START_FORM
output += 'ID:%s %s\nParent:(%s-%s) Children:%s'%(str(GraphState.sentID),self.text,\
span_g, self.idx, 'None')
else:
span_g = ','.join(tok['form'] for tok in GraphState.sent[self.idx:self.A.nodes[self.idx].end]) if isinstance(self.idx,int) else ','.join(self.A.nodes[self.idx].words)
output += 'ID:%s %s\nParent:(%s-%s) Children:%s'%(str(GraphState.sentID),self.text,\
span_g, self.idx, \
['('+','.join(tok['form'] for tok in GraphState.sent[c:self.A.nodes[c].end])+')' if isinstance(c,int) else '('+','.join(self.A.nodes[c].words)+')' for c in self.A.nodes[self.idx].children])
output += '\n'
parsed_tuples = self.A.tuples()
ref_tuples = self.gold_graph.tuples()
num_p = len(parsed_tuples)
num_r = len(ref_tuples)
tnum = num_r if num_r > num_p else num_p
for i in range(tnum):
strformat = '{0:<%s}|{1:<%s}' % (column_len,column_len)
if i < num_p and i < num_r:
g,d = parsed_tuples[i]
gg,gd = ref_tuples[i]
parsed_edge_label = self.A.get_edge_label(g,d)
gold_edge_label = self.gold_graph.get_edge_label(gg,gd)
gold_span_gg = ','.join(tok['form'] for tok in GraphState.sent[gg:self.gold_graph.nodes[gg].end]) if isinstance(gg,int) else ','.join(self.gold_graph.nodes[gg].words)
gold_span_gd = ','.join(tok['form'] for tok in GraphState.sent[gd:self.gold_graph.nodes[gd].end]) if isinstance(gd,int) else ','.join(self.gold_graph.nodes[gd].words)
parsed_span_g = ','.join(tok['form'] for tok in GraphState.sent[g:self.A.nodes[g].end]) if isinstance(g,int) else ','.join(self.A.nodes[g].words)
parsed_span_d = ','.join(tok['form'] for tok in GraphState.sent[d:self.A.nodes[d].end]) if isinstance(d,int) else ','.join(self.A.nodes[d].words)
parsed_tag_g = self.A.get_node_tag(g)
parsed_tag_d = self.A.get_node_tag(d)
gold_tag_gg = self.gold_graph.get_node_tag(gg)
gold_tag_gd = self.gold_graph.get_node_tag(gd)
parsed_tuple_str = "(%s(%s-%s:%s),(%s-%s:%s))" % (parsed_edge_label, parsed_span_g, g, parsed_tag_g, parsed_span_d, d, parsed_tag_d)
ref_tuple_str = "(%s(%s-%s:%s),(%s-%s:%s))" % (gold_edge_label, gold_span_gg, gg, gold_tag_gg, gold_span_gd, gd, gold_tag_gd)
output += strformat.format(parsed_tuple_str,ref_tuple_str)
output += '\n'
elif i < num_p and i >= num_r:
g,d = parsed_tuples[i]
parsed_edge_label = self.A.get_edge_label(g,d)
parsed_tag_g = self.A.get_node_tag(g)
parsed_tag_d = self.A.get_node_tag(d)
parsed_span_g = ','.join(tok['form'] for tok in GraphState.sent[g:self.A.nodes[g].end]) if isinstance(g,int) else ','.join(self.A.nodes[g].words)
parsed_span_d = ','.join(tok['form'] for tok in GraphState.sent[d:self.A.nodes[d].end]) if isinstance(d,int) else ','.join(self.A.nodes[d].words)
parsed_tuple_str = "(%s(%s-%s:%s),(%s-%s:%s))" % (parsed_edge_label, parsed_span_g, g, parsed_tag_g, parsed_span_d, d, parsed_tag_d)
output += strformat.format(parsed_tuple_str,'*'*column_len)
output += '\n'
elif i >= num_p and i < num_r:
gg,gd = ref_tuples[i]
gold_edge_label = self.gold_graph.get_edge_label(gg,gd)
gold_span_gg = ','.join(tok['form'] for tok in GraphState.sent[gg:self.gold_graph.nodes[gg].end]) if isinstance(gg,int) else ','.join(self.gold_graph.nodes[gg].words)
gold_span_gd = ','.join(tok['form'] for tok in GraphState.sent[gd:self.gold_graph.nodes[gd].end]) if isinstance(gd,int) else ','.join(self.gold_graph.nodes[gd].words)
gold_tag_gg = self.gold_graph.get_node_tag(gg)
gold_tag_gd = self.gold_graph.get_node_tag(gd)
ref_tuple_str = "(%s(%s-%s:%s),(%s-%s:%s))" % (gold_edge_label, gold_span_gg, gg, gold_tag_gg, gold_span_gd, gd, gold_tag_gd)
output += strformat.format('*'*column_len,ref_tuple_str)
output += '\n'
else:
pass
return output
def write_basic_amr(self,out,CONST_REL='ARG0'):
'''
this method takes the unlabeled edges produced by the parser and
labels them with a fake AMR relation mapped from the dependency tag set
'''
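# (Illustrative, hypothetical values) a resulting triple might look like
# ('ARG0', 'give', 'boy'): the dependency label of each (gov, dep) pair is
# mapped to a fake AMR role, falling back to CONST_REL when no mapping exists
# or when the edge is not in the dependency tree.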
CoNLLSent = GraphState.sent
parsed_tuples = self.A.tuples()
out.write(str(GraphState.sentID)+'\n')
fake_amr_triples = []
for g,d in parsed_tuples:
gov = CoNLLSent[g]
dep = CoNLLSent[d]
if dep['head'] == gov['id']: # tuple also in dependency tree
rel = get_fake_amr_relation_mapping(dep['rel']) if get_fake_amr_relation_mapping(dep['rel']) != 'NONE' else CONST_REL
fake_amr_triples.append((rel,gov['lemma'],dep['lemma']))
else:
fake_amr_triples.append((CONST_REL,gov['lemma'],dep['lemma']))
out.write(str(fake_amr_triples[-1])+'\n')
return fake_amr_triples
| didzis/CAMR | graphstate.py | Python | gpl-2.0 | 68,252 | 0.013406 |
# -*- coding: utf-8 -*-
#+---------------------------------------------------------------------------+
#| 01001110 01100101 01110100 01111010 01101111 01100010 |
#| |
#| Netzob : Inferring communication protocols |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011 Georges Bossert and Frédéric Guihéry |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.netzob.org |
#| @contact : contact@netzob.org |
#| @sponsors : Amossys, http://www.amossys.fr |
#| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Standard library imports
#+---------------------------------------------------------------------------+
import logging
#+---------------------------------------------------------------------------+
#| Local application imports
#+---------------------------------------------------------------------------+
from netzob.Common.Type.Format import Format
from netzob.Common.Models.L2NetworkMessage import L2NetworkMessage
from netzob.Common.Models.Factories.L3NetworkMessageFactory import L3NetworkMessageFactory
from netzob.Common.Property import Property
## Notes:
# - It may be less clear to speak of "Layer 3 source address" than of "IP address"...
class L3NetworkMessage(L2NetworkMessage):
"""Definition of a layer 3 network message"""
def __init__(self, id, timestamp, data, l2Protocol, l2SourceAddress,
l2DestinationAddress, l3Protocol, l3SourceAddress,
l3DestinationAddress, pattern=[]):
if len(pattern) == 1:
pattern.insert(0, str(l3DestinationAddress))
super(L3NetworkMessage, self).__init__(id, timestamp, data, l2Protocol,
l2SourceAddress, l2DestinationAddress, pattern=[])
self.type = "L3Network"
self.l3Protocol = str(l3Protocol)
self.l3SourceAddress = str(l3SourceAddress)
self.l3DestinationAddress = str(l3DestinationAddress)
def getFactory(self):
return L3NetworkMessageFactory
def getL3Protocol(self):
return self.l3Protocol
def getL3SourceAddress(self):
return self.l3SourceAddress
def getL3DestinationAddress(self):
return self.l3DestinationAddress
def getProperties(self):
properties = super(L3NetworkMessage, self).getProperties()
properties.append(Property('Layer 3 Protocol', Format.STRING, self.getL3Protocol()))
properties.append(Property('Layer 3 Source Address', Format.IP, self.getL3SourceAddress()))
properties.append(Property('Layer 3 Destination Address', Format.IP, self.getL3DestinationAddress()))
return properties
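# Illustrative usage sketch (not part of the Netzob sources; every constructor value
# below is a made-up sample, shown only to make the Layer 3 field layout concrete):
#
#   msg = L3NetworkMessage("id-1", 0, b"...", "Ethernet",
#                          "00:11:22:33:44:55", "66:77:88:99:aa:bb",
#                          "IP", "192.168.0.1", "192.168.0.2")
#   msg.getL3SourceAddress()   # -> "192.168.0.1"
#   msg.getProperties()        # L2 properties plus the three Layer 3 entries above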
|
nagyistoce/netzob
|
src/netzob/Common/Models/L3NetworkMessage.py
|
Python
|
gpl-3.0
| 4,102 | 0.008787 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
__author__='atareao'
__date__ ='$25/04/2011'
#
# Remember-me
# An indicator for Google Calendar
#
# Copyright (C) 2011 Lorenzo Carbonell
# lorenzo.carbonell.cerezo@gmail.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
#
import os
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import GLib
from gi.repository import AppIndicator3 as appindicator
from gi.repository import Gtk
from gi.repository import GdkPixbuf
from gi.repository import Notify
import urllib.request
import time
import dbus
import locale
import gettext
import datetime
import webbrowser
from calendardialog import CalendarDialog
from calendarwindow import CalendarWindow
from addcalendarwindow import AddCalendarWindow
from eventwindow import EventWindow
from googlecalendarapi import GoogleCalendar
#
import comun
from configurator import Configuration
from preferences_dialog import Preferences
#
locale.setlocale(locale.LC_ALL, '')
gettext.bindtextdomain(comun.APP, comun.LANGDIR)
gettext.textdomain(comun.APP)
_ = gettext.gettext
def wait(time_lapse):
time_start = time.time()
time_end = (time_start + time_lapse)
while time_end > time.time():
while Gtk.events_pending():
Gtk.main_iteration()
def short_msg(msg,length=50):
if len(msg)>length:
return msg[:length]
return msg
def internet_on():
try:
response=urllib.request.urlopen('http://google.com',timeout=1)
return True
except Exception as e:
print(e)
return False
def check_events(event1,event2):
return event1['id'] == event2['id']
def is_event_in_events(an_event,events):
for event in events:
if check_events(an_event,event):
return True
return False
def add2menu(menu, text = None, icon = None, conector_event = None, conector_action = None):
if text != None:
menu_item = Gtk.ImageMenuItem.new_with_label(text)
if icon:
image = Gtk.Image.new_from_stock(icon, Gtk.IconSize.MENU)
menu_item.set_image(image)
menu_item.set_always_show_image(True)
else:
if icon == None:
menu_item = Gtk.SeparatorMenuItem()
else:
menu_item = Gtk.ImageMenuItem.new_from_stock(icon, None)
menu_item.set_always_show_image(True)
if conector_event != None and conector_action != None:
menu_item.connect(conector_event,conector_action)
menu_item.show()
menu.append(menu_item)
return menu_item
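# Illustrative sketch of add2menu (the label and callback below are hypothetical;
# the real entries are built further down in CalendarIndicator.create_menu):
#
#   menu = Gtk.Menu()
#   add2menu(menu, text='Sync now', conector_event='activate',
#            conector_action=lambda widget: print('sync'))
#   add2menu(menu)   # neither text nor icon -> a separator item is appended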
class EventMenuItem(Gtk.MenuItem):
def __init__(self,label):
Gtk.MenuItem.__init__(self,label)
self.event = None
def get_event(self):
return self.event
def set_event(self,event):
self.event = event
if 'summary' in event.keys():
self.set_label(event.get_start_date_string()+' - '+short_msg(event['summary']))
else:
self.set_label(event.get_start_date_string())
class CalendarIndicator():
def __init__(self):
if dbus.SessionBus().request_name("es.atareao.calendar-indicator") != dbus.bus.REQUEST_NAME_REPLY_PRIMARY_OWNER:
print("application already running")
exit(0)
self.indicator = appindicator.Indicator.new('Calendar-Indicator', 'Calendar-Indicator', appindicator.IndicatorCategory.APPLICATION_STATUS)
self.notification = Notify.Notification.new('','', None)
self.googlecalendar = GoogleCalendar(token_file = comun.TOKEN_FILE)
error = True
while(error):
if self.googlecalendar.do_refresh_authorization() is None:
p = Preferences()
if p.run() == Gtk.ResponseType.ACCEPT:
p.save_preferences()
p.destroy()
self.googlecalendar = GoogleCalendar(token_file = comun.TOKEN_FILE)
if (not os.path.exists(comun.TOKEN_FILE)) or (self.googlecalendar.do_refresh_authorization() is None):
md = Gtk.MessageDialog( parent = None,
flags = Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
type = Gtk.MessageType.ERROR,
buttons = Gtk.ButtonsType.OK_CANCEL,
message_format = _('You have to authorize Calendar-Indicator to manage your Google Calendar.\n Do you want to authorize?'))
if md.run() == Gtk.ResponseType.CANCEL:
exit(3)
md.destroy()
else:
self.googlecalendar = GoogleCalendar(token_file = comun.TOKEN_FILE)
if self.googlecalendar.do_refresh_authorization() is None:
error = False
else:
error = False
self.load_preferences()
#
self.events = []
self.create_menu()
self.sync()
self.update_menu()
self.actualization_time = time.time()
GLib.timeout_add_seconds(60, self.work)
def sync(self):
self.googlecalendar.read()
def load_preferences(self):
configuration = Configuration()
self.time = configuration.get('time')
self.theme = configuration.get('theme')
self.calendars = configuration.get('calendars')
self.visible_calendars = []
for calendar in self.calendars:
if calendar['visible']:
self.visible_calendars.append(calendar['id'])
def work(self):
self.update_menu(check=True)
if (time.time()-self.actualization_time) > self.time*3600:
if internet_on():
self.sync()
self.actualization_time = time.time()
return True
def create_menu(self):
self.menu = Gtk.Menu()
self.menu_events = []
for i in range(10):
menu_event = EventMenuItem('%s'%i)
menu_event.show()
menu_event.set_visible(False)
menu_event.connect('activate',self.on_menu_event_activate)
self.menu.append(menu_event)
self.menu_events.append(menu_event)
add2menu(self.menu)
self.menu_add_new_calendar = add2menu(self.menu, text = _('Add new calendar'), conector_event = 'activate',conector_action = self.on_menu_add_new_calendar)
self.menu_add_new_event = add2menu(self.menu, text = _('Add new event'), conector_event = 'activate',conector_action = self.on_menu_add_new_event)
add2menu(self.menu)
self.menu_refresh = add2menu(self.menu, text = _('Sync with google calendar'), conector_event = 'activate',conector_action = self.on_menu_refresh)
self.menu_show_calendar = add2menu(self.menu, text = _('Show Calendar'), conector_event = 'activate',conector_action = self.menu_show_calendar_response)
self.menu_preferences = add2menu(self.menu, text = _('Preferences'), conector_event = 'activate',conector_action = self.menu_preferences_response)
add2menu(self.menu)
menu_help = add2menu(self.menu, text =_('Help'))
menu_help.set_submenu(self.get_help_menu())
add2menu(self.menu)
add2menu(self.menu, text = _('Exit'), conector_event = 'activate',conector_action = self.menu_exit_response)
self.menu.show()
self.indicator.set_menu(self.menu)
def set_menu_sensitive(self,sensitive = False):
self.menu_add_new_calendar.set_sensitive(sensitive)
self.menu_add_new_event.set_sensitive(sensitive)
self.menu_refresh.set_sensitive(sensitive)
self.menu_show_calendar.set_sensitive(sensitive)
self.menu_preferences.set_sensitive(sensitive)
self.menu_about.set_sensitive(sensitive)
def update_menu(self,check=False):
#
now = datetime.datetime.now()
normal_icon = os.path.join(comun.ICONDIR,'%s-%s-normal.svg'%(now.day,self.theme))
starred_icon = os.path.join(comun.ICONDIR,'%s-%s-starred.svg'%(now.day,self.theme))
#
self.indicator.set_icon(normal_icon)
self.indicator.set_attention_icon(starred_icon)
#
events2 = self.googlecalendar.getNextTenEvents(self.visible_calendars)
if check and len(self.events)>0:
for event in events2:
if not is_event_in_events(event,self.events):
msg = _('New event:')+'\n'
					if 'summary' in event.keys():
msg += event.get_start_date_string() + ' - '+ event['summary']
else:
msg += event.get_start_date_string()
self.notification.update('Calendar Indicator',msg,comun.ICON_NEW_EVENT)
self.notification.show()
for event in self.events:
if not is_event_in_events(event,events2):
msg = _('Event finished:') + '\n'
					if 'summary' in event.keys():
msg += event.get_start_date_string()+' - '+event['summary']
else:
msg += event.get_start_date_string()
self.notification.update('Calendar Indicator',msg,comun.ICON_FINISHED_EVENT)
self.notification.show()
self.events = events2
for i,event in enumerate(self.events):
self.menu_events[i].set_event(event)
self.menu_events[i].set_visible(True)
for i in range(len(self.events),10):
self.menu_events[i].set_visible(False)
now = datetime.datetime.now()
if len(self.events)>0:
com = self.events[0].get_start_date()
if now.year == com.year and now.month == com.month and now.day == com.day and now.hour == com.hour:
self.indicator.set_status(appindicator.IndicatorStatus.ATTENTION)
else:
self.indicator.set_status(appindicator.IndicatorStatus.ACTIVE)
else:
self.indicator.set_status(appindicator.IndicatorStatus.ACTIVE)
while Gtk.events_pending():
Gtk.main_iteration()
def get_help_menu(self):
help_menu =Gtk.Menu()
#
add2menu(help_menu,text = _('In Launchpad'),conector_event = 'activate',conector_action = lambda x: webbrowser.open('https://launchpad.net/calendar-indicator'))
add2menu(help_menu,text = _('Get help online...'),conector_event = 'activate',conector_action = lambda x: webbrowser.open('https://answers.launchpad.net/calendar-indicator'))
add2menu(help_menu,text = _('Translate this application...'),conector_event = 'activate',conector_action = lambda x: webbrowser.open('https://translations.launchpad.net/calendar-indicator'))
add2menu(help_menu,text = _('Report a bug...'),conector_event = 'activate',conector_action = lambda x: webbrowser.open('https://bugs.launchpad.net/calendar-indicator'))
add2menu(help_menu)
web = add2menu(help_menu,text = _('Homepage'),conector_event = 'activate',conector_action = lambda x: webbrowser.open('http://www.atareao.es/tag/calendar-indicator'))
twitter = add2menu(help_menu,text = _('Follow us in Twitter'),conector_event = 'activate',conector_action = lambda x: webbrowser.open('https://twitter.com/atareao'))
googleplus = add2menu(help_menu,text = _('Follow us in Google+'),conector_event = 'activate',conector_action = lambda x: webbrowser.open('https://plus.google.com/118214486317320563625/posts'))
facebook = add2menu(help_menu,text = _('Follow us in Facebook'),conector_event = 'activate',conector_action = lambda x: webbrowser.open('http://www.facebook.com/elatareao'))
add2menu(help_menu)
self.menu_about = add2menu(help_menu,text = _('About'),conector_event = 'activate',conector_action = self.menu_about_response)
#
web.set_image(Gtk.Image.new_from_file(os.path.join(comun.SOCIALDIR,'web.svg')))
web.set_always_show_image(True)
twitter.set_image(Gtk.Image.new_from_file(os.path.join(comun.SOCIALDIR,'twitter.svg')))
twitter.set_always_show_image(True)
googleplus.set_image(Gtk.Image.new_from_file(os.path.join(comun.SOCIALDIR,'googleplus.svg')))
googleplus.set_always_show_image(True)
facebook.set_image(Gtk.Image.new_from_file(os.path.join(comun.SOCIALDIR,'facebook.svg')))
facebook.set_always_show_image(True)
#
help_menu.show()
return help_menu
def on_menu_add_new_event(self,widget):
ew = EventWindow(self.googlecalendar.calendars.values())
if ew.run() == Gtk.ResponseType.ACCEPT:
calendar_id = ew.get_calendar_id()
summary = ew.get_summary()
start_date = ew.get_start_date()
end_date = ew.get_end_date()
description = ew.get_description()
ew.destroy()
new_event = self.googlecalendar.add_event(calendar_id, summary, start_date, end_date, description)
if new_event is not None:
self.googlecalendar.calendars[calendar_id]['events'][new_event['id']] = new_event
self.update_menu(check=True)
ew.destroy()
def on_menu_event_activate(self,widget):
ew = EventWindow(self.googlecalendar.calendars.values(),widget.get_event())
if ew.run() == Gtk.ResponseType.ACCEPT:
if ew.get_operation() == 'DELETE':
ew.destroy()
md = Gtk.MessageDialog( parent = None,
flags = Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
type = Gtk.MessageType.ERROR,
buttons = Gtk.ButtonsType.OK_CANCEL,
										message_format = _('Are you sure you want to remove this event?'))
if md.run() == Gtk.ResponseType.OK:
md.destroy()
event = widget.get_event()
if self.googlecalendar.remove_event(event['calendar_id'],event['id']):
self.googlecalendar.calendars[event['calendar_id']]['events'].pop(event['id'],True)
self.update_menu(check=True)
md.destroy()
elif ew.get_operation() == 'EDIT':
event = widget.get_event()
event_id = event['id']
calendar_id = ew.get_calendar_id()
summary = ew.get_summary()
start_date = ew.get_start_date()
end_date = ew.get_end_date()
description = ew.get_description()
ew.destroy()
md = Gtk.MessageDialog( parent = None,
flags = Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
type = Gtk.MessageType.ERROR,
buttons = Gtk.ButtonsType.OK_CANCEL,
message_format = _('Are you sure you want to edit this event?'))
if md.run() == Gtk.ResponseType.OK:
md.destroy()
edit_event = self.googlecalendar.edit_event(calendar_id, event_id, summary, start_date, end_date, description)
if edit_event is not None:
self.googlecalendar.calendars[calendar_id]['events'][edit_event['id']] = edit_event
self.update_menu(check=True)
md.destroy()
ew.destroy()
def on_menu_add_new_calendar(self,widget):
acw = AddCalendarWindow()
if acw.run() == Gtk.ResponseType.ACCEPT:
calendar_name = acw.entry.get_text()
acw.destroy()
new_calendar = self.googlecalendar.add_calendar(calendar_name)
if new_calendar is not None:
self.googlecalendar.calendars[new_calendar['id']] = new_calendar
acw.destroy()
def menu_preferences_response(self,widget):
self.set_menu_sensitive(False)
p1 = Preferences(self.googlecalendar)
if p1.run() == Gtk.ResponseType.ACCEPT:
p1.save_preferences()
if not os.path.exists(comun.TOKEN_FILE) or self.googlecalendar.do_refresh_authorization() is None:
exit(-1)
self.load_preferences()
self.events = []
self.update_menu()
p1.destroy()
self.set_menu_sensitive(True)
def menu_show_calendar_response(self,widget):
self.set_menu_sensitive(False)
cd = CalendarWindow(self.googlecalendar,calendars=self.visible_calendars)
cd.run()
edited =cd.get_edited()
cd.destroy()
if edited:
self.update_menu(check=True)
self.set_menu_sensitive(True)
def on_menu_refresh(self,widget):
self.sync()
self.update_menu(check=True)
def menu_exit_response(self,widget):
exit(0)
def menu_about_response(self,widget):
self.set_menu_sensitive(False)
ad=Gtk.AboutDialog()
ad.set_name(comun.APPNAME)
ad.set_version(comun.VERSION)
		ad.set_copyright('Copyright (c) 2011-2014\nLorenzo Carbonell')
ad.set_comments(_('An indicator for Google Calendar'))
ad.set_license(''+
'This program is free software: you can redistribute it and/or modify it\n'+
'under the terms of the GNU General Public License as published by the\n'+
'Free Software Foundation, either version 3 of the License, or (at your option)\n'+
'any later version.\n\n'+
'This program is distributed in the hope that it will be useful, but\n'+
'WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY\n'+
'or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for\n'+
'more details.\n\n'+
'You should have received a copy of the GNU General Public License along with\n'+
'this program. If not, see <http://www.gnu.org/licenses/>.')
ad.set_website('http://www.atareao.es')
ad.set_website_label('http://www.atareao.es')
ad.set_authors(['Lorenzo Carbonell <lorenzo.carbonell.cerezo@gmail.com>'])
ad.set_documenters(['Lorenzo Carbonell <lorenzo.carbonell.cerezo@gmail.com>'])
ad.set_translator_credits('Štefan Lučivjanský <https://launchpad.net/~stefan-lucivjansky>\n\
abuyop <https://launchpad.net/~abuyop>\n\
pisartshik <https://launchpad.net/~pisartshik>\n\
ma$terok <https://launchpad.net/~m-shein>\n\
Henrique Gressler <https://launchpad.net/~gresslerbwg>\n\
Luka Korošec <https://launchpad.net/~pizmovc>\n\
CJPark <https://launchpad.net/~pcjpcj2>\n\
Łukasz M <https://launchpad.net/~december0123>\n\
Miguel Anxo Bouzada <https://launchpad.net/~mbouzada>\n\
mgomezbuceta <https://launchpad.net/~mgomezbuceta>\n\
Wang Dianjin <https://launchpad.net/~tuhaihe>\n\
Bence Lukács <https://launchpad.net/~lukacs-bence1>\n\
Aliyar Güneş <https://launchpad.net/~aliyargunes>\n\
Antonio Vicién Faure <https://launchpad.net/~antoniopolonio>\n\
Manos Nikiforakis <https://launchpad.net/~nikiforakis-m>\n\
gogo <https://launchpad.net/~trebelnik-stefina>\n\
A.J. Baudrez <https://launchpad.net/~a.baudrez>\n\
simonbor <https://launchpad.net/~simon-bor>\n\
Jiri Grönroos <https://launchpad.net/~jiri-gronroos>\n')
ad.set_logo(GdkPixbuf.Pixbuf.new_from_file(comun.ICON))
ad.set_program_name(comun.APPNAME)
ad.run()
ad.destroy()
self.set_menu_sensitive(True)
if __name__ == "__main__":
Notify.init("calendar-indicator")
ci=CalendarIndicator()
Gtk.main()
|
christopherjbly/calendar-indicator
|
src/calendarindicator.py
|
Python
|
gpl-3.0
| 17,542 | 0.039813 |
#!/usr/bin/env python
import ctypes
import json
import os
from binascii import hexlify, unhexlify
import pytest
from pyasn1.codec.ber.decoder import decode as ber_decode
from pyasn1.codec.der.decoder import decode as der_decode
from pyasn1.codec.der.encoder import encode as der_encode
from pyasn1.type import namedtype, univ
class EcSignature(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType("r", univ.Integer()),
namedtype.NamedType("s", univ.Integer()),
)
class EcKeyInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType("key_type", univ.ObjectIdentifier()),
namedtype.NamedType("curve_name", univ.ObjectIdentifier()),
)
class EcPublicKey(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType("key_info", EcKeyInfo()),
namedtype.NamedType("public_key", univ.BitString()),
)
class EdKeyInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType("key_type", univ.ObjectIdentifier())
)
class EdPublicKey(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType("key_info", EdKeyInfo()),
namedtype.NamedType("public_key", univ.BitString()),
)
class ParseError(Exception):
pass
class NotSupported(Exception):
pass
class DataError(Exception):
pass
class curve_info(ctypes.Structure):
_fields_ = [("bip32_name", ctypes.c_char_p), ("params", ctypes.c_void_p)]
def keys_in_dict(dictionary, keys):
return keys <= set(dictionary.keys())
def parse_eddsa_signature(signature):
if len(signature) != 64:
raise ParseError("Not a valid EdDSA signature")
return signature
def parse_ecdh256_privkey(private_key):
if private_key < 0 or private_key.bit_length() > 256:
raise ParseError("Not a valid 256 bit ECDH private key")
return private_key.to_bytes(32, byteorder="big")
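# Worked example (value arbitrary): a valid key is serialized big-endian into exactly
# 32 bytes; negative or longer-than-256-bit inputs raise ParseError instead.
assert parse_ecdh256_privkey(1) == b"\x00" * 31 + b"\x01"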
def parse_signed_hex(string):
if len(string) % 2 == 1:
string = "0" + string
number = int(string, 16)
if int(string[0], 16) & 8:
return -number
else:
return number
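# Worked examples (values arbitrary): the "private" fields in the Wycheproof JSON are
# signed big-endian hex, so a leading nibble with its high bit set means negative.
assert parse_signed_hex("7f") == 127    # 0x7 -> high bit clear -> positive
assert parse_signed_hex("8f") == -143   # 0x8 -> high bit set   -> negative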
def parse_result(result):
if result == "valid":
return True
elif result == "invalid":
return False
elif result == "acceptable":
return None
else:
raise DataError()
def is_valid_der(data):
try:
structure, _ = der_decode(data)
return data == der_encode(structure)
except Exception:
return False
def parse_ed_pubkey(public_key):
try:
public_key, _ = ber_decode(public_key, asn1Spec=EdPublicKey())
except Exception:
raise ParseError("Not a BER encoded Edwards curve public key")
if not public_key["key_info"]["key_type"] == univ.ObjectIdentifier("1.3.101.112"):
raise ParseError("Not a BER encoded Edwards curve public key")
public_key = bytes(public_key["public_key"].asOctets())
return public_key
def parse_ec_pubkey(public_key):
try:
public_key, _ = ber_decode(public_key, asn1Spec=EcPublicKey())
except Exception:
raise ParseError("Not a BER encoded named elliptic curve public key")
if not public_key["key_info"]["key_type"] == univ.ObjectIdentifier(
"1.2.840.10045.2.1"
):
raise ParseError("Not a BER encoded named elliptic curve public key")
curve_identifier = public_key["key_info"]["curve_name"]
curve_name = get_curve_name_by_identifier(curve_identifier)
if curve_name is None:
raise NotSupported(
"Unsupported named elliptic curve: {}".format(curve_identifier)
)
try:
public_key = bytes(public_key["public_key"].asOctets())
except Exception:
raise ParseError("Not a BER encoded named elliptic curve public key")
return curve_name, public_key
def parse_ecdsa256_signature(signature):
s = signature
if not is_valid_der(signature):
raise ParseError("Not a valid DER")
try:
signature, _ = der_decode(signature, asn1Spec=EcSignature())
except Exception:
raise ParseError("Not a valid DER encoded ECDSA signature")
try:
r = int(signature["r"]).to_bytes(32, byteorder="big")
s = int(signature["s"]).to_bytes(32, byteorder="big")
signature = r + s
except Exception:
raise ParseError("Not a valid DER encoded 256 bit ECDSA signature")
return signature
def parse_digest(name):
if name == "SHA-256":
return 0
else:
raise NotSupported("Unsupported hash function: {}".format(name))
def get_curve_by_name(name):
lib.get_curve_by_name.restype = ctypes.c_void_p
curve = lib.get_curve_by_name(bytes(name, "ascii"))
if curve is None:
return None
curve = ctypes.cast(curve, ctypes.POINTER(curve_info))
return ctypes.c_void_p(curve.contents.params)
def parse_curve_name(name):
if name == "secp256r1":
return "nist256p1"
elif name == "secp256k1":
return "secp256k1"
elif name == "curve25519":
return "curve25519"
else:
return None
def get_curve_name_by_identifier(identifier):
if identifier == univ.ObjectIdentifier("1.3.132.0.10"):
return "secp256k1"
elif identifier == univ.ObjectIdentifier("1.2.840.10045.3.1.7"):
return "nist256p1"
else:
return None
def chacha_poly_encrypt(key, iv, associated_data, plaintext):
context = bytes(context_structure_length)
tag = bytes(16)
ciphertext = bytes(len(plaintext))
lib.rfc7539_init(context, key, iv)
lib.rfc7539_auth(context, associated_data, len(associated_data))
lib.chacha20poly1305_encrypt(context, plaintext, ciphertext, len(plaintext))
lib.rfc7539_finish(context, len(associated_data), len(plaintext), tag)
return ciphertext, tag
def chacha_poly_decrypt(key, iv, associated_data, ciphertext, tag):
context = bytes(context_structure_length)
computed_tag = bytes(16)
plaintext = bytes(len(ciphertext))
lib.rfc7539_init(context, key, iv)
lib.rfc7539_auth(context, associated_data, len(associated_data))
lib.chacha20poly1305_decrypt(context, ciphertext, plaintext, len(ciphertext))
lib.rfc7539_finish(context, len(associated_data), len(ciphertext), computed_tag)
return plaintext if tag == computed_tag else False
def add_pkcs_padding(data):
padding_length = 16 - len(data) % 16
return data + bytes([padding_length] * padding_length)
def remove_pkcs_padding(data):
padding_length = data[-1]
if not (
0 < padding_length <= 16
and data[-padding_length:] == bytes([padding_length] * padding_length)
):
return False
else:
return data[:-padding_length]
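# Worked example (value arbitrary) for the padding helpers above: a 16-byte input
# gains a full extra block of 0x10 bytes, and remove_pkcs_padding strips it again.
assert add_pkcs_padding(b"YELLOW SUBMARINE") == b"YELLOW SUBMARINE" + bytes([16] * 16)
assert remove_pkcs_padding(add_pkcs_padding(b"YELLOW SUBMARINE")) == b"YELLOW SUBMARINE"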
def aes_encrypt_initialise(key, context):
if len(key) == (128 / 8):
lib.aes_encrypt_key128(key, context)
elif len(key) == (192 / 8):
lib.aes_encrypt_key192(key, context)
elif len(key) == (256 / 8):
lib.aes_encrypt_key256(key, context)
else:
raise NotSupported("Unsupported key length: {}".format(len(key) * 8))
def aes_cbc_encrypt(key, iv, plaintext):
plaintext = add_pkcs_padding(plaintext)
context = bytes(context_structure_length)
ciphertext = bytes(len(plaintext))
aes_encrypt_initialise(key, context)
lib.aes_cbc_encrypt(
plaintext, ciphertext, len(plaintext), bytes(bytearray(iv)), context
)
return ciphertext
def aes_decrypt_initialise(key, context):
if len(key) == (128 / 8):
lib.aes_decrypt_key128(key, context)
elif len(key) == (192 / 8):
lib.aes_decrypt_key192(key, context)
elif len(key) == (256 / 8):
lib.aes_decrypt_key256(key, context)
else:
raise NotSupported("Unsupported AES key length: {}".format(len(key) * 8))
def aes_cbc_decrypt(key, iv, ciphertext):
context = bytes(context_structure_length)
plaintext = bytes(len(ciphertext))
aes_decrypt_initialise(key, context)
lib.aes_cbc_decrypt(ciphertext, plaintext, len(ciphertext), iv, context)
return remove_pkcs_padding(plaintext)
def load_json_testvectors(filename):
try:
result = json.loads(open(os.path.join(testvectors_directory, filename)).read())
except Exception:
raise DataError()
return result
def generate_aes(filename):
vectors = []
data = load_json_testvectors(filename)
if not keys_in_dict(data, {"algorithm", "testGroups"}):
raise DataError()
if data["algorithm"] != "AES-CBC-PKCS5":
raise DataError()
for test_group in data["testGroups"]:
if not keys_in_dict(test_group, {"tests"}):
raise DataError()
for test in test_group["tests"]:
if not keys_in_dict(test, {"key", "iv", "msg", "ct", "result"}):
raise DataError()
try:
key = unhexlify(test["key"])
iv = unhexlify(test["iv"])
plaintext = unhexlify(test["msg"])
ciphertext = unhexlify(test["ct"])
result = parse_result(test["result"])
except Exception:
raise DataError()
if len(key) not in [128 / 8, 192 / 8, 256 / 8]:
continue
if result is None:
continue
vectors.append(
(
hexlify(key),
hexlify(iv),
hexlify(plaintext),
hexlify(ciphertext),
result,
)
)
return vectors
def generate_chacha_poly(filename):
vectors = []
data = load_json_testvectors(filename)
if not keys_in_dict(data, {"algorithm", "testGroups"}):
raise DataError()
if data["algorithm"] != "CHACHA20-POLY1305":
raise DataError()
for test_group in data["testGroups"]:
if not keys_in_dict(test_group, {"tests"}):
raise DataError()
for test in test_group["tests"]:
if not keys_in_dict(
test, {"key", "iv", "aad", "msg", "ct", "tag", "result"}
):
raise DataError()
try:
key = unhexlify(test["key"])
iv = unhexlify(test["iv"])
associated_data = unhexlify(test["aad"])
plaintext = unhexlify(test["msg"])
ciphertext = unhexlify(test["ct"])
tag = unhexlify(test["tag"])
result = parse_result(test["result"])
except Exception:
raise DataError()
if result is None:
continue
vectors.append(
(
hexlify(key),
hexlify(iv),
hexlify(associated_data),
hexlify(plaintext),
hexlify(ciphertext),
hexlify(tag),
result,
)
)
return vectors
def generate_curve25519_dh(filename):
vectors = []
data = load_json_testvectors(filename)
if not keys_in_dict(data, {"algorithm", "testGroups"}):
raise DataError()
if data["algorithm"] != "X25519":
raise DataError()
for test_group in data["testGroups"]:
if not keys_in_dict(test_group, {"tests"}):
raise DataError()
for test in test_group["tests"]:
if not keys_in_dict(
test, {"public", "private", "shared", "result", "curve"}
):
raise DataError()
try:
public_key = unhexlify(test["public"])
curve_name = parse_curve_name(test["curve"])
private_key = unhexlify(test["private"])
shared = unhexlify(test["shared"])
result = parse_result(test["result"])
except Exception:
raise DataError()
if curve_name != "curve25519":
continue
if result is None:
continue
vectors.append(
(hexlify(public_key), hexlify(private_key), hexlify(shared), result)
)
return vectors
def generate_ecdh(filename):
vectors = []
data = load_json_testvectors(filename)
if not keys_in_dict(data, {"algorithm", "testGroups"}):
raise DataError()
if data["algorithm"] != "ECDH":
raise DataError()
for test_group in data["testGroups"]:
if not keys_in_dict(test_group, {"tests"}):
raise DataError()
for test in test_group["tests"]:
if not keys_in_dict(
test, {"public", "private", "shared", "result", "curve"}
):
raise DataError()
try:
public_key = unhexlify(test["public"])
curve_name = parse_curve_name(test["curve"])
private_key = parse_signed_hex(test["private"])
shared = unhexlify(test["shared"])
result = parse_result(test["result"])
except Exception:
raise DataError()
try:
private_key = parse_ecdh256_privkey(private_key)
except ParseError:
continue
try:
key_curve_name, public_key = parse_ec_pubkey(public_key)
except NotSupported:
continue
except ParseError:
continue
if key_curve_name != curve_name:
continue
if result is None:
continue
vectors.append(
(
curve_name,
hexlify(public_key),
hexlify(private_key),
hexlify(shared),
result,
)
)
return vectors
def generate_ecdsa(filename):
vectors = []
data = load_json_testvectors(filename)
if not keys_in_dict(data, {"algorithm", "testGroups"}):
raise DataError()
if data["algorithm"] != "ECDSA":
raise DataError()
for test_group in data["testGroups"]:
if not keys_in_dict(test_group, {"tests", "keyDer", "sha"}):
raise DataError()
try:
public_key = unhexlify(test_group["keyDer"])
except Exception:
raise DataError()
try:
curve_name, public_key = parse_ec_pubkey(public_key)
except NotSupported:
continue
except ParseError:
continue
try:
hasher = parse_digest(test_group["sha"])
except NotSupported:
continue
for test in test_group["tests"]:
if not keys_in_dict(test, {"sig", "msg", "result"}):
raise DataError()
try:
signature = unhexlify(test["sig"])
message = unhexlify(test["msg"])
result = parse_result(test["result"])
except Exception:
raise DataError()
if result is None:
continue
try:
signature = parse_ecdsa256_signature(signature)
except ParseError:
continue
vectors.append(
(
curve_name,
hexlify(public_key),
hasher,
hexlify(message),
hexlify(signature),
result,
)
)
return vectors
def generate_eddsa(filename):
vectors = []
data = load_json_testvectors(filename)
if not keys_in_dict(data, {"algorithm", "testGroups"}):
raise DataError()
if data["algorithm"] != "EDDSA":
raise DataError()
for test_group in data["testGroups"]:
if not keys_in_dict(test_group, {"tests", "keyDer"}):
raise DataError()
try:
public_key = unhexlify(test_group["keyDer"])
except Exception:
raise DataError()
try:
public_key = parse_ed_pubkey(public_key)
except ParseError:
continue
for test in test_group["tests"]:
if not keys_in_dict(test, {"sig", "msg", "result"}):
raise DataError()
try:
signature = unhexlify(test["sig"])
message = unhexlify(test["msg"])
result = parse_result(test["result"])
except Exception:
raise DataError()
if result is None:
continue
try:
signature = parse_eddsa_signature(signature)
except ParseError:
continue
vectors.append(
(hexlify(public_key), hexlify(message), hexlify(signature), result)
)
return vectors
dir = os.path.abspath(os.path.dirname(__file__))
lib = ctypes.cdll.LoadLibrary(os.path.join(dir, "libtrezor-crypto.so"))
testvectors_directory = os.path.join(dir, "wycheproof/testvectors")
context_structure_length = 1024
ecdh_vectors = generate_ecdh("ecdh_test.json")
curve25519_dh_vectors = generate_curve25519_dh("x25519_test.json")
eddsa_vectors = generate_eddsa("eddsa_test.json")
ecdsa_vectors = (
generate_ecdsa("ecdsa_test.json")
+ generate_ecdsa("ecdsa_secp256k1_sha256_test.json")
+ generate_ecdsa("ecdsa_secp256r1_sha256_test.json")
)
ecdh_vectors = (
generate_ecdh("ecdh_test.json")
+ generate_ecdh("ecdh_secp256k1_test.json")
+ generate_ecdh("ecdh_secp256r1_test.json")
)
chacha_poly_vectors = generate_chacha_poly("chacha20_poly1305_test.json")
aes_vectors = generate_aes("aes_cbc_pkcs5_test.json")
@pytest.mark.parametrize("public_key, message, signature, result", eddsa_vectors)
def test_eddsa(public_key, message, signature, result):
public_key = unhexlify(public_key)
signature = unhexlify(signature)
message = unhexlify(message)
computed_result = (
lib.ed25519_sign_open(message, len(message), public_key, signature) == 0
)
assert result == computed_result
@pytest.mark.parametrize(
"curve_name, public_key, hasher, message, signature, result", ecdsa_vectors
)
def test_ecdsa(curve_name, public_key, hasher, message, signature, result):
curve = get_curve_by_name(curve_name)
if curve is None:
raise NotSupported("Curve not supported: {}".format(curve_name))
public_key = unhexlify(public_key)
signature = unhexlify(signature)
message = unhexlify(message)
computed_result = (
lib.ecdsa_verify(curve, hasher, public_key, signature, message, len(message))
== 0
)
assert result == computed_result
@pytest.mark.parametrize(
"public_key, private_key, shared, result", curve25519_dh_vectors
)
def test_curve25519_dh(public_key, private_key, shared, result):
public_key = unhexlify(public_key)
private_key = unhexlify(private_key)
shared = unhexlify(shared)
computed_shared = bytes([0] * 32)
lib.curve25519_scalarmult(computed_shared, private_key, public_key)
computed_result = shared == computed_shared
assert result == computed_result
@pytest.mark.parametrize(
"curve_name, public_key, private_key, shared, result", ecdh_vectors
)
def test_ecdh(curve_name, public_key, private_key, shared, result):
curve = get_curve_by_name(curve_name)
if curve is None:
raise NotSupported("Curve not supported: {}".format(curve_name))
public_key = unhexlify(public_key)
private_key = unhexlify(private_key)
shared = unhexlify(shared)
computed_shared = bytes([0] * 2 * 32)
lib.ecdh_multiply(curve, private_key, public_key, computed_shared)
computed_shared = computed_shared[1:33]
computed_result = shared == computed_shared
assert result == computed_result
@pytest.mark.parametrize(
"key, iv, associated_data, plaintext, ciphertext, tag, result", chacha_poly_vectors
)
def test_chacha_poly(key, iv, associated_data, plaintext, ciphertext, tag, result):
key = unhexlify(key)
iv = unhexlify(iv)
associated_data = unhexlify(associated_data)
plaintext = unhexlify(plaintext)
ciphertext = unhexlify(ciphertext)
tag = unhexlify(tag)
computed_ciphertext, computed_tag = chacha_poly_encrypt(
key, iv, associated_data, plaintext
)
computed_result = ciphertext == computed_ciphertext and tag == computed_tag
assert result == computed_result
computed_plaintext = chacha_poly_decrypt(key, iv, associated_data, ciphertext, tag)
computed_result = plaintext == computed_plaintext
assert result == computed_result
@pytest.mark.parametrize("key, iv, plaintext, ciphertext, result", aes_vectors)
def test_aes(key, iv, plaintext, ciphertext, result):
key = unhexlify(key)
iv = unhexlify(iv)
plaintext = unhexlify(plaintext)
ciphertext = unhexlify(ciphertext)
computed_ciphertext = aes_cbc_encrypt(key, iv, plaintext)
computed_result = ciphertext == computed_ciphertext
assert result == computed_result
computed_plaintext = aes_cbc_decrypt(key, bytes(iv), ciphertext)
computed_result = plaintext == computed_plaintext
assert result == computed_result
|
trezor/trezor-crypto
|
tests/test_wycheproof.py
|
Python
|
mit
| 21,271 | 0.000658 |
# -*- coding: utf-8 -*-
from .common import *
class ReferenceDescriptorTest(TestCase):
def setUp(self):
self.reference_descriptor = ReferenceDescriptor.objects.create(
content_type=ContentType.objects.get_for_model(Model))
self.client = JSONClient()
def test_reference_descriptor_list(self):
url = reverse('business-logic:rest:reference-descriptor-list')
response = self.client.get(url)
self.assertEqual(200, response.status_code)
_json = response_json(response)
self.assertIsInstance(_json, list)
self.assertEqual(1, len(_json))
descriptor = _json[0]
model = 'test_app.Model'
self.assertEqual(model, descriptor['name'])
self.assertEqual('Test Model', descriptor['verbose_name'])
self.assertEqual(reverse('business-logic:rest:reference-list', kwargs=dict(model=model)), descriptor['url'])
def test_unregistered_reference_list_not_found(self):
model = 'business_logic.ReferenceDescriptor'
url = reverse('business-logic:rest:reference-list', kwargs=dict(model=model))
response = self.client.get(url)
self.assertEqual(404, response.status_code)
def test_notexists_model_not_found(self):
for model in ('ooo.XXX', 'password'):
url = reverse('business-logic:rest:reference-list', kwargs=dict(model=model))
response = self.client.get(url)
self.assertEqual(404, response.status_code)
class ReferenceListTest(TestCase):
def setUp(self):
self.reference_descriptor = ReferenceDescriptor.objects.create(
content_type=ContentType.objects.get_for_model(Model))
self.client = JSONClient()
model = 'test_app.Model'
self.url = reverse('business-logic:rest:reference-list', kwargs=dict(model=model))
self.test_models = []
for i in range(11):
self.test_models.append(Model.objects.create(string_value='str_{}'.format(str(i) * 3)))
def test_reference_list(self):
response = self.client.get(self.url)
self.assertEqual(200, response.status_code)
_json = response_json(response)
self.assertIsInstance(_json, dict)
self.assertEqual(11, len(_json['results']))
reference = _json['results'][0]
self.assertEqual(self.test_models[0].id, reference['id'])
self.assertEqual(str(self.test_models[0]), reference['name'])
def test_reference_list_search_not_configured(self):
response = self.client.get(self.url, dict(search='111'))
self.assertEqual(400, response.status_code)
_json = response_json(response)
self.assertEqual(
['ReferenceDescriptor for `test_app.Model` are not configured: incorrect `search_fields` field'], _json)
def test_reference_list_search(self):
self.reference_descriptor.search_fields = 'string_value'
self.reference_descriptor.save()
response = self.client.get(self.url, dict(search='111'))
_json = response_json(response)
self.assertEqual(1, len(_json['results']))
def test_reference_list_search_related_fields(self):
self.reference_descriptor.search_fields = 'foreign_value__string_value'
self.reference_descriptor.save()
test_model = self.test_models[2]
test_related_model = RelatedModel.objects.create(string_value='xxx')
test_model.foreign_value = test_related_model
test_model.save()
response = self.client.get(self.url, dict(search='xxx'))
_json = response_json(response)
self.assertEqual(1, len(_json['results']))
reference = _json['results'][0]
self.assertEqual(test_model.id, reference['id'])
class ReferenceViewTest(TestCase):
def setUp(self):
self.reference_descriptor = ReferenceDescriptor.objects.create(
content_type=ContentType.objects.get_for_model(Model))
self.client = JSONClient()
model = 'test_app.Model'
self.test_model = Model.objects.create(string_value='str_value')
self.url = reverse('business-logic:rest:reference', kwargs=dict(model=model, pk=self.test_model.id))
def test_reference_view(self):
response = self.client.get(self.url)
self.assertEqual(200, response.status_code)
_json = response_json(response)
self.assertIsInstance(_json, dict)
self.assertEqual(self.test_model.id, _json['id'])
self.assertEqual(str(self.test_model), _json['name'])
def test_reference_view_name_field(self):
self.reference_descriptor.name_field = 'string_value'
self.reference_descriptor.save()
response = self.client.get(self.url)
self.assertEqual(200, response.status_code)
_json = response_json(response)
self.assertIsInstance(_json, dict)
self.assertEqual(self.test_model.id, _json['id'])
self.assertEqual(self.test_model.string_value, _json['name'])
|
vlfedotov/django-business-logic
|
tests/rest/test_reference.py
|
Python
|
mit
| 4,996 | 0.001401 |
"""
Define a few commands
"""
from .meeseeksbox.utils import Session, fix_issue_body, fix_comment_body
from .meeseeksbox.scopes import admin, write, everyone
from textwrap import dedent
def _format_doc(function, name):
if not function.__doc__:
doc = " "
else:
doc = function.__doc__.splitlines()
first, other = doc[0], "\n".join(doc[1:])
return "`@meeseeksdev {} {}` ({}) \n{} ".format(name, first, function.scope, other)
def help_make(commands):
data = "\n".join([_format_doc(v, k) for k, v in commands.items()])
@everyone
def help(*, session, payload, arguments):
comment_url = payload["issue"]["comments_url"]
session.post_comment(
comment_url,
dedent(
"""The following commands are available:\n\n{}
""".format(
data
)
),
)
return help
@write
def close(*, session, payload, arguments, local_config=None):
session.ghrequest("PATCH", payload["issue"]["url"], json={"state": "closed"})
@write
def open(*, session, payload, arguments, local_config=None):
session.ghrequest("PATCH", payload["issue"]["url"], json={"state": "open"})
@write
def migrate_issue_request(
*, session: Session, payload: dict, arguments: str, local_config=None
):
"""[to] {org}/{repo}
    Need to be admin on the target repo. Replicate all comments on the target repo and close the current one.
"""
"""Todo:
    - Work through pagination of comments
    - Work through pagination of labels
    - Link to non-migrated labels.
"""
if arguments.startswith("to "):
arguments = arguments[3:]
org_repo = arguments
org, repo = arguments.split("/")
target_session = yield org_repo
if not target_session:
session.post_comment(
payload["issue"]["comments_url"],
body="I'm afraid I can't do that. Maybe I need to be installed on target repository ?\n"
"Click [here](https://github.com/integrations/meeseeksdev/installations/new) to do that.".format(
botname="meeseeksdev"
),
)
return
issue_title = payload["issue"]["title"]
issue_body = payload["issue"]["body"]
original_org = payload["organization"]["login"]
original_repo = payload["repository"]["name"]
original_poster = payload["issue"]["user"]["login"]
original_number = payload["issue"]["number"]
migration_requester = payload["comment"]["user"]["login"]
request_id = payload["comment"]["id"]
original_labels = [l["name"] for l in payload["issue"]["labels"]]
if original_labels:
available_labels = target_session.ghrequest(
"GET",
"https://api.github.com/repos/{org}/{repo}/labels".format(
org=org, repo=repo
),
None,
).json()
available_labels = [l["name"] for l in available_labels]
migrate_labels = [l for l in original_labels if l in available_labels]
not_set_labels = [l for l in original_labels if l not in available_labels]
new_response = target_session.create_issue(
org,
repo,
issue_title,
fix_issue_body(
issue_body,
original_poster,
original_repo,
original_org,
original_number,
migration_requester,
),
labels=migrate_labels,
)
new_issue = new_response.json()
new_comment_url = new_issue["comments_url"]
original_comments = session.ghrequest(
"GET", payload["issue"]["comments_url"], None
).json()
for comment in original_comments:
if comment["id"] == request_id:
continue
body = comment["body"]
op = comment["user"]["login"]
url = comment["html_url"]
target_session.post_comment(
new_comment_url,
body=fix_comment_body(body, op, url, original_org, original_repo),
)
if not_set_labels:
body = "I was not able to apply the following label(s): %s " % ",".join(
not_set_labels
)
target_session.post_comment(new_comment_url, body=body)
session.post_comment(
payload["issue"]["comments_url"],
body="Done as {}/{}#{}.".format(org, repo, new_issue["number"]),
)
session.ghrequest("PATCH", payload["issue"]["url"], json={"state": "closed"})
from .meeseeksbox.scopes import pr_author, write
from .meeseeksbox.commands import tag, untag
@pr_author
@write
def ready(*, session, payload, arguments, local_config=None):
"""{no arguments}
Remove "waiting for author" tag, adds "need review" tag. Can also be issued
if you are the current PR author even if you are not admin.
"""
tag(session, payload, "need review")
untag(session, payload, "waiting for author")
@write
def merge(*, session, payload, arguments, method="merge", local_config=None):
print("===== merging =====")
if arguments:
if arguments not in {"merge", "squash", "rebase"}:
print("don't know how to merge with methods", arguments)
return
else:
method = arguments
prnumber = payload["issue"]["number"]
org_name = payload["repository"]["owner"]["login"]
repo_name = payload["repository"]["name"]
# collect extended payload on the PR
print("== Collecting data on Pull-request...")
r = session.ghrequest(
"GET",
"https://api.github.com/repos/{}/{}/pulls/{}".format(
org_name, repo_name, prnumber
),
json=None,
)
pr_data = r.json()
head_sha = pr_data["head"]["sha"]
mergeable = pr_data["mergeable"]
repo_name = pr_data["head"]["repo"]["name"]
if mergeable:
resp = session.ghrequest(
"PUT",
"https://api.github.com/repos/{}/{}/pulls/{}/merge".format(
org_name, repo_name, prnumber
),
json={"sha": head_sha, "merge_method": method},
override_accept_header="application/vnd.github.polaris-preview+json",
)
print("------------")
print(resp.json())
print("------------")
resp.raise_for_status()
else:
print("Not mergeable", pr_data["mergeable"])
###
# Lock and Unlock are not yet available for integration.
###
# def _lock_primitive(meth,*, session, payload, arguments):
# number = payload['issue']['number']
# org_name = payload['repository']['owner']['login']
# repo_name = payload['repository']['name']
# session.ghrequest('PUT', 'https://api.github.com/repos/{}/{}/issues/{}/lock'.format(org_name, repo_name, number))
#
# @admin
# def lock(**kwargs):
# _lock_primitive('PUT', **kwargs)
#
# @admin
# def unlock(**kwargs):
# _lock_primitive('DELETE', **kwargs)
|
MeeseeksBox/MeeseeksDev
|
meeseeksdev/commands.py
|
Python
|
mit
| 6,864 | 0.002477 |
#!BPY
"""
Name: 'MDL (.mdl)'
Blender: 244
Group: 'Export'
Tooltip: 'Export to Quake file format (.mdl).'
"""
__author__ = 'Andrew Denner'
__version__ = '0.1.3'
__url__ = ["Andrew's site, http://www.btinternet.com/~chapterhonour/",
"Can also be contacted through http://celephais.net/board", "blender", "elysiun"]
__email__ = ["Andrew Denner, andrew.denner:btinternet*com", "scripts"]
__bpydoc__ = """\
This script exports a Quake 1 model file (MDL).
Based wholesale off the MD2 export by Bob Holcomb, with the help of David Henry's MDL format guide
"""
# ***** BEGIN GPL LICENSE BLOCK *****
#
# Script copyright (C): Andrew Denner(portions Bob Holcomb)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
# --------------------------------------------------------------------------
import Blender
from Blender import *
from Blender.Draw import *
from Blender.BGL import *
from Blender.Window import *
import struct, string
from types import *
from math import *
######################################################
# GUI Loader
######################################################
# Export globals
g_filename=Create("default.mdl")
g_frame_filename=Create("default")
g_filename_search=Create("")
g_frame_search=Create("default")
user_frame_list=[]
#Globals
g_scale=Create(1.0)
g_fixuvs=Create(0)
g_flags=Create(0)
# Events
EVENT_NOEVENT=1
EVENT_SAVE_MDL=2
EVENT_CHOOSE_FILENAME=3
EVENT_CHOOSE_FRAME=4
EVENT_EXIT=100
######################################################
# Callbacks for Window functions
######################################################
def filename_callback(input_filename):
global g_filename
g_filename.val=input_filename
def frame_callback(input_frame):
global g_frame_filename
g_frame_filename.val=input_frame
def draw_gui():
global g_scale
global g_fixuvs
global g_flags
global g_filename
global g_frame_filename
global EVENT_NOEVENT,EVENT_SAVE_MDL,EVENT_CHOOSE_FILENAME,EVENT_CHOOSE_FRAME,EVENT_EXIT
########## Titles
glClear(GL_COLOR_BUFFER_BIT)
glRasterPos2d(10, 140)
Text("MDL Export")
######### Parameters GUI Buttons
######### MDL Filename text entry
g_filename = String("MDL file to save: ", EVENT_NOEVENT, 10, 75, 210, 18,
g_filename.val, 255, "MDL file to save")
########## MDL File Search Button
Button("Search",EVENT_CHOOSE_FILENAME,220,75,80,18)
########## MDL Frame List Text entry
g_frame_filename = String("Frame List file to load: ", EVENT_NOEVENT, 10, 55, 210, 18,
g_frame_filename.val, 255, "Frame List to load-overrides MDL defaults")
g_flags = Number("Model Flags", EVENT_NOEVENT, 10, 115, 210, 18,
g_flags.val, 0, 1<<15, "Specify the combination of flags you desire")
########## Frame List Search Button
Button("Search",EVENT_CHOOSE_FRAME,220,55,80,18)
########## Scale slider-default is 1
g_scale = Slider("Scale Factor: ", EVENT_NOEVENT, 10, 95, 210, 18,
g_scale.val, 0.001, 10.0, 1.0, "Scale factor for object Model");
########## Fix UVs options
g_fixuvs = Menu("Fix UV coords %t|Don't Fix UVs%x0|Translate points %x1|Clamp points %x2",
					EVENT_NOEVENT, 10, 35, 210, 18, g_fixuvs.val, "Method for handling UVs that fall outside the skin range")
######### Draw and Exit Buttons
Button("Export",EVENT_SAVE_MDL , 10, 10, 80, 18)
Button("Exit",EVENT_EXIT , 170, 10, 80, 18)
def event(evt, val):
if (evt == QKEY and not val):
Exit()
def bevent(evt):
global g_filename
global g_frame_filename
global EVENT_NOEVENT,EVENT_SAVE_MDL,EVENT_EXIT
######### Manages GUI events
if (evt==EVENT_EXIT):
Blender.Draw.Exit()
elif (evt==EVENT_CHOOSE_FILENAME):
FileSelector(filename_callback, "MDL File Selection")
elif (evt==EVENT_CHOOSE_FRAME):
FileSelector(frame_callback, "Frame Selection")
elif (evt==EVENT_SAVE_MDL):
if (g_filename.val == "model"):
save_mdl("blender.mdl")
Blender.Draw.Exit()
return
else:
save_mdl(g_filename.val)
Blender.Draw.Exit()
return
Register(draw_gui, event, bevent)
######################################################
# MDL Model Constants
######################################################
MDL_MAX_TRIANGLES=2048
MDL_MAX_VERTICES=1024
MDL_MAX_TEXCOORDS=1024
MDL_MAX_FRAMES=256
MDL_MAX_SKINS=16
MDL_MAX_FRAMESIZE=(MDL_MAX_VERTICES * 4 + 128)
MDL_FRAME_NAME_LIST=(("stand",1,10)),
#10 frames
#pretty sure these are the same
MDL_NORMALS=((-0.525731, 0.000000, 0.850651),
(-0.442863, 0.238856, 0.864188),
(-0.295242, 0.000000, 0.955423),
(-0.309017, 0.500000, 0.809017),
(-0.162460, 0.262866, 0.951056),
(0.000000, 0.000000, 1.000000),
(0.000000, 0.850651, 0.525731),
(-0.147621, 0.716567, 0.681718),
(0.147621, 0.716567, 0.681718),
(0.000000, 0.525731, 0.850651),
(0.309017, 0.500000, 0.809017),
(0.525731, 0.000000, 0.850651),
(0.295242, 0.000000, 0.955423),
(0.442863, 0.238856, 0.864188),
(0.162460, 0.262866, 0.951056),
(-0.681718, 0.147621, 0.716567),
(-0.809017, 0.309017, 0.500000),
(-0.587785, 0.425325, 0.688191),
(-0.850651, 0.525731, 0.000000),
(-0.864188, 0.442863, 0.238856),
(-0.716567, 0.681718, 0.147621),
(-0.688191, 0.587785, 0.425325),
(-0.500000, 0.809017, 0.309017),
(-0.238856, 0.864188, 0.442863),
(-0.425325, 0.688191, 0.587785),
(-0.716567, 0.681718, -0.147621),
(-0.500000, 0.809017, -0.309017),
(-0.525731, 0.850651, 0.000000),
(0.000000, 0.850651, -0.525731),
(-0.238856, 0.864188, -0.442863),
(0.000000, 0.955423, -0.295242),
(-0.262866, 0.951056, -0.162460),
(0.000000, 1.000000, 0.000000),
(0.000000, 0.955423, 0.295242),
(-0.262866, 0.951056, 0.162460),
(0.238856, 0.864188, 0.442863),
(0.262866, 0.951056, 0.162460),
(0.500000, 0.809017, 0.309017),
(0.238856, 0.864188, -0.442863),
(0.262866, 0.951056, -0.162460),
(0.500000, 0.809017, -0.309017),
(0.850651, 0.525731, 0.000000),
(0.716567, 0.681718, 0.147621),
(0.716567, 0.681718, -0.147621),
(0.525731, 0.850651, 0.000000),
(0.425325, 0.688191, 0.587785),
(0.864188, 0.442863, 0.238856),
(0.688191, 0.587785, 0.425325),
(0.809017, 0.309017, 0.500000),
(0.681718, 0.147621, 0.716567),
(0.587785, 0.425325, 0.688191),
(0.955423, 0.295242, 0.000000),
(1.000000, 0.000000, 0.000000),
(0.951056, 0.162460, 0.262866),
(0.850651, -0.525731, 0.000000),
(0.955423, -0.295242, 0.000000),
(0.864188, -0.442863, 0.238856),
(0.951056, -0.162460, 0.262866),
(0.809017, -0.309017, 0.500000),
(0.681718, -0.147621, 0.716567),
(0.850651, 0.000000, 0.525731),
(0.864188, 0.442863, -0.238856),
(0.809017, 0.309017, -0.500000),
(0.951056, 0.162460, -0.262866),
(0.525731, 0.000000, -0.850651),
(0.681718, 0.147621, -0.716567),
(0.681718, -0.147621, -0.716567),
(0.850651, 0.000000, -0.525731),
(0.809017, -0.309017, -0.500000),
(0.864188, -0.442863, -0.238856),
(0.951056, -0.162460, -0.262866),
(0.147621, 0.716567, -0.681718),
(0.309017, 0.500000, -0.809017),
(0.425325, 0.688191, -0.587785),
(0.442863, 0.238856, -0.864188),
(0.587785, 0.425325, -0.688191),
(0.688191, 0.587785, -0.425325),
(-0.147621, 0.716567, -0.681718),
(-0.309017, 0.500000, -0.809017),
(0.000000, 0.525731, -0.850651),
(-0.525731, 0.000000, -0.850651),
(-0.442863, 0.238856, -0.864188),
(-0.295242, 0.000000, -0.955423),
(-0.162460, 0.262866, -0.951056),
(0.000000, 0.000000, -1.000000),
(0.295242, 0.000000, -0.955423),
(0.162460, 0.262866, -0.951056),
(-0.442863, -0.238856, -0.864188),
(-0.309017, -0.500000, -0.809017),
(-0.162460, -0.262866, -0.951056),
(0.000000, -0.850651, -0.525731),
(-0.147621, -0.716567, -0.681718),
(0.147621, -0.716567, -0.681718),
(0.000000, -0.525731, -0.850651),
(0.309017, -0.500000, -0.809017),
(0.442863, -0.238856, -0.864188),
(0.162460, -0.262866, -0.951056),
(0.238856, -0.864188, -0.442863),
(0.500000, -0.809017, -0.309017),
(0.425325, -0.688191, -0.587785),
(0.716567, -0.681718, -0.147621),
(0.688191, -0.587785, -0.425325),
(0.587785, -0.425325, -0.688191),
(0.000000, -0.955423, -0.295242),
(0.000000, -1.000000, 0.000000),
(0.262866, -0.951056, -0.162460),
(0.000000, -0.850651, 0.525731),
(0.000000, -0.955423, 0.295242),
(0.238856, -0.864188, 0.442863),
(0.262866, -0.951056, 0.162460),
(0.500000, -0.809017, 0.309017),
(0.716567, -0.681718, 0.147621),
(0.525731, -0.850651, 0.000000),
(-0.238856, -0.864188, -0.442863),
(-0.500000, -0.809017, -0.309017),
(-0.262866, -0.951056, -0.162460),
(-0.850651, -0.525731, 0.000000),
(-0.716567, -0.681718, -0.147621),
(-0.716567, -0.681718, 0.147621),
(-0.525731, -0.850651, 0.000000),
(-0.500000, -0.809017, 0.309017),
(-0.238856, -0.864188, 0.442863),
(-0.262866, -0.951056, 0.162460),
(-0.864188, -0.442863, 0.238856),
(-0.809017, -0.309017, 0.500000),
(-0.688191, -0.587785, 0.425325),
(-0.681718, -0.147621, 0.716567),
(-0.442863, -0.238856, 0.864188),
(-0.587785, -0.425325, 0.688191),
(-0.309017, -0.500000, 0.809017),
(-0.147621, -0.716567, 0.681718),
(-0.425325, -0.688191, 0.587785),
(-0.162460, -0.262866, 0.951056),
(0.442863, -0.238856, 0.864188),
(0.162460, -0.262866, 0.951056),
(0.309017, -0.500000, 0.809017),
(0.147621, -0.716567, 0.681718),
(0.000000, -0.525731, 0.850651),
(0.425325, -0.688191, 0.587785),
(0.587785, -0.425325, 0.688191),
(0.688191, -0.587785, 0.425325),
(-0.955423, 0.295242, 0.000000),
(-0.951056, 0.162460, 0.262866),
(-1.000000, 0.000000, 0.000000),
(-0.850651, 0.000000, 0.525731),
(-0.955423, -0.295242, 0.000000),
(-0.951056, -0.162460, 0.262866),
(-0.864188, 0.442863, -0.238856),
(-0.951056, 0.162460, -0.262866),
(-0.809017, 0.309017, -0.500000),
(-0.864188, -0.442863, -0.238856),
(-0.951056, -0.162460, -0.262866),
(-0.809017, -0.309017, -0.500000),
(-0.681718, 0.147621, -0.716567),
(-0.681718, -0.147621, -0.716567),
(-0.850651, 0.000000, -0.525731),
(-0.688191, 0.587785, -0.425325),
(-0.587785, 0.425325, -0.688191),
(-0.425325, 0.688191, -0.587785),
(-0.425325, -0.688191, -0.587785),
(-0.587785, -0.425325, -0.688191),
(-0.688191, -0.587785, -0.425325))
COLORMAP=(( 0, 0, 0), ( 15, 15, 15), ( 31, 31, 31), ( 47, 47, 47),
( 63, 63, 63), ( 75, 75, 75), ( 91, 91, 91), (107, 107, 107),
(123, 123, 123), (139, 139, 139), (155, 155, 155), (171, 171, 171),
(187, 187, 187), (203, 203, 203), (219, 219, 219), (235, 235, 235),
( 15, 11, 7), ( 23, 15, 11), ( 31, 23, 11), ( 39, 27, 15),
( 47, 35, 19), ( 55, 43, 23), ( 63, 47, 23), ( 75, 55, 27),
( 83, 59, 27), ( 91, 67, 31), ( 99, 75, 31), (107, 83, 31),
(115, 87, 31), (123, 95, 35), (131, 103, 35), (143, 111, 35),
( 11, 11, 15), ( 19, 19, 27), ( 27, 27, 39), ( 39, 39, 51),
( 47, 47, 63), ( 55, 55, 75), ( 63, 63, 87), ( 71, 71, 103),
( 79, 79, 115), ( 91, 91, 127), ( 99, 99, 139), (107, 107, 151),
(115, 115, 163), (123, 123, 175), (131, 131, 187), (139, 139, 203),
( 0, 0, 0), ( 7, 7, 0), ( 11, 11, 0), ( 19, 19, 0),
( 27, 27, 0), ( 35, 35, 0), ( 43, 43, 7), ( 47, 47, 7),
( 55, 55, 7), ( 63, 63, 7), ( 71, 71, 7), ( 75, 75, 11),
( 83, 83, 11), ( 91, 91, 11), ( 99, 99, 11), (107, 107, 15),
( 7, 0, 0), ( 15, 0, 0), ( 23, 0, 0), ( 31, 0, 0),
( 39, 0, 0), ( 47, 0, 0), ( 55, 0, 0), ( 63, 0, 0),
( 71, 0, 0), ( 79, 0, 0), ( 87, 0, 0), ( 95, 0, 0),
(103, 0, 0), (111, 0, 0), (119, 0, 0), (127, 0, 0),
( 19, 19, 0), ( 27, 27, 0), ( 35, 35, 0), ( 47, 43, 0),
( 55, 47, 0), ( 67, 55, 0), ( 75, 59, 7), ( 87, 67, 7),
( 95, 71, 7), (107, 75, 11), (119, 83, 15), (131, 87, 19),
(139, 91, 19), (151, 95, 27), (163, 99, 31), (175, 103, 35),
( 35, 19, 7), ( 47, 23, 11), ( 59, 31, 15), ( 75, 35, 19),
( 87, 43, 23), ( 99, 47, 31), (115, 55, 35), (127, 59, 43),
(143, 67, 51), (159, 79, 51), (175, 99, 47), (191, 119, 47),
(207, 143, 43), (223, 171, 39), (239, 203, 31), (255, 243, 27),
( 11, 7, 0), ( 27, 19, 0), ( 43, 35, 15), ( 55, 43, 19),
( 71, 51, 27), ( 83, 55, 35), ( 99, 63, 43), (111, 71, 51),
(127, 83, 63), (139, 95, 71), (155, 107, 83), (167, 123, 95),
(183, 135, 107), (195, 147, 123), (211, 163, 139), (227, 179, 151),
(171, 139, 163), (159, 127, 151), (147, 115, 135), (139, 103, 123),
(127, 91, 111), (119, 83, 99), (107, 75, 87), ( 95, 63, 75),
( 87, 55, 67), ( 75, 47, 55), ( 67, 39, 47), ( 55, 31, 35),
( 43, 23, 27), ( 35, 19, 19), ( 23, 11, 11), ( 15, 7, 7),
(187, 115, 159), (175, 107, 143), (163, 95, 131), (151, 87, 119),
(139, 79, 107), (127, 75, 95), (115, 67, 83), (107, 59, 75),
( 95, 51, 63), ( 83, 43, 55), ( 71, 35, 43), ( 59, 31, 35),
( 47, 23, 27), ( 35, 19, 19), ( 23, 11, 11), ( 15, 7, 7),
(219, 195, 187), (203, 179, 167), (191, 163, 155), (175, 151, 139),
(163, 135, 123), (151, 123, 111), (135, 111, 95), (123, 99, 83),
(107, 87, 71), ( 95, 75, 59), ( 83, 63, 51), ( 67, 51, 39),
( 55, 43, 31), ( 39, 31, 23), ( 27, 19, 15), ( 15, 11, 7),
(111, 131, 123), (103, 123, 111), ( 95, 115, 103), ( 87, 107, 95),
( 79, 99, 87), ( 71, 91, 79), ( 63, 83, 71), ( 55, 75, 63),
( 47, 67, 55), ( 43, 59, 47), ( 35, 51, 39), ( 31, 43, 31),
( 23, 35, 23), ( 15, 27, 19), ( 11, 19, 11), ( 7, 11, 7),
(255, 243, 27), (239, 223, 23), (219, 203, 19), (203, 183, 15),
(187, 167, 15), (171, 151, 11), (155, 131, 7), (139, 115, 7),
(123, 99, 7), (107, 83, 0), ( 91, 71, 0), ( 75, 55, 0),
( 59, 43, 0), ( 43, 31, 0), ( 27, 15, 0), ( 11, 7, 0),
( 0, 0, 255), ( 11, 11, 239), ( 19, 19, 223), ( 27, 27, 207),
( 35, 35, 191), ( 43, 43, 175), ( 47, 47, 159), ( 47, 47, 143),
( 47, 47, 127), ( 47, 47, 111), ( 47, 47, 95), ( 43, 43, 79),
( 35, 35, 63), ( 27, 27, 47), ( 19, 19, 31), ( 11, 11, 15),
( 43, 0, 0), ( 59, 0, 0), ( 75, 7, 0), ( 95, 7, 0),
(111, 15, 0), (127, 23, 7), (147, 31, 7), (163, 39, 11),
(183, 51, 15), (195, 75, 27), (207, 99, 43), (219, 127, 59),
(227, 151, 79), (231, 171, 95), (239, 191, 119), (247, 211, 139),
(167, 123, 59), (183, 155, 55), (199, 195, 55), (231, 227, 87),
(127, 191, 255), (171, 231, 255), (215, 255, 255), (103, 0, 0),
(139, 0, 0), (179, 0, 0), (215, 0, 0), (255, 0, 0),
(255, 243, 147), (255, 247, 199), (255, 255, 255), (159, 91, 83))
######################################################
# MDL data structures
######################################################
class mdl_point:
vertices=[]
lightnormalindex=0
binary_format="<3BB"
def __init__(self):
self.vertices=[0]*3
self.lightnormalindex=0
def save(self, file):
temp_data=[0]*4
temp_data[0]=self.vertices[0]
temp_data[1]=self.vertices[1]
temp_data[2]=self.vertices[2]
temp_data[3]=self.lightnormalindex
data=struct.pack(self.binary_format, temp_data[0], temp_data[1], temp_data[2], temp_data[3])
file.write(data)
def dump(self):
print "MDL Point Structure"
print "vertex X: ", self.vertices[0]
print "vertex Y: ", self.vertices[1]
print "vertex Z: ", self.vertices[2]
print "lightnormalindex: ",self.lightnormalindex
print ""
class mdl_face:
facesfront=1
vertex_index=[]
binary_format="<i3i"
def __init__(self):
self.facesfront = 1
self.vertex_index = [ 0, 0, 0 ]
def save(self, file):
temp_data=[0]*4
temp_data[0]=self.facesfront
#reverse order to flip polygons after x transform
temp_data[1]=self.vertex_index[1]
temp_data[2]=self.vertex_index[0]
temp_data[3]=self.vertex_index[2]
data=struct.pack(self.binary_format,temp_data[0],temp_data[1],temp_data[2],temp_data[3])
file.write(data)
def dump (self):
print "MDL Face Structure"
print "facesfront: ", self.facesfront
print "vertex 1 index: ", self.vertex_index[0]
print "vertex 2 index: ", self.vertex_index[1]
print "vertex 3 index: ", self.vertex_index[2]
print ""
class mdl_tex_coord:
onseam=0
u=0
v=0
binary_format="<i2i"
def __init__(self):
self.onseam=0
self.u=0
self.v=0
def save(self, file):
temp_data=[0]*3
temp_data[0]=self.onseam
temp_data[1]=self.u
temp_data[2]=self.v
data=struct.pack(self.binary_format, temp_data[0], temp_data[1], temp_data[2])
file.write(data)
def dump (self):
print "MDL Texture Coordinate Structure"
print "onseam: ",self.onseam
print "texture coordinate u: ",self.u
print "texture coordinate v: ",self.v
print ""
class mdl_skin:
group = 0
skin=[]
dim=[]
binary_format="<i"
def __init__(self):
self.group=0
self.skin=[]
self.dim=[0]*2
self.dim[0]=256
self.dim[1]=256 #defaults
def save(self, file):
temp_data=self.group
data=struct.pack(self.binary_format, temp_data)
file.write(data)
#write skin
for j in range (0,self.dim[1]):
for i in range(0,self.dim[0]):
data=struct.pack("<B",skin_data[(i+1)*self.dim[1]-j-1] ) #skin indices
file.write(data)
def dump (self):
print "MDL Skin"
print "group: ",self.group
print ""
class mdl_frame:
grouped=0
bboxmin=[]
bboxmax=[]
name=[]
vertices=[]
binary_format="<i4B4B16s"
def __init__(self):
self.grouped=0
self.bboxmin=[0]*4
self.bboxmax=[0]*4
self.name=""
self.vertices=[]
def save(self, file):
temp_data=[0]*8
temp_data[0]=self.grouped
temp_data[1]=self.bboxmin[0]
temp_data[2]=self.bboxmin[1]
temp_data[3]=self.bboxmin[2]
temp_data[4]=self.bboxmax[0]
temp_data[5]=self.bboxmax[1]
temp_data[6]=self.bboxmax[2]
temp_data[7]=self.name
data=struct.pack(self.binary_format, temp_data[0],temp_data[1],temp_data[2],temp_data[3],0,temp_data[4],temp_data[5],temp_data[6],0,temp_data[7])
file.write(data)
def dump (self):
print "MDL Frame"
print "min x: ",self.bboxmin[0]
print "min y: ",self.bboxmin[1]
print "min z: ",self.bboxmin[2]
print "max x: ",self.bboxmax[0]
print "max y: ",self.bboxmax[1]
print "max z: ",self.bboxmax[2]
print "name: ",self.name
print ""
class mdl_obj:
#Header Structure
ident=0 #int This is used to identify the file
version=0 #int The version number of the file (Must be 6)
scale=[] #vec_3 global scale
translate=[] #vec_3 global offset
boundingradius=0 #float nobody knows
eyeposition=[] #vec_3 eye position
num_skins=0 #int The number of skins associated with the model
skin_width=0 #int The skin width in pixels
skin_height=0 #int The skin height in pixels
num_vertices=0 #int The number of vertices (constant for each frame)
num_faces=0 #int The number of faces (polygons)
num_frames=0 #int The number of animation frames
synctype=0 #int synchronised?
flags=0 #int effect flags
size=0 #float size
binary_format="<2i10f8if" #little-endian (<), i ints, f floats etc
#mdl data objects
skins=[]
tex_coords=[]
faces=[]
frames=[]
def __init__ (self):
self.scale=[0.0]*3
self.translate=[0.0]*3
self.eyeposition=[0.0]*3
self.tex_coords=[]
self.faces=[]
self.frames=[]
self.skins=[]
def save(self, file):
temp_data=[0]*21
temp_data[0]=self.ident
temp_data[1]=self.version
temp_data[2]=float(self.scale[0])
temp_data[3]=float(self.scale[1])
temp_data[4]=float(self.scale[2])
temp_data[5]=float(self.translate[0])
temp_data[6]=float(self.translate[1])
temp_data[7]=float(self.translate[2])
temp_data[8]=float(self.boundingradius)
temp_data[9]=float(self.eyeposition[0])
temp_data[10]=float(self.eyeposition[1])
temp_data[11]=float(self.eyeposition[2])
temp_data[12]=self.num_skins
temp_data[13]=self.skin_width
temp_data[14]=self.skin_height
temp_data[15]=self.num_vertices
temp_data[16]=self.num_faces
temp_data[17]=self.num_frames
temp_data[18]=self.synctype
temp_data[19]=self.flags
temp_data[20]=float(self.size)
data=struct.pack(self.binary_format, temp_data[0],temp_data[1],temp_data[2],temp_data[3],temp_data[4],temp_data[5],temp_data[6],temp_data[7],temp_data[8],temp_data[9],temp_data[10],temp_data[11],temp_data[12],temp_data[13],temp_data[14],temp_data[15],temp_data[16],temp_data[17],temp_data[18],temp_data[19],temp_data[20])
file.write(data)
#write the skin data
for skin in self.skins:
skin.save(file)
#save the texture coordinates
for tex_coord in self.tex_coords:
tex_coord.save(file)
#save the face info
for face in self.faces:
face.save(file)
#save the frames
for frame in self.frames:
frame.save(file)
for vert in frame.vertices:
vert.save(file)
def dump (self):
print "Header Information"
print "ident: ", self.ident
print "version: ", self.version
print "scale x: ", self.scale[0]
print "scale y: ", self.scale[1]
print "scale z: ", self.scale[2]
print "offset x: ", self.translate[0]
print "offset y: ", self.translate[1]
print "offset z: ", self.translate[2]
print "boundingradius: ",self.boundingradius
print "eyeposition x: ", self.eyeposition[0]
print "eyeposition y: ", self.eyeposition[1]
print "eyeposition z: ", self.eyeposition[2]
print "number of skins: ", self.num_skins
print "skin width: ", self.skin_width
print "skin height: ", self.skin_height
print "number of vertices: ", self.num_vertices
print "number of faces: ", self.num_faces
print "number of frames: ", self.num_frames
print "synctype: ", self.synctype
print "flags: ", self.flags
print "size: ", self.size
print ""
######################################################
# Validation
######################################################
def FindColorIndex(r, g, b):
for i in range(0,256):
if ((COLORMAP[i][0] == r) & (COLORMAP[i][1] == g) & (COLORMAP[i][2] == b)):
return i
return -1
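# Note: FindColorIndex above scans all 256 palette entries for every pixel
# queried. An illustrative (untested) alternative is to precompute a lookup
# dictionary once and query it in constant time; the names below are
# hypothetical, not part of the original script:
#
#   COLORMAP_INDEX = dict(((r, g, b), i) for i, (r, g, b) in enumerate(COLORMAP))
#   def FindColorIndexFast(r, g, b):
#       return COLORMAP_INDEX.get((r, g, b), -1)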
def validation(object):
global user_frame_list
if object.getEuler('worldspace')!=Blender.Mathutils.Euler(0.0,0.0,0.0):
print "object.rot: ", object.getEuler('worldspace')
object.setEuler([0.0,0.0,0.0])
print "Object is rotated-You should rotate the mesh verts, not the object"
result=Blender.Draw.PupMenu("Object is rotated-You should rotate the mesh verts, not the object-fixing for you")
#get access to the mesh data
mesh=object.getData(False, True) #get the object (not just name) and the Mesh, not NMesh
#check it's composed of only tri's
result=0
for face in mesh.faces:
if len(face.verts)!=3:
#select the face for future triangulation
face.sel=1
if result==0: #first time we have this problem, don't pop-up a window every time it finds a quad
print "Model not made entirely of triangles"
result=Blender.Draw.PupMenu("Model not made entirely out of Triangles-Convert?%t|YES|NO")
#triangulate or quit
if result==1:
#selecting face mode
Blender.Mesh.Mode(3)
editmode = Window.EditMode() # are we in edit mode? If so ...
if editmode: Window.EditMode(0) # leave edit mode before getting the mesh
mesh.quadToTriangle(0) #use closest vertices in breaking a quad
elif result==2:
return False #user will fix (I guess)
#check it has UV coordinates
if mesh.vertexUV==True:
print "Vertex UV not supported"
result=Blender.Draw.PupMenu("Vertex UV not suppored-Use Sticky UV%t|Quit")
return False
elif mesh.faceUV==True:
for face in mesh.faces:
if(len(face.uv)==3):
pass
else:
print "Model's vertices do not all have UV"
result=Blender.Draw.PupMenu("Model's vertices do not all have UV%t|Quit")
return False
else:
print "Model does not have UV (face or vertex)"
result=Blender.Draw.PupMenu("Model does not have UV (face or vertex)%t|Quit")
return False
#check it has an associated texture map
last_face=""
last_face=mesh.faces[0].image
if last_face=="":
print "Model does not have a texture Map"
result=Blender.Draw.PupMenu("Model does not have a texture Map%t|Quit")
return False
#check if each face uses the same texture map (only one allowed)
for face in mesh.faces:
mesh_image=face.image
if not mesh_image:
print "Model has a face without a texture Map"
result=Blender.Draw.PupMenu("Model has a face without a texture Map%t|Quit")
return False
if mesh_image!=last_face:
print "Model has more than 1 texture map assigned"
result=Blender.Draw.PupMenu("Model has more than 1 texture map assigned%t|Quit")
return False
size=mesh_image.getSize()
#is this really what the user wants
# if (size[0]!=256 or size[1]!=256):
# print "Texture map size is non-standard (not 256x256), it is: ",size[0],"x",size[1]
# result=Blender.Draw.PupMenu("Texture map size is non-standard (not 256x256), it is: "+str(size[0])+"x"+str(size[1])+": Continue?%t|YES|NO")
# if(result==2):
# return False
print "Texture map size is: ",size[0],"x",size[1]
global skin_data
p = size[0]*size[1]
warned = 0
skin_data = [0]*p
for i in range(0,size[0]):
for j in range (0,size[1]):
pixel = mesh_image.getPixelI(i,j)
color_index = FindColorIndex(pixel[0],pixel[1],pixel[2])
if color_index == -1:
skin_data[i*size[1]+j] = 0
if warned == 0:
print "Texture is not in Q1 palette"
warned = 1
else:
skin_data[i*size[1]+j] = color_index
#verify frame list data
user_frame_list=get_frame_list()
temp=user_frame_list[len(user_frame_list)-1]
temp_num_frames=temp[2]
#verify tri/vert/frame counts are within MDL standard
face_count=len(mesh.faces)
vert_count=len(mesh.verts)
frame_count=temp_num_frames
if face_count>MDL_MAX_TRIANGLES:
print "Number of triangles exceeds MDL standard: ", face_count,">",MDL_MAX_TRIANGLES
result=Blender.Draw.PupMenu("Number of triangles exceeds MDL standard: Continue?%t|YES|NO")
if(result==2):
return False
if vert_count>MDL_MAX_VERTICES:
print "Number of verticies exceeds MDL standard: ",vert_count,">",MDL_MAX_VERTICES
result=Blender.Draw.PupMenu("Number of verticies exceeds MDL standard: Continue?%t|YES|NO")
if(result==2):
return False
if frame_count>MDL_MAX_FRAMES:
print "Number of frames exceeds MDL standard: ",frame_count,">",MDL_MAX_FRAMES
result=Blender.Draw.PupMenu("Number of frames exceeds MDL standard: Continue?%t|YES|NO")
if(result==2):
return False
#model is OK
return True
######################################################
# Fill MDL data structure
######################################################
def fill_mdl(mdl, object):
#global defines
global user_frame_list
# global g_texture_path
global g_fixuvs
global g_flags
Blender.Window.DrawProgressBar(0.25,"Filling MDL Data")
#get a Mesh, not NMesh
mesh=object.getData(False, True)
#load up some intermediate data structures
tex_list={}
vt_list={}
backlist={}
tex_count=0
#create the vertex list from the first frame
Blender.Set("curframe", 1)
#header information
mdl.ident=1330660425 #"IDPO" magic number, packed little-endian
mdl.version=6
mdl.num_faces=len(mesh.faces)
#get the skin information
#use the first faces' image for the texture information
mesh_image=mesh.faces[0].image
size=mesh_image.getSize()
mdl.skin_width=size[0]
mdl.skin_height=size[1]
mdl.num_skins=1
#add a skin node to the mdl data structure
mdl.skins.append(mdl_skin())
mdl.skins[0].group = 0
mdl.skins[0].dim[0]=size[0]
mdl.skins[0].dim[1]=size[1]
#put texture information in the mdl structure
#build UV coord dictionary (prevents double entries-saves space)
for face in mesh.faces:
for i in range(0,3):
t=(face.uv[i])
vt=(face.verts[i].co)
#vertices are merged if they occupy the same uv coords AND the same space in frame 1
#this might cause undesired merging if models are not carefully designed
#would take far too long to check every vertex in every frame
tex_key=(t[0],t[1],vt[0],vt[1],vt[2])
if not tex_list.has_key(tex_key):
tex_list[tex_key]=tex_count
#add a dictionary here of entries for the vertex set
vt_list[face.index, i]=tex_count
backlist[tex_count] = face.verts[i].index
tex_count+=1
else:
vt_list[face.index, i]=tex_list[tex_key]
backlist[tex_count] = face.verts[i].index
mdl.num_vertices=tex_count
for this_tex in range (0, mdl.num_vertices):
mdl.tex_coords.append(mdl_tex_coord())
for coord, index in tex_list.iteritems():
mdl.tex_coords[index].u=floor(coord[0]*mdl.skin_width)
mdl.tex_coords[index].v=floor((1-coord[1])*mdl.skin_height)
if g_fixuvs.val == 1: #shift them
while mdl.tex_coords[index].u < 0:
mdl.tex_coords[index].u = mdl.tex_coords[index].u + mdl.skin_width
while mdl.tex_coords[index].u >= mdl.skin_width:
mdl.tex_coords[index].u = mdl.tex_coords[index].u - mdl.skin_width
while mdl.tex_coords[index].v < 0:
mdl.tex_coords[index].v = mdl.tex_coords[index].v + mdl.skin_height
while mdl.tex_coords[index].v >= mdl.skin_height:
mdl.tex_coords[index].v = mdl.tex_coords[index].v - mdl.skin_height
elif g_fixuvs.val == 2: #clamp them
if mdl.tex_coords[index].u < 0:
mdl.tex_coords[index].u = 0
if mdl.tex_coords[index].u >= mdl.skin_width:# mdl.skin_width:
mdl.tex_coords[index].u = mdl.skin_width - 1
#print "vertex ", index, " clamped"
if mdl.tex_coords[index].v < 0:
mdl.tex_coords[index].v = 0
if mdl.tex_coords[index].v >= mdl.skin_height:
mdl.tex_coords[index].v = mdl.skin_height - 1
#print "vertex ", index, " clamped"
#put faces in the mdl structure
#for each face in the model
for this_face in range(0, mdl.num_faces):
mdl.faces.append(mdl_face())
for i in range(0,3):
#blender uses indexed vertexes so this works very well
mdl.faces[this_face].vertex_index[i]=vt_list[mesh.faces[this_face].index, i]
#get the frame list
user_frame_list=get_frame_list()
if user_frame_list=="default":
mdl.num_frames=10
else:
temp=user_frame_list[len(user_frame_list)-1] #last item
mdl.num_frames=temp[2] #last frame number
progress=0.5
progressIncrement=0.25/mdl.num_frames
# set global scale and translation points
# maybe add integer options
mesh_min_x=100000.0
mesh_max_x=-100000.0
mesh_min_y=100000.0
mesh_max_y=-100000.0
mesh_min_z=100000.0
mesh_max_z=-100000.0
for frame_counter in range(0,mdl.num_frames):
Blender.Set("curframe", frame_counter+1) #set blender to the correct frame
mesh.getFromObject(object.name, 1, 0) #update the mesh to make verts current
for face in mesh.faces:
for vert in face.verts:
if mesh_min_x>vert.co[1]: mesh_min_x=vert.co[1]
if mesh_max_x<vert.co[1]: mesh_max_x=vert.co[1]
if mesh_min_y>vert.co[0]: mesh_min_y=vert.co[0]
if mesh_max_y<vert.co[0]: mesh_max_y=vert.co[0]
if mesh_min_z>vert.co[2]: mesh_min_z=vert.co[2]
if mesh_max_z<vert.co[2]: mesh_max_z=vert.co[2]
mesh_scale_x=(mesh_max_x-mesh_min_x)/255
mesh_scale_y=(mesh_max_y-mesh_min_y)/255
mesh_scale_z=(mesh_max_z-mesh_min_z)/255
mdl.scale[0] = mesh_scale_x
mdl.scale[1] = mesh_scale_y
mdl.scale[2] = mesh_scale_z
mdl.translate[0] = mesh_min_x
mdl.translate[1] = mesh_min_y
mdl.translate[2] = mesh_min_z
#fill in each frame with frame info and all the vertex data for that frame
for frame_counter in range(0,mdl.num_frames):
progress+=progressIncrement
Blender.Window.DrawProgressBar(progress, "Calculating Frame: "+str(frame_counter+1))
#add a frame
mdl.frames.append(mdl_frame())
#update the mesh objects vertex positions for the animation
Blender.Set("curframe", frame_counter+1) #set blender to the correct frame
mesh.getFromObject(object.name, 1, 0) #update the mesh to make verts current
frame_min_x=100000
frame_max_x=-100000
frame_min_y=100000
frame_max_y=-100000
frame_min_z=100000
frame_max_z=-100000
#now for the vertices
for vert_counter in range(0, mdl.num_vertices):
#add a vertex to the mdl structure
mdl.frames[frame_counter].vertices.append(mdl_point())
#figure out the new coords based on scale and transform:
#first translate the point so it's not less than 0,
#then scale it so it's between 0..255 (see the summary note after this function)
current_vertex = backlist[vert_counter]
vert = mesh.verts[current_vertex]
# scale
# x coord needs flipping
new_x=255-int((vert.co[1]-mesh_min_x)/mesh_scale_x)
new_y=int((vert.co[0]-mesh_min_y)/mesh_scale_y)
new_z=int((vert.co[2]-mesh_min_z)/mesh_scale_z)
# bbox stuff
if frame_min_x>new_x: frame_min_x=new_x
if frame_max_x<new_x: frame_max_x=new_x
if frame_min_y>new_y: frame_min_y=new_y
if frame_max_y<new_y: frame_max_y=new_y
if frame_min_z>new_z: frame_min_z=new_z
if frame_max_z<new_z: frame_max_z=new_z
#put them in the structure
mdl.frames[frame_counter].vertices[vert_counter].vertices=(new_x, new_y, new_z)
#need to add the lookup table check here
maxdot = -999999.0
maxdotindex = -1
for j in range(0,162):
x1=-mesh.verts[current_vertex].no[1]
y1=mesh.verts[current_vertex].no[0]
z1=mesh.verts[current_vertex].no[2]
dot = (x1*MDL_NORMALS[j][0]+
y1*MDL_NORMALS[j][1]+
z1*MDL_NORMALS[j][2])
if (dot > maxdot):
maxdot = dot
maxdotindex = j
mdl.frames[frame_counter].vertices[vert_counter].lightnormalindex=maxdotindex
del maxdot, maxdotindex
del new_x, new_y, new_z
mdl.frames[frame_counter].bboxmin[0] = frame_min_x
mdl.frames[frame_counter].bboxmax[0] = frame_max_x
mdl.frames[frame_counter].bboxmin[1] = frame_min_y
mdl.frames[frame_counter].bboxmax[1] = frame_max_y
mdl.frames[frame_counter].bboxmin[2] = frame_min_z
mdl.frames[frame_counter].bboxmax[2] = frame_max_z
del frame_min_x,frame_max_x,frame_min_y,frame_max_y,frame_min_z,frame_max_z
#output all the frame names-user_frame_list is loaded during the validation
for frame_set in user_frame_list:
for counter in range(frame_set[1]-1, frame_set[2]):
mdl.frames[counter].name=frame_set[0]+str(counter-frame_set[1]+2)
ofs = object.getLocation('worldspace')
sc = object.getSize('worldspace')
# Rather than warn about these things, just apply the transformations they indicate
mdl.scale[0] = mdl.scale[0] * sc[1] * g_scale.val
mdl.scale[1] = mdl.scale[1] * sc[0] * g_scale.val
mdl.scale[2] = mdl.scale[2] * sc[2] * g_scale.val
mdl.translate[0] = mdl.translate[0] + ofs[1]
mdl.translate[1] = mdl.translate[1] + ofs[0]
mdl.translate[2] = mdl.translate[2] + ofs[2]
mdl.boundingradius = (mesh_max_x-mesh_min_x+mesh_max_y-mesh_min_y+mesh_max_z-mesh_min_z)/2
# a crude approximation, but when is this used?
mdl.eyeposition[0] = 0
mdl.eyeposition[1] = 0
mdl.eyeposition[2] = mesh_min_z #ground plane for QMe
mdl.synctype = 1
mdl.flags = g_flags.val
mdl.size = 10.0 #unused?
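# Summary note (illustrative, derived from the loops in fill_mdl() above):
# every vertex coordinate is quantised into the 0..255 byte range of the MDL
# vertex format using, per axis,
#
#   mesh_scale = (mesh_max - mesh_min) / 255
#   new_coord  = int((coord - mesh_min) / mesh_scale)
#
# The Blender Y component feeds the MDL X axis and is mirrored as
# 255 - new_coord; the reversed vertex order in mdl_face.save() compensates so
# polygons still face the right way after that flip.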
######################################################
# Get Frame List
######################################################
def get_frame_list():
global g_frame_filename
frame_list=[]
if g_frame_filename.val=="default":
return MDL_FRAME_NAME_LIST
else:
#check for file
if (Blender.sys.exists(g_frame_filename.val)==1):
#open file and read it in
file=open(g_frame_filename.val,"r")
lines=file.readlines()
file.close()
#check header (first line)
if lines[0].strip()!="# MDL Frame Name List":
print "This is not a valid frame definition file"
result=Blender.Draw.PupMenu("This is not a valid frame definition file-using default%t|OK")
return MDL_FRAME_NAME_LIST
else:
#read in the data
num_frames=0
for counter in range(1, len(lines)):
current_line=lines[counter].strip()
if current_line=="" or current_line[0]=="#":
#skip blank lines and comments
pass
else:
data=current_line.split()
frame_list.append([data[0],num_frames+1, num_frames+int(data[1])])
num_frames+=int(data[1])
return frame_list
else:
print "Cannot find file"
result=Blender.Draw.PupMenu("Cannot find frame definion file-using default%t|OK")
return MDL_FRAME_NAME_LIST
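# An illustrative frame definition file for the parser above looks like:
#
#   # MDL Frame Name List
#   # comment lines start with '#'
#   stand 10
#   run 6
#
# i.e. one "<name> <frame count>" pair per line, which get_frame_list() turns
# into [['stand', 1, 10], ['run', 11, 16]].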
######################################################
# Save MDL Format
######################################################
def save_mdl(filename):
print ""
print "***********************************"
print "MDL Export"
print "***********************************"
print ""
Blender.Window.DrawProgressBar(0.0,"Beginning MDL Export")
mdl=mdl_obj() #blank mdl object to save
#get the object
mesh_objs = Blender.Object.GetSelected()
#check there is a blender object selected
if len(mesh_objs)==0:
print "Fatal Error: Must select a mesh to output as MDL"
print "Found nothing"
result=Blender.Draw.PupMenu("Must select an object to export%t|OK")
return
mesh_obj=mesh_objs[0] #this gets the first object (should be only one)
#check if it's a mesh object
if mesh_obj.getType()!="Mesh":
print "Fatal Error: Must select a mesh to output as MDL"
print "Found: ", mesh_obj.getType()
result=Blender.Draw.PupMenu("Selected Object must be a mesh to output as MDL%t|OK")
return
ok=validation(mesh_obj)
if ok==False:
return
fill_mdl(mdl, mesh_obj)
mdl.dump()
Blender.Window.DrawProgressBar(1.0, "Writing to Disk")
#actually write it to disk
file=open(filename,"wb")
mdl.save(file)
file.close()
#cleanup
mdl=0
print "Closed the file"
|
atphalix/eviltoys
|
tools/myscripts/mdl_export.py
|
Python
|
gpl-2.0
| 38,523 | 0.052384 |
import os
import unicodedata
from tendrl.commons.event import Event
from tendrl.commons.message import ExceptionMessage
from tendrl.commons.utils import cmd_utils
from tendrl.commons.utils import etcd_utils
from tendrl.commons.utils import log_utils as logger
def sync():
try:
_keep_alive_for = int(NS.config.data.get("sync_interval", 10)) + 250
disks = get_node_disks()
disk_map = {}
for disk in disks:
# Creating dict with disk name as key and disk_id as value
# It will help populate block device disk_id attribute
_map = dict(disk_id=disks[disk]['disk_id'], ssd=False)
disk_map[disks[disk]['disk_name']] = _map
block_devices = get_node_block_devices(disk_map)
for disk in disks:
if disk_map[disks[disk]['disk_name']]:
disks[disk]['ssd'] = disk_map[disks[disk][
'disk_name']]['ssd']
if "virtio" in disks[disk]["driver"]:
# Virtual disk
NS.tendrl.objects.VirtualDisk(**disks[disk]).save(
ttl=_keep_alive_for
)
else:
# physical disk
NS.tendrl.objects.Disk(**disks[disk]).save(ttl=_keep_alive_for)
for device in block_devices['all']:
NS.tendrl.objects.BlockDevice(**device).save(ttl=_keep_alive_for)
for device_id in block_devices['used']:
etcd_utils.write(
"nodes/%s/LocalStorage/BlockDevices/used/%s" %
(NS.node_context.node_id,
device_id.replace("/", "_").replace("_", "", 1)),
device_id, ttl=_keep_alive_for
)
for device_id in block_devices['free']:
etcd_utils.write(
"nodes/%s/LocalStorage/BlockDevices/free/%s" %
(NS.node_context.node_id,
device_id.replace("/", "_").replace("_", "", 1)),
device_id, ttl=_keep_alive_for
)
raw_reference = get_raw_reference()
etcd_utils.write(
"nodes/%s/LocalStorage/DiskRawReference" %
NS.node_context.node_id,
raw_reference,
ttl=_keep_alive_for,
)
except(Exception, KeyError) as ex:
_msg = "node_sync disks sync failed: " + ex.message
Event(
ExceptionMessage(
priority="error",
publisher=NS.publisher_id,
payload={"message": _msg,
"exception": ex}
)
)
def get_node_disks():
disks, disks_map, err = get_disk_details()
if not err:
cmd = cmd_utils.Command('hwinfo --partition')
out, err, rc = cmd.run()
if not err:
for partitions in out.split('\n\n'):
devlist = {"hardware_id": "",
"parent_hardware_id": "",
"sysfs_id": "",
"hardware_class": "",
"model": "",
"partition_name": "",
"device_files": "",
"config_status": "",
}
for partition in partitions.split('\n'):
key = partition.split(':')[0]
if key.strip() == "Unique ID":
devlist["hardware_id"] = \
partition.split(':')[1].lstrip()
if key.strip() == "Parent ID":
devlist["parent_hardware_id"] = \
partition.split(':')[1].lstrip()
if key.strip() == "SysFS ID":
devlist["sysfs_id"] = \
partition.split(':')[1].lstrip()
if key.strip() == "Hardware Class":
devlist["hardware_class"] = \
partition.split(':')[1].lstrip()
if key.strip() == "Model":
devlist["model"] = \
partition.split(':')[1].lstrip().replace('"', "")
if key.strip() == "Device File":
_name = partition.split(':')[1].lstrip()
devlist["partition_name"] = \
"".join(_name.split(" ")[0])
if key.strip() == "Device Files":
devlist["device_files"] = \
partition.split(':')[1].lstrip()
if key.strip() == "Config Status":
devlist["config_status"] = \
partition.split(':')[1].lstrip()
# checking if partition parent id is in collected
# disk_ids or not
if devlist["parent_hardware_id"] in disks_map:
part_name = devlist["partition_name"]
parent = disks_map[devlist["parent_hardware_id"]]
disks[parent]["partitions"][part_name] = devlist
return disks
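# The partition parser above expects ``hwinfo --partition`` output made of
# blank-line separated stanzas of "Key: value" lines, e.g. (illustrative,
# values are placeholders):
#
#   Unique ID: <partition-hardware-id>
#   Parent ID: <disk-hardware-id>
#   SysFS ID: <sysfs path>
#   Hardware Class: partition
#   Model: <model string>
#   Device File: /dev/sda1
#   Config Status: <status flags>
#
# Each partition is attached to its disk through the "Parent ID" field.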
def get_disk_details():
disks = {}
disks_map = {}
cmd = cmd_utils.Command('hwinfo --disk')
out, err, rc = cmd.run()
if not err:
out = unicodedata.normalize('NFKD', out).encode('utf8', 'ignore') \
if isinstance(out, unicode) \
else unicode(out, errors="ignore").encode('utf8')
for all_disks in out.split('\n\n'):
devlist = {"disk_id": "",
"hardware_id": "",
"parent_id": "",
"disk_name": "",
"sysfs_id": "",
"sysfs_busid": "",
"sysfs_device_link": "",
"hardware_class": "",
"model": "",
"vendor": "",
"device": "",
"rmversion": "",
"serial_no": "",
"driver_modules": "",
"driver": "",
"device_files": "",
"device_number": "",
"bios_id": "",
"geo_bios_edd": "",
"geo_logical": "",
"size": "",
"size_bios_edd": "",
"geo_bios_legacy": "",
"config_status": "",
"partitions": {}
}
for disk in all_disks.split('\n'):
key = disk.split(':')[0]
if key.strip() == "Unique ID":
devlist["hardware_id"] = \
disk.split(':')[1].lstrip()
elif key.strip() == "Parent ID":
devlist["parent_id"] = \
disk.split(':')[1].lstrip()
elif key.strip() == "SysFS ID":
devlist["sysfs_id"] = \
disk.split(':')[1].lstrip()
elif key.strip() == "SysFS BusID":
devlist["sysfs_busid"] = \
disk.split(':')[1].lstrip()
elif key.strip() == "SysFS Device Link":
devlist["sysfs_device_link"] = \
disk.split(':')[1].lstrip()
elif key.strip() == "Hardware Class":
devlist["hardware_class"] = \
disk.split(':')[1].lstrip()
elif key.strip() == "Model":
devlist["model"] = \
disk.split(':')[1].lstrip().replace('"', "")
elif key.strip() == "Vendor":
devlist["vendor"] = \
disk.split(':')[1].replace(" ", "").replace('"', "")
elif key.strip() == "Device":
devlist["device"] = \
disk.split(':')[1].replace(" ", "").replace('"', "")
elif key.strip() == "Revision":
devlist["rmversion"] = \
disk.split(':')[1].lstrip().replace('"', "")
elif key.strip() == "Serial ID":
devlist["serial_no"] = \
disk.split(':')[1].replace(" ", "").replace('"', "")
elif key.strip() == "Driver":
devlist["driver"] = \
disk.split(':')[1].lstrip().replace('"', "")
elif key.strip() == "Driver Modules":
devlist["driver_modules"] = \
disk.split(':')[1].lstrip().replace('"', "")
elif key.strip() == "Device File":
_name = disk.split(':')[1].lstrip()
devlist["disk_name"] = \
"".join(_name.split(" ")[0])
elif key.strip() == "Device Files":
devlist["device_files"] = \
disk.split(':')[1].lstrip()
elif key.strip() == "Device Number":
devlist["device_number"] = \
disk.split(':')[1].lstrip()
elif key.strip() == "BIOS id":
devlist["bios_id"] = \
disk.split(':')[1].lstrip()
elif key.strip() == "Geometry (Logical)":
devlist["geo_logical"] = \
disk.split(':')[1].lstrip()
elif key.strip() == "Capacity":
devlist["size"] = \
disk.split('(')[1].split()[0]
elif key.strip() == "Geometry (BIOS EDD)":
devlist["geo_bios_edd"] = \
disk.split(':')[1].lstrip()
elif key.strip() == "Size (BIOS EDD)":
devlist["size_bios_edd"] = \
disk.split(':')[1].lstrip()
elif key.strip() == "Geometry (BIOS Legacy)":
devlist["geo_bios_legacy"] = \
disk.split(':')[1].lstrip()
elif key.strip() == "Config Status":
devlist["config_status"] = \
disk.split(':')[1].lstrip()
if ("virtio" in devlist["driver"] and
"by-id/virtio" in devlist['device_files']):
# split from:
# /dev/vdc, /dev/disk/by-id/virtio-0200f64e-5892-40ee-8,
# /dev/disk/by-path/virtio-pci-0000:00:08.0
for entry in devlist['device_files'].split(','):
if "by-id/virtio" in entry:
devlist['disk_id'] = entry.split('/')[-1]
break
elif "VMware" in devlist["vendor"]:
devlist["disk_id"] = \
"{vendor}_{device}_{parent_id}_{hardware_id}".format(**devlist)
elif (devlist["vendor"] != "" and
devlist["device"] != "" and
devlist["serial_no"] != ""):
devlist["disk_id"] = (devlist["vendor"] + "_" +
devlist["device"] + "_" + devlist[
"serial_no"])
else:
devlist['disk_id'] = devlist['disk_name']
if devlist["disk_id"] in disks.keys():
# Multipath presents multiple I/O paths between
# server nodes and storage arrays as a single device.
# If a single device is reachable over more than one path
# then hwinfo and lsblk report the same device details under
# different device names. To avoid such duplicate entries,
# when multiple devices exist with the same disk_id the
# device_name that sorts first alphabetically is stored.
# This avoids redundant disk entries and ensures the next
# sync populates the same device detail.
if devlist["disk_name"] < disks[
devlist['disk_id']]['disk_name']:
disks[devlist["disk_id"]] = devlist
disks_map[devlist['hardware_id']] = devlist["disk_id"]
else:
disks[devlist["disk_id"]] = devlist
disks_map[devlist['hardware_id']] = devlist["disk_id"]
return disks, disks_map, err
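# Illustrative example of the de-duplication above: if the same multipathed
# LUN shows up as both /dev/sdb and /dev/sdc, both paths produce the same
# vendor_device_serial disk_id, and only /dev/sdb (the alphabetically smaller
# disk_name) is kept in ``disks``.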
def get_node_block_devices(disks_map):
block_devices = dict(all=list(), free=list(), used=list())
columns = 'NAME,KNAME,PKNAME,MAJ:MIN,FSTYPE,MOUNTPOINT,LABEL,' \
'UUID,RA,RO,RM,SIZE,STATE,OWNER,GROUP,MODE,ALIGNMENT,' \
'MIN-IO,OPT-IO,PHY-SEC,LOG-SEC,ROTA,SCHED,RQ-SIZE,' \
'DISC-ALN,DISC-GRAN,DISC-MAX,DISC-ZERO,TYPE'
keys = columns.split(',')
lsblk = (
"lsblk --all --bytes --noheadings --output='%s' --path --raw" %
columns)
cmd = cmd_utils.Command(lsblk)
out, err, rc = cmd.run()
if not err:
out = unicodedata.normalize('NFKD', out).encode('utf8', 'ignore') \
if isinstance(out, unicode) \
else unicode(out, errors="ignore").encode('utf8')
devlist = map(
lambda line: dict(zip(keys, line.split(' '))),
out.splitlines())
all_parents = []
parent_ids = []
multipath = {}
for dev_info in devlist:
device = dict()
device['device_name'] = dev_info['NAME']
device['device_kernel_name'] = dev_info['KNAME']
device['parent_name'] = dev_info['PKNAME']
device['major_to_minor_no'] = dev_info['MAJ:MIN']
device['fstype'] = dev_info['FSTYPE']
device['mount_point'] = dev_info['MOUNTPOINT']
device['label'] = dev_info['LABEL']
device['fsuuid'] = dev_info['UUID']
device['read_ahead'] = dev_info['RA']
if dev_info['RO'] == '0':
device['read_only'] = False
else:
device['read_only'] = True
if dev_info['RM'] == '0':
device['removable_device'] = False
else:
device['removable_device'] = True
device['size'] = dev_info['SIZE']
device['state'] = dev_info['STATE']
device['owner'] = dev_info['OWNER']
device['group'] = dev_info['GROUP']
device['mode'] = dev_info['MODE']
device['alignment'] = dev_info['ALIGNMENT']
device['min_io_size'] = dev_info['MIN-IO']
device['optimal_io_size'] = dev_info['OPT-IO']
device['phy_sector_size'] = dev_info['PHY-SEC']
device['log_sector_size'] = dev_info['LOG-SEC']
device['device_type'] = dev_info['TYPE']
device['scheduler_name'] = dev_info['SCHED']
device['req_queue_size'] = dev_info['RQ-SIZE']
device['discard_align_offset'] = dev_info['DISC-ALN']
device['discard_granularity'] = dev_info['DISC-GRAN']
device['discard_max_bytes'] = dev_info['DISC-MAX']
device['discard_zeros_data'] = dev_info['DISC-ZERO']
device['rotational'] = dev_info['ROTA']
if dev_info['TYPE'] == 'disk':
device['ssd'] = is_ssd(dev_info['ROTA'])
else:
device['ssd'] = False
if dev_info['TYPE'] == 'part':
device['used'] = True
# if partition is under multipath then parent of multipath
# is assigned
if dev_info['PKNAME'] in multipath.keys():
dev_info['PKNAME'] = multipath[dev_info['PKNAME']]
if dev_info['PKNAME'] in disks_map.keys():
device['disk_id'] = disks_map[
dev_info['PKNAME']]['disk_id']
block_devices['all'].append(device)
block_devices['used'].append(device['device_name'])
if dev_info['TYPE'] == 'disk':
if dev_info['NAME'] in disks_map.keys():
device['disk_id'] = disks_map[dev_info['NAME']]['disk_id']
disks_map[dev_info['NAME']]['ssd'] = device['ssd']
all_parents.append(device)
if dev_info['TYPE'] == 'mpath':
multipath[device['device_kernel_name']] = dev_info['PKNAME']
else:
if dev_info['PKNAME'] in multipath.keys():
dev_info['PKNAME'] = multipath[dev_info['PKNAME']]
parent_ids.append(dev_info['PKNAME'])
for parent in all_parents:
if parent['device_name'] in parent_ids:
parent['used'] = True
block_devices['used'].append(parent['device_name'])
else:
parent['used'] = False
block_devices['free'].append(parent['device_name'])
block_devices['all'].append(parent)
else:
logger.log(
"debug",
NS.publisher_id,
{"message": err}
)
return block_devices
def get_raw_reference():
base_path = '/dev/disk/'
paths = os.listdir(base_path)
raw_reference = {}
for path in paths:
raw_reference[path] = []
full_path = base_path + path
cmd = cmd_utils.Command("ls -l %s" % full_path)
out, err, rc = cmd.run()
if not err:
out = unicodedata.normalize('NFKD', out).encode('utf8', 'ignore') \
if isinstance(out, unicode) \
else unicode(out, errors="ignore").encode('utf8')
count = 0
for line in out.split('\n'):
if count == 0:
# to skip first line
count = count + 1
continue
line = " ".join(line.split())  # collapse runs of whitespace so split(' ', 8) below isolates the link name
raw_reference[path].append(line.split(' ', 8)[-1])
else:
logger.log(
"debug",
NS.publisher_id,
{"message": err}
)
return raw_reference
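# get_raw_reference() above keeps, for each /dev/disk/<category> directory, the
# tail of every ``ls -l`` entry. For an illustrative symlink line such as
#
#   lrwxrwxrwx 1 root root 9 Jan 1 00:00 wwn-0x5000000000000001 -> ../../sda
#
# the stored value is "wwn-0x5000000000000001 -> ../../sda", i.e. everything
# after the first eight space separated fields.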
def is_ssd(rotational):
if rotational == '0':
return True
if rotational == '1':
return False
"""Rotational attribute not found for
this device which is not either SSD or HD
"""
return False
|
Tendrl/node_agent
|
tendrl/node_agent/node_sync/disk_sync.py
|
Python
|
lgpl-2.1
| 18,338 | 0.000327 |
# This is your "setup.py" file.
# See the following sites for general guide to Python packaging:
# * `The Hitchhiker's Guide to Packaging <http://guide.python-distribute.org/>`_
# * `Python Project Howto <http://infinitemonkeycorps.net/docs/pph/>`_
from setuptools import setup, find_packages
import sys
import os
#from Cython.Build import cythonize
from setuptools.extension import Extension
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.md'), "rb").read().decode("utf-8")
NEWS = open(os.path.join(here, 'NEWS.rst')).read()
version = '0.1'
install_requires = [
# List your project dependencies here.
# For more details, see:
# http://packages.python.org/distribute/setuptools.html#declaring-dependencies
# Packages with fixed versions
# "<package1>==0.1",
# "<package2>==0.3.0",
# "nose", "coverage" # Put it here.
]
tests_requires = [
# List your project testing dependencies here.
]
dev_requires = [
# List your project development dependencies here.\
]
dependency_links = [
# Sources for some fixed versions packages
#'https://github.com/<user1>/<package1>/archive/master.zip#egg=<package1>-0.1',
#'https://github.com/<user2>/<package2>/archive/master.zip#egg=<package2>-0.3.0',
]
# Cython extension
# TOP_DIR="/home/eugeneai/Development/codes/NLP/workprog/tmp/link-grammar"
# LG_DIR="link-grammar"
# LG_LIB_DIR=os.path.join(TOP_DIR,LG_DIR,".libs")
# LG_HEADERS=os.path.join(TOP_DIR)
ext_modules = [
# Extension("isu.aquarium.cython_module",
# sources=["src/./isu.aquarium/cython_module.pyx"],
# libraries=["gdal"],
# )
]
setup(
name='isu.aquarium',
version=version,
description="Document organizing WEB-system",
long_description=README + '\n\n' + NEWS,
# Get classifiers from http://pypi.python.org/pypi?%3Aaction=list_classifiers
# classifiers=[c.strip() for c in """
# Development Status :: 4 - Beta
# License :: OSI Approved :: MIT License
# Operating System :: OS Independent
# Programming Language :: Python :: 2.6
# Programming Language :: Python :: 2.7
# Programming Language :: Python :: 3
# Topic :: Software Development :: Libraries :: Python Modules
# """.split('\n') if c.strip()],
# ],
keywords='WEB Semantics JavaScript',
author='Evgeny Cherkashin',
author_email='eugeneai@irnok.net',
url='https://github.com/sergeeva-olga/decree-server',
license='GPL>=2',
packages=find_packages("src"),
package_dir={'': "src"},
namespace_packages=['isu'],
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
dependency_links=dependency_links,
extras_require={
'tests': tests_requires,
'dev': dev_requires,
},
test_suite='tests',
entry_points="""\
[paste.app_factory]
main=isu.aquarium.server:main
""",
#ext_modules = cythonize(ext_modules),
#test_suite = 'nose.collector',
# setup_requires=['nose>=1.0','Cython','coverage']
)
|
sergeeva-olga/decree-server
|
setup.py
|
Python
|
gpl-3.0
| 3,119 | 0.002886 |
from testscenarios import TestWithScenarios
import unittest
from geocode.geocode import GeoCodeAccessAPI
class GeoCodeTests(TestWithScenarios, unittest.TestCase):
scenarios = [
(
"Scenario - 1: Get latlng from address",
{
'address': "Sydney NSW",
'latlng': (-33.8674869, 151.2069902),
'method': "geocode",
}
),
(
"Scenario - 2: Get address from latlng",
{
'address': "Sydney NSW",
'latlng': (-33.8674869, 151.2069902),
'method': "address",
}
),
]
def setUp(self):
self.api = GeoCodeAccessAPI()
def test_geocode(self):
if self.method == 'geocode':
expected_address = self.address
expected_lat = self.latlng[0]
expected_lng = self.latlng[1]
geocode = self.api.get_geocode(expected_address)
self.assertAlmostEqual(geocode.lat, expected_lat, delta=5)
self.assertAlmostEqual(geocode.lng, expected_lng, delta=5)
self.assertIn(expected_address, geocode.address)
else:
expected_address = self.address
expected_lat = self.latlng[0]
expected_lng = self.latlng[1]
address = self.api.get_address(lat=expected_lat, lng=expected_lng)
self.assertIn(expected_address, address)
def tearDown(self):
pass
if __name__ == "__main__":
unittest.main()
|
saleem-latif/GeoCode
|
tests/unittest_geocode.py
|
Python
|
gpl-2.0
| 1,541 | 0 |
"""
A simple client to query a TensorFlow Serving instance.
Example:
$ python client.py \
--images IMG_0932_sm.jpg \
--num_results 10 \
--model_name inception \
--host localhost \
--port 9000 \
--timeout 10
Author: Grant Van Horn
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import time
import tfserver
def parse_args():
parser = argparse.ArgumentParser(description='Command line classification client. Sorts and prints the classification results.')
parser.add_argument('--images', dest='image_paths',
help='Path to one or more images to classify (jpeg or png).',
type=str, nargs='+', required=True)
parser.add_argument('--num_results', dest='num_results',
help='The number of results to print. Set to 0 to print all classes.',
required=False, type=int, default=0)
parser.add_argument('--model_name', dest='model_name',
help='The name of the model to query.',
required=False, type=str, default='inception')
parser.add_argument('--host', dest='host',
help='Machine host where the TensorFlow Serving model is.',
required=False, type=str, default='localhost')
parser.add_argument('--port', dest='port',
help='Port that the TensorFlow Server is listening on.',
required=False, type=int, default=9000)
parser.add_argument('--timeout', dest='timeout',
help='Amount of time to wait before failing.',
required=False, type=int, default=10)
args = parser.parse_args()
return args
def main():
args = parse_args()
# Read in the image bytes
image_data = []
for fp in args.image_paths:
with open(fp, 'rb') as f:
data = f.read()
image_data.append(data)
# Get the predictions
t = time.time()
predictions = tfserver.predict(image_data, model_name=args.model_name,
host=args.host, port=args.port, timeout=args.timeout
)
dt = time.time() - t
print("Prediction call took %0.4f seconds" % (dt,))
# Process the results
results = tfserver.process_classification_prediction(predictions, max_classes=args.num_results)
# Print the results
for i, fp in enumerate(args.image_paths):
print("Results for image: %s" % (fp,))
for name, score in results[i]:
print("%s: %0.3f" % (name, score))
print()
if __name__ == '__main__':
main()
|
visipedia/tf_classification
|
tfserving/client.py
|
Python
|
mit
| 2,561 | 0.0164 |
"""A backport of the get_terminal_size function from Python 3.3's shutil."""
__title__ = "backports.shutil_get_terminal_size"
__version__ = "1.0.0"
__license__ = "MIT"
__author__ = "Christopher Rosell"
__copyright__ = "Copyright 2014 Christopher Rosell"
__all__ = ["get_terminal_size"]
from .get_terminal_size import get_terminal_size
|
nateprewitt/pipenv
|
pipenv/vendor/backports/shutil_get_terminal_size/__init__.py
|
Python
|
mit
| 338 | 0 |
from django.test import TestCase
from django.contrib.auth.models import User
from django.urls import reverse
from .models import UserProfile
from imagersite.tests import AuthenticatedTestCase
# Create your tests here.
class ProfileTestCase(TestCase):
"""TestCase for Profile"""
def setUp(self):
"""Set up User Profile"""
self.user = User(username='Cris', first_name='Cris')
self.user.save()
def test_user_has_profile(self):
"""Test User has a profile."""
self.assertTrue(hasattr(self.user, 'profile'))
def test_profile_username(self):
"""Test Profile has username"""
self.assertEqual(self.user.profile.user.username, 'Cris')
# Learn to parametrize
def test_profile_has_cameratype(self):
"""Test profile has cameria type attr."""
self.assertTrue(hasattr(self.user.profile, 'camera_type'))
def test_profile_repr(self):
"""Test repr function."""
self.assertIn('Cris', repr(self.user.profile))
def test_profile_active(self):
"""Test profile manager."""
self.assertTrue(len(UserProfile.active.all()) > 0)
class UserProfilePageTestCase(AuthenticatedTestCase):
"""Test case for viewing the profile."""
def test_profile_page(self):
self.log_in()
self.assertEqual(self.client.get('/profile/').status_code, 200)
def test_profile_page_has_username(self):
self.log_in()
self.assertIn(
self.username.encode('utf-8'),
self.client.get('/profile/').content
)
def test_profile_page_has_photo_count(self):
self.log_in()
self.assertIn(
b'Photos uploaded:',
self.client.get('/profile/').content
)
def test_profile_page_has_album_count(self):
self.log_in()
self.assertIn(b'Albums created:', self.client.get('/profile/').content)
class EditProfileTestCase(TestCase):
"""Edit profile test case."""
def setUp(self):
"""GET the route named edit_profile."""
self.user = User(username='test')
self.user.save()
self.client.force_login(self.user)
self.response = self.client.get(reverse('edit_profile'))
def test_status_code(self):
"""Test the status code for GETing edit_profile is 200."""
self.assertEqual(self.response.status_code, 200)
def test_edit_profile(self):
"""Test editing a album stores the updated value."""
new_camera_type = 'camera'
data = {
'camera_type': new_camera_type,
}
response = self.client.post(reverse('edit_profile'), data)
self.assertEqual(response.status_code, 302)
profile = UserProfile.objects.filter(user=self.user).first()
self.assertEqual(profile.camera_type, new_camera_type)
|
welliam/imagersite
|
user_profile/tests.py
|
Python
|
mit
| 2,830 | 0 |
"""
Revision ID: 0146_add_service_callback_api
Revises: 0145_add_notification_reply_to
Create Date: 2017-11-28 15:13:48.730554
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0146_add_service_callback_api'
down_revision = '0145_add_notification_reply_to'
def upgrade():
op.create_table('service_callback_api_history',
sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('service_id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('url', sa.String(), nullable=False),
sa.Column('bearer_token', sa.String(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('updated_by_id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('version', sa.Integer(), autoincrement=False, nullable=False),
sa.PrimaryKeyConstraint('id', 'version')
)
op.create_index(op.f('ix_service_callback_api_history_service_id'), 'service_callback_api_history',
['service_id'], unique=False)
op.create_index(op.f('ix_service_callback_api_history_updated_by_id'), 'service_callback_api_history',
['updated_by_id'], unique=False)
op.create_table('service_callback_api',
sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('service_id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('url', sa.String(), nullable=False),
sa.Column('bearer_token', sa.String(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('updated_by_id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('version', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['service_id'], ['services.id'], ),
sa.ForeignKeyConstraint(['updated_by_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_service_callback_api_service_id'), 'service_callback_api', ['service_id'], unique=True)
op.create_index(op.f('ix_service_callback_api_updated_by_id'), 'service_callback_api', ['updated_by_id'], unique=False)
def downgrade():
op.drop_index(op.f('ix_service_callback_api_updated_by_id'), table_name='service_callback_api')
op.drop_index(op.f('ix_service_callback_api_service_id'), table_name='service_callback_api')
op.drop_table('service_callback_api')
op.drop_index(op.f('ix_service_callback_api_history_updated_by_id'), table_name='service_callback_api_history')
op.drop_index(op.f('ix_service_callback_api_history_service_id'), table_name='service_callback_api_history')
op.drop_table('service_callback_api_history')
|
alphagov/notifications-api
|
migrations/versions/0146_add_service_callback_api.py
|
Python
|
mit
| 2,779 | 0.010076 |
# -*- coding: utf-8 -*-
#
# libmypaint documentation build configuration file, created by
# sphinx-quickstart2 on Wed Jun 13 23:40:45 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
# Breathe setup, for integrating doxygen content
extensions.append('breathe')
doxyxml_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../doxygen')
print doxyxml_dir
breathe_projects = {"libmypaint": doxyxml_dir}
breathe_default_project = "libmypaint"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'libmypaint'
copyright = u'2012, MyPaint Development Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'libmypaintdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'libmypaint.tex', u'libmypaint Documentation',
u'MyPaint Development Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'libmypaint', u'libmypaint Documentation',
[u'MyPaint Development Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'libmypaint', u'libmypaint Documentation',
u'MyPaint Development Team', 'libmypaint', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
kragniz/mypaint
|
brushlib/doc/source/conf.py
|
Python
|
gpl-2.0
| 8,376 | 0.007283 |
# -*- coding: utf-8 -*-
"""Controllers for the pypollmanage pluggable application."""
from .root import RootController
|
tongpa/pypollmanage
|
pypollmanage/controllers/__init__.py
|
Python
|
apache-2.0
| 119 | 0.008403 |
# -*- coding: utf-8 -*-
# © 2016 Comunitea
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from odoo import api, fields, models
class GeneralLedgerReportWizard(models.TransientModel):
_inherit = "general.ledger.report.wizard"
@api.onchange('company_id')
def onchange_company_id(self):
res = super(GeneralLedgerReportWizard, self).onchange_company_id()
if self.company_id:
res['domain']['partner_ids'] = [
('is_company', '=', True)
]
return res
|
Comunitea/CMNT_00098_2017_JIM_addons
|
jim_invoice/models/general_ledger_wizard.py
|
Python
|
agpl-3.0
| 545 | 0.003676 |
# Copyright 2012 Michael Still and Canonical Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Config Drive v2 helper."""
import os
import shutil
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
from oslo_utils import units
from nova import exception
from nova.i18n import _LW
from nova.openstack.common import fileutils
from nova import utils
from nova import version
LOG = logging.getLogger(__name__)
configdrive_opts = [
cfg.StrOpt('config_drive_format',
default='iso9660',
choices=('iso9660', 'vfat'),
help='Config drive format.'),
# force_config_drive is a string option, to allow for future behaviors
# (e.g. use config_drive based on image properties)
cfg.StrOpt('force_config_drive',
choices=('always', 'True', 'False'),
help='Set to "always" to force injection to take place on a '
'config drive. NOTE: The "always" will be deprecated in '
'the Liberty release cycle.'),
cfg.StrOpt('mkisofs_cmd',
default='genisoimage',
help='Name and optionally path of the tool used for '
'ISO image creation')
]
CONF = cfg.CONF
CONF.register_opts(configdrive_opts)
# Config drives are 64mb if we can't size them to the exact size of the data
CONFIGDRIVESIZE_BYTES = 64 * units.Mi
FS_FORMAT_VFAT = 'vfat'
FS_FORMAT_ISO9660 = 'iso9660'
IMAGE_TYPE_RAW = 'raw'
IMAGE_TYPE_PLOOP = 'ploop'
class ConfigDriveBuilder(object):
"""Build config drives, optionally as a context manager."""
def __init__(self, instance_md=None):
if CONF.force_config_drive == 'always':
LOG.warning(_LW('The setting "always" will be deprecated in the '
'Liberty version. Please use "True" instead'))
self.imagefile = None
self.mdfiles = []
if instance_md is not None:
self.add_instance_metadata(instance_md)
def __enter__(self):
return self
def __exit__(self, exctype, excval, exctb):
if exctype is not None:
# NOTE(mikal): this means we're being cleaned up because an
# exception was thrown. All bets are off now, and we should not
# swallow the exception
return False
self.cleanup()
def _add_file(self, basedir, path, data):
filepath = os.path.join(basedir, path)
dirname = os.path.dirname(filepath)
fileutils.ensure_tree(dirname)
with open(filepath, 'wb') as f:
f.write(data)
def add_instance_metadata(self, instance_md):
for (path, data) in instance_md.metadata_for_config_drive():
self.mdfiles.append((path, data))
def _write_md_files(self, basedir):
for data in self.mdfiles:
self._add_file(basedir, data[0], data[1])
def _make_iso9660(self, path, tmpdir):
publisher = "%(product)s %(version)s" % {
'product': version.product_string(),
'version': version.version_string_with_package()
}
utils.execute(CONF.mkisofs_cmd,
'-o', path,
'-ldots',
'-allow-lowercase',
'-allow-multidot',
'-l',
'-publisher',
publisher,
'-quiet',
'-J',
'-r',
'-V', 'config-2',
tmpdir,
attempts=1,
run_as_root=False)
def _make_vfat(self, path, tmpdir):
# NOTE(mikal): This is a little horrible, but I couldn't find an
# equivalent to genisoimage for vfat filesystems.
with open(path, 'wb') as f:
f.truncate(CONFIGDRIVESIZE_BYTES)
utils.mkfs('vfat', path, label='config-2')
with utils.tempdir() as mountdir:
mounted = False
try:
_, err = utils.trycmd(
'mount', '-o', 'loop,uid=%d,gid=%d' % (os.getuid(),
os.getgid()),
path,
mountdir,
run_as_root=True)
if err:
raise exception.ConfigDriveMountFailed(operation='mount',
error=err)
mounted = True
# NOTE(mikal): I can't just use shutils.copytree here,
# because the destination directory already
# exists. This is annoying.
for ent in os.listdir(tmpdir):
shutil.copytree(os.path.join(tmpdir, ent),
os.path.join(mountdir, ent))
finally:
if mounted:
utils.execute('umount', mountdir, run_as_root=True)
def _make_ext4_ploop(self, path, tmpdir):
"""ploop is a disk loopback block device, that is used in
Parallels(OpenVZ) containers. It is similiar to Linux loop
device but prevents double caching of data in memory and
supports snapshots and some other effeciency benefits. Adding
ploop is a natural way to add disk device to VZ containers.
Ploop device has its own image format. It contains specific
partition table with one ext4 partition.
"""
os.mkdir(path)
utils.execute('ploop',
'init',
'-s', CONFIGDRIVESIZE_BYTES,
'-t', 'ext4',
path + '/disk.config.hds',
attempts=1,
run_as_root=True)
with utils.tempdir() as mountdir:
mounted = False
try:
_, err = utils.trycmd(
'ploop', 'mount',
'-m', mountdir,
'-t', 'ext4',
path + '/DiskDescriptor.xml',
run_as_root=True)
if os.path.exists(mountdir):
utils.execute('chown', '-R',
'%(u)d:%(g)d' % {'u': os.getuid(),
'g': os.getgid()},
mountdir,
run_as_root=True)
mounted = True
for ent in os.listdir(tmpdir):
shutil.copytree(os.path.join(tmpdir, ent),
os.path.join(mountdir, ent))
finally:
if mounted:
utils.execute('ploop', 'umount',
path + '/disk.config.hds', run_as_root=True)
def make_drive(self, path, image_type=IMAGE_TYPE_RAW):
"""Make the config drive.
:param path: the path to place the config drive image at
:param image_type: host side image format
:raises ProcessExecuteError if a helper process has failed.
"""
fs_format = CONF.config_drive_format
if fs_format is None:
if image_type == IMAGE_TYPE_RAW:
fs_format = FS_FORMAT_ISO9660
with utils.tempdir() as tmpdir:
self._write_md_files(tmpdir)
if image_type == IMAGE_TYPE_RAW:
if fs_format not in (FS_FORMAT_VFAT, FS_FORMAT_ISO9660):
raise exception.ConfigDriveUnsupportedFormat(
format=fs_format,
image_type=image_type,
image_path=path)
elif fs_format == FS_FORMAT_ISO9660:
self._make_iso9660(path, tmpdir)
elif fs_format == FS_FORMAT_VFAT:
self._make_vfat(path, tmpdir)
elif image_type == IMAGE_TYPE_PLOOP:
self._make_ext4_ploop(path, tmpdir)
else:
raise exception.ConfigDriveUnsupportedFormat(
format=fs_format,
image_type=image_type,
image_path=path)
def cleanup(self):
if self.imagefile:
fileutils.delete_if_exists(self.imagefile)
def __repr__(self):
return "<ConfigDriveBuilder: " + str(self.mdfiles) + ">"
def required_by(instance):
image_prop = utils.instance_sys_meta(instance).get(
utils.SM_IMAGE_PROP_PREFIX + 'img_config_drive', 'optional')
if image_prop not in ['optional', 'mandatory']:
LOG.warning(_LW('Image config drive option %(image_prop)s is invalid '
'and will be ignored'),
{'image_prop': image_prop},
instance=instance)
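    # A config drive is required when the instance explicitly requested one,
    # when force_config_drive is set, or when the image marks it mandatory.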
return (instance.get('config_drive') or
'always' == CONF.force_config_drive or
strutils.bool_from_string(CONF.force_config_drive) or
image_prop == 'mandatory'
)
def update_instance(instance):
"""Update the instance config_drive setting if necessary
The image or configuration file settings may override the default instance
setting. In this case the instance needs to mirror the actual
virtual machine configuration.
"""
if not instance.config_drive and required_by(instance):
instance.config_drive = True
|
yatinkumbhare/openstack-nova
|
nova/virt/configdrive.py
|
Python
|
apache-2.0
| 9,964 | 0.000401 |
from django.contrib.admin.models import LogEntry
from django.contrib.auth.models import User, Group, Permission
from simple_history import register
from celsius.tools import register_for_permission_handling
register(User)
register(Group)
register_for_permission_handling(User)
register_for_permission_handling(Group)
register_for_permission_handling(Permission)
register_for_permission_handling(LogEntry)
|
cytex124/celsius-cloud-backend
|
src/addons/management_user/admin.py
|
Python
|
mit
| 408 | 0 |
from django.test import TestCase
from voice.models import Call
class CallModelTestCase(TestCase):
def setUp(self):
self.call = Call(sid="CAxxx",
from_number="+15558675309",
to_number="+15556667777")
self.call.save()
def test_string_representation(self):
self.assertEqual(str(self.call),
"{0}: from +15558675309 to "
"+15556667777".format(self.call.date_created))
|
RobSpectre/garfield
|
garfield/voice/tests/test_models.py
|
Python
|
mit
| 499 | 0 |
#!/usr/bin/env python
# vim: set fileencoding=utf-8
import os
import argparse
from gff3 import genes, get_gff3_id, get_rbs_from, feature_test_true, feature_lambda, feature_test_type
from cpt_gffParser import gffParse, gffWrite
from Bio import SeqIO
from jinja2 import Environment, FileSystemLoader
import logging
from math import floor
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(name="pat")
# Path to script, required because of Galaxy.
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
# Path to the HTML template for the report
def genes_all(feature_list, feature_type=["gene"], sort=False):
"""
Simple filter to extract gene features from the feature set.
"""
if not sort:
for x in feature_lambda(
feature_list, feature_test_type, {"types": feature_type}, subfeatures=True
):
yield x
else:
data = list(genes_all(feature_list, feature_type, sort=False))
data = sorted(data, key=lambda feature: feature.location.start)
for x in data:
yield x
def checkSubs(feature, qualName):
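    """Breadth-first search of a feature's sub-features for qualifiers named
    in qualName; values found at the first matching depth are joined with '; '.
    Returns an empty string when nothing matches."""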
subFeats = []
res = ""
subFeats = feature.sub_features
while (len(subFeats) > 0):
for feat in subFeats:
for i in feat.qualifiers.keys():
for j in qualName:
if i == j:
if res == "":
res = feat.qualifiers[i][0]
else:
res += "; " + feat.qualifiers[i][0]
if res != "":
return res
tempFeats = []
for feat in subFeats: # Should be breadth-first results
for x in feat.sub_features:
tempFeats.append(x)
subFeats = tempFeats
return res
def annotation_table_report(record, types, wanted_cols, gaf_data, searchSubs):
getTypes = []
for x in [y.strip() for y in types.split(",")]:
getTypes.append(x)
getTypes.append("gene")
sorted_features = list(genes_all(record.features, getTypes, sort=True))
if wanted_cols is None or len(wanted_cols.strip()) == 0:
return [], []
useSubs = searchSubs
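    # The nested helpers below each resolve one report column from a feature's
    # qualifiers; when searchSubs is set, missing qualifiers are also looked up
    # in sub-features via checkSubs().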
def rid(record, feature):
"""Organism ID
"""
return record.id
def id(record, feature):
"""ID
"""
return feature.id
def featureType(record, feature):
"""Type
"""
return feature.type
def name(record, feature):
"""Name
"""
for x in ["Name", "name"]:
for y in feature.qualifiers.keys():
if x == y:
return feature.qualifiers[x][0]
if useSubs:
res = checkSubs(feature, ["Name", "name"])
if res != "":
return res
return "None"
def start(record, feature):
"""Boundary
"""
return str(feature.location.start + 1)
def end(record, feature):
"""Boundary
"""
return str(feature.location.end)
def location(record, feature):
"""Location
"""
return str(feature.location.start + 1) + "..{0.end}".format(feature.location)
def length(record, feature):
"""CDS Length (AA)
"""
if feature.type == "CDS":
cdss = [feature]
else:
cdss = list(genes(feature.sub_features, feature_type="CDS", sort=True))
if cdss == []:
return "None"
res = (sum([len(cds) for cds in cdss]) / 3) - 1
if floor(res) == res:
res = int(res)
return str(res)
def notes(record, feature):
"""User entered Notes"""
for x in ["Note", "note", "Notes", "notes"]:
for y in feature.qualifiers.keys():
if x == y:
return feature.qualifiers[x][0]
if useSubs:
res = checkSubs(feature, ["Note", "note", "Notes", "notes"])
if res != "":
return res
return "None"
def date_created(record, feature):
"""Created"""
return feature.qualifiers.get("date_creation", ["None"])[0]
def date_last_modified(record, feature):
"""Last Modified"""
res = feature.qualifiers.get("date_last_modified", ["None"])[0]
if res != "None":
return res
if useSubs:
res = checkSubs(feature, ["date_last_modified"])
if res != "":
return res
return "None"
def description(record, feature):
"""Description"""
res = feature.qualifiers.get("description", ["None"])[0]
if res != "None":
return res
if useSubs:
res = checkSubs(feature, ["description"])
if res != "":
return res
return "None"
def owner(record, feature):
"""Owner
User who created the feature. In a 464 scenario this may be one of
the TAs."""
for x in ["Owner", "owner"]:
for y in feature.qualifiers.keys():
if x == y:
return feature.qualifiers[x][0]
if useSubs:
res = checkSubs(feature, ["Owner", "owner"])
if res != "":
return res
return "None"
def product(record, feature):
"""Product
User entered product qualifier (collects "Product" and "product"
entries)"""
"""User entered Notes"""
for x in ["product", "Product"]:
for y in feature.qualifiers.keys():
if x == y:
return feature.qualifiers[x][0]
if useSubs:
res = checkSubs(feature, ["product", "Product"])
if res != "":
return res
return "None"
def note(record, feature):
"""Note
User entered Note qualifier(s)"""
return feature.qualifiers.get("Note", [])
def strand(record, feature):
"""Strand
"""
return "+" if feature.location.strand > 0 else "-"
def sd_spacing(record, feature):
"""Shine-Dalgarno spacing
"""
rbss = get_rbs_from(gene)
if len(rbss) == 0:
return "None"
else:
resp = []
for rbs in rbss:
cdss = list(genes(feature.sub_features, feature_type="CDS", sort=True))
if len(cdss) == 0:
return "No CDS"
if rbs.location.strand > 0:
distance = min(
cdss, key=lambda x: x.location.start - rbs.location.end
)
distance_val = str(distance.location.start - rbs.location.end)
resp.append(distance_val)
else:
distance = min(
cdss, key=lambda x: x.location.end - rbs.location.start
)
distance_val = str(rbs.location.start - distance.location.end)
resp.append(distance_val)
if len(resp) == 1:
return str(resp[0])
return resp
def sd_seq(record, feature):
"""Shine-Dalgarno sequence
"""
rbss = get_rbs_from(gene)
if len(rbss) == 0:
return "None"
else:
resp = []
for rbs in rbss:
resp.append(str(rbs.extract(record).seq))
if len(resp) == 1:
return str(resp[0])
else:
return resp
def start_codon(record, feature):
"""Start Codon
"""
if feature.type == "CDS":
cdss = [feature]
else:
cdss = list(genes(feature.sub_features, feature_type="CDS", sort=True))
data = [x for x in cdss]
if len(data) == 1:
return str(data[0].extract(record).seq[0:3])
else:
return [
"{0} ({1.location.start}..{1.location.end}:{1.location.strand})".format(
x.extract(record).seq[0:3], x
)
for x in data
]
def stop_codon(record, feature):
"""Stop Codon
"""
return str(feature.extract(record).seq[-3:])
def dbxrefs(record, feature):
"""DBxrefs
"""
"""User entered Notes"""
for x in ["Dbxref", "db_xref", "DB_xref", "DBxref", "DB_Xref", "DBXref"]:
for y in feature.qualifiers.keys():
if x == y:
return feature.qualifiers[x][0]
return "None"
def upstream_feature(record, feature):
"""Next gene upstream"""
if feature.strand > 0:
upstream_features = [
x for x in sorted_features if (x.location.start < feature.location.start and x.type == "gene" and x.strand == feature.strand)
]
if len(upstream_features) > 0:
foundSelf = False
featCheck = upstream_features[-1].sub_features
for x in featCheck:
if x == feature:
foundSelf = True
break
featCheck = featCheck + x.sub_features
if foundSelf:
if len(upstream_features) > 1:
return upstream_features[-2]
return None
return upstream_features[-1]
else:
return None
else:
upstream_features = [
x for x in sorted_features if (x.location.end > feature.location.end and x.type == "gene" and x.strand == feature.strand)
]
if len(upstream_features) > 0:
foundSelf = False
featCheck = upstream_features[0].sub_features
for x in featCheck:
if x == feature:
foundSelf = True
break
featCheck = featCheck + x.sub_features
if foundSelf:
if len(upstream_features) > 1:
return upstream_features[1]
return None
return upstream_features[0]
else:
return None
def upstream_feature__name(record, feature):
"""Next gene upstream"""
up = upstream_feature(record, feature)
if up:
return str(up.id)
return "None"
def ig_dist(record, feature):
"""Distance to next upstream gene on same strand"""
up = upstream_feature(record, feature)
if up:
dist = None
if feature.strand > 0:
dist = feature.location.start - up.location.end
else:
dist = up.location.start - feature.location.end
return str(dist)
else:
return "None"
def _main_gaf_func(record, feature, gaf_data, attr):
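        # Return the requested attribute from every GAF row recorded for this
        # feature id (an empty list when the feature has no GAF entries).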
if feature.id in gaf_data:
return [x[attr] for x in gaf_data[feature.id]]
return []
def gaf_annotation_extension(record, feature, gaf_data):
"""GAF Annotation Extension
Contains cross references to other ontologies that can be used
to qualify or enhance the annotation. The cross-reference is
prefaced by an appropriate GO relationship; references to
multiple ontologies can be entered. For example, if a gene
product is localized to the mitochondria of lymphocytes, the GO
ID (column 5) would be mitochondrion ; GO:0005439, and the
annotation extension column would contain a cross-reference to
the term lymphocyte from the Cell Type Ontology.
"""
return _main_gaf_func(record, feature, gaf_data, "annotation_extension")
def gaf_aspect(record, feature, gaf_data):
"""GAF Aspect code
E.g. P (biological process), F (molecular function) or C (cellular component)
"""
return _main_gaf_func(record, feature, gaf_data, "aspect")
def gaf_assigned_by(record, feature, gaf_data):
"""GAF Creating Organisation
"""
return _main_gaf_func(record, feature, gaf_data, "assigned_by")
def gaf_date(record, feature, gaf_data):
"""GAF Creation Date
"""
return _main_gaf_func(record, feature, gaf_data, "date")
def gaf_db(record, feature, gaf_data):
"""GAF DB
"""
return _main_gaf_func(record, feature, gaf_data, "db")
def gaf_db_reference(record, feature, gaf_data):
"""GAF DB Reference
"""
return _main_gaf_func(record, feature, gaf_data, "db_reference")
def gaf_evidence_code(record, feature, gaf_data):
"""GAF Evidence Code
"""
return _main_gaf_func(record, feature, gaf_data, "evidence_code")
def gaf_go_id(record, feature, gaf_data):
"""GAF GO ID
"""
return _main_gaf_func(record, feature, gaf_data, "go_id")
def gaf_go_term(record, feature, gaf_data):
"""GAF GO Term
"""
return _main_gaf_func(record, feature, gaf_data, "go_term")
def gaf_id(record, feature, gaf_data):
"""GAF ID
"""
return _main_gaf_func(record, feature, gaf_data, "id")
def gaf_notes(record, feature, gaf_data):
"""GAF Notes
"""
return _main_gaf_func(record, feature, gaf_data, "notes")
def gaf_owner(record, feature, gaf_data):
"""GAF Creator
"""
return _main_gaf_func(record, feature, gaf_data, "owner")
def gaf_with_or_from(record, feature, gaf_data):
"""GAF With/From
"""
return _main_gaf_func(record, feature, gaf_data, "with_or_from")
cols = []
data = []
funcs = []
lcl = locals()
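    # Requested column names are resolved against the helper functions defined
    # above via locals(); a doubled name such as "upstream_feature__name"
    # applies each helper in turn, feeding one result into the next.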
for x in [y.strip().lower() for y in wanted_cols.split(",")]:
if not x:
continue
if x == "type":
x = "featureType"
if x in lcl:
funcs.append(lcl[x])
# Keep track of docs
func_doc = lcl[x].__doc__.strip().split("\n\n")
# If there's a double newline, assume following text is the
# "help" and the first part is the "name". Generate empty help
# if not provided
if len(func_doc) == 1:
func_doc += [""]
cols.append(func_doc)
elif "__" in x:
chosen_funcs = [lcl[y] for y in x.split("__")]
func_doc = [
" of ".join(
[y.__doc__.strip().split("\n\n")[0] for y in chosen_funcs[::-1]]
)
]
cols.append(func_doc)
funcs.append(chosen_funcs)
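    # Build one table row per gene by applying each selected column function.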
for gene in genes_all(record.features, getTypes, sort=True):
row = []
for func in funcs:
if isinstance(func, list):
# If we have a list of functions, repeatedly apply them
value = gene
for f in func:
if value is None:
value = "None"
break
value = f(record, value)
else:
# Otherwise just apply the lone function
if func.__name__.startswith("gaf_"):
value = func(record, gene, gaf_data)
else:
value = func(record, gene)
if isinstance(value, list):
collapsed_value = ", ".join(value)
value = [str(collapsed_value)]#.encode("unicode_escape")]
else:
value = str(value)#.encode("unicode_escape")
row.append(value)
# print row
data.append(row)
return data, cols
def parseGafData(file):
cols = []
data = {}
# '10d04a01-5ed8-49c8-b724-d6aa4df5a98d': {
# 'annotation_extension': '',
# 'aspect': '',
# 'assigned_by': 'CPT',
# 'date': '2017-05-04T16:25:22.161916Z',
# 'db': 'UniProtKB',
# 'db_reference': 'GO_REF:0000100',
# 'evidence_code': 'ISA',
# 'gene': '0d307196-833d-46e8-90e9-d80f7a041d88',
# 'go_id': 'GO:0039660',
# 'go_term': 'structural constituent of virion',
# 'id': '10d04a01-5ed8-49c8-b724-d6aa4df5a98d',
# 'notes': 'hit was putative minor structural protein',
# 'owner': 'amarc1@tamu.edu',
# 'with_or_from': 'UNIREF90:B2ZYZ7'
# },
for row in file:
if row.startswith("#"):
# Header
cols = (
row.strip().replace("# ", "").replace("GO Term", "go_term").split("\t")
)
else:
line = row.strip().split("\t")
tmp = dict(zip(cols, line))
if "gene" not in tmp.keys():
continue
if tmp["gene"] not in data:
data[tmp["gene"]] = []
data[tmp["gene"]].append(tmp)
return data
def evaluate_and_report(
annotations,
genome,
types="gene",
reportTemplateName="phage_annotation_validator.html",
annotationTableCols="",
gafData=None,
    searchSubs=False,
):
"""
Generate our HTML evaluation of the genome
"""
# Get features from GFF file
seq_dict = SeqIO.to_dict(SeqIO.parse(genome, "fasta"))
# Get the first GFF3 record
# TODO: support multiple GFF3 files.
at_table_data = []
gaf = {}
if gafData:
gaf = parseGafData(gafData)
for record in gffParse(annotations, base_dict=seq_dict):
if reportTemplateName.endswith(".html"):
record.id = record.id.replace(".", "-")
log.info("Producing an annotation table for %s" % record.id)
annotation_table_data, annotation_table_col_names = annotation_table_report(
record, types, annotationTableCols, gaf, searchSubs
)
at_table_data.append((record, annotation_table_data))
# break
# This is data that will go into our HTML template
kwargs = {
"annotation_table_data": at_table_data,
"annotation_table_col_names": annotation_table_col_names,
}
env = Environment(
loader=FileSystemLoader(SCRIPT_PATH), trim_blocks=True, lstrip_blocks=True
)
if reportTemplateName.endswith(".html"):
env.filters["nice_id"] = str(get_gff3_id).replace(".", "-")
else:
env.filters["nice_id"] = get_gff3_id
def join(listy):
return "\n".join(listy)
env.filters.update({"join": join})
tpl = env.get_template(reportTemplateName)
return tpl.render(**kwargs).encode("utf-8")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="rebase gff3 features against parent locations", epilog=""
)
parser.add_argument(
"annotations", type=argparse.FileType("r"), help="Parent GFF3 annotations"
)
parser.add_argument("genome", type=argparse.FileType("r"), help="Genome Sequence")
parser.add_argument(
"--types",
help="Select extra types to display in output (Will always include gene)",
)
parser.add_argument(
"--reportTemplateName",
help="Report template file name",
default="phageqc_report_full.html",
)
parser.add_argument(
"--annotationTableCols",
help="Select columns to report in the annotation table output format",
)
parser.add_argument(
"--gafData", help="CPT GAF-like table", type=argparse.FileType("r")
)
parser.add_argument(
"--searchSubs", help="Attempt to populate fields from sub-features if qualifier is empty", action="store_true"
)
args = parser.parse_args()
print(evaluate_and_report(**vars(args)).decode("utf-8"))
|
TAMU-CPT/galaxy-tools
|
tools/phage/phage_annotation_table.py
|
Python
|
gpl-3.0
| 19,472 | 0.004725 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from oslo.config import cfg
from nova.api.openstack import compute
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import servers
from nova.compute import api as compute_api
from nova.compute import flavors
from nova import db
import nova.db.api
from nova.network import manager
from nova.openstack.common import jsonutils
from nova.openstack.common import rpc
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance
from nova.tests.image import fake
CONF = cfg.CONF
FAKE_UUID = fakes.FAKE_UUID
def fake_gen_uuid():
return FAKE_UUID
def return_security_group(context, instance_id, security_group_id):
pass
class SchedulerHintsTestCase(test.TestCase):
def setUp(self):
super(SchedulerHintsTestCase, self).setUp()
self.fake_instance = fakes.stub_instance(1, uuid=FAKE_UUID)
self.app = compute.APIRouterV3(init_only=('servers',
'os-scheduler-hints'))
def test_create_server_without_hints(self):
def fake_create(*args, **kwargs):
self.assertEqual(kwargs['scheduler_hints'], {})
return ([self.fake_instance], '')
self.stubs.Set(nova.compute.api.API, 'create', fake_create)
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.content_type = 'application/json'
body = {'server': {
'name': 'server_test',
'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175',
'flavor_ref': '1',
}}
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(202, res.status_int)
def test_create_server_with_hints(self):
def fake_create(*args, **kwargs):
self.assertEqual(kwargs['scheduler_hints'], {'a': 'b'})
return ([self.fake_instance], '')
self.stubs.Set(nova.compute.api.API, 'create', fake_create)
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.content_type = 'application/json'
body = {
'server': {
'name': 'server_test',
'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175',
'flavor_ref': '1',
},
'os-scheduler-hints:scheduler_hints': {'a': 'b'},
}
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(202, res.status_int)
def test_create_server_bad_hints(self):
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.content_type = 'application/json'
body = {
'server': {
'name': 'server_test',
'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175',
'flavor_ref': '1',
},
'os-scheduler-hints:scheduler_hints': 'here',
}
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(400, res.status_int)
class ServersControllerCreateTest(test.TestCase):
def setUp(self):
"""Shared implementation for tests below that create instance."""
super(ServersControllerCreateTest, self).setUp()
self.flags(verbose=True,
enable_instance_password=True)
self.instance_cache_num = 0
self.instance_cache_by_id = {}
self.instance_cache_by_uuid = {}
ext_info = plugins.LoadedExtensionInfo()
self.controller = servers.ServersController(extension_info=ext_info)
CONF.set_override('extensions_blacklist', 'os-scheduler-hints',
'osapi_v3')
self.no_scheduler_hints_controller = servers.ServersController(
extension_info=ext_info)
def instance_create(context, inst):
inst_type = flavors.get_flavor_by_flavor_id(3)
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def_image_ref = 'http://localhost/images/%s' % image_uuid
self.instance_cache_num += 1
instance = fake_instance.fake_db_instance(**{
'id': self.instance_cache_num,
'display_name': inst['display_name'] or 'test',
'uuid': FAKE_UUID,
'instance_type': dict(inst_type),
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
'image_ref': inst.get('image_ref', def_image_ref),
'user_id': 'fake',
'project_id': 'fake',
'reservation_id': inst['reservation_id'],
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"config_drive": None,
"progress": 0,
"fixed_ips": [],
"task_state": "",
"vm_state": "",
"root_device_name": inst.get('root_device_name', 'vda'),
})
self.instance_cache_by_id[instance['id']] = instance
self.instance_cache_by_uuid[instance['uuid']] = instance
return instance
def instance_get(context, instance_id):
"""Stub for compute/api create() pulling in instance after
scheduling
"""
return self.instance_cache_by_id[instance_id]
def instance_update(context, uuid, values):
instance = self.instance_cache_by_uuid[uuid]
instance.update(values)
return instance
def server_update(context, instance_uuid, params):
inst = self.instance_cache_by_uuid[instance_uuid]
inst.update(params)
return (inst, inst)
def fake_method(*args, **kwargs):
pass
def project_get_networks(context, user_id):
return dict(id='1', host='localhost')
def queue_get_for(context, *args):
return 'network_topic'
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fake.stub_out_image_service(self.stubs)
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
self.stubs.Set(db, 'instance_add_security_group',
return_security_group)
self.stubs.Set(db, 'project_get_networks',
project_get_networks)
self.stubs.Set(db, 'instance_create', instance_create)
self.stubs.Set(db, 'instance_system_metadata_update',
fake_method)
self.stubs.Set(db, 'instance_get', instance_get)
self.stubs.Set(db, 'instance_update', instance_update)
self.stubs.Set(rpc, 'cast', fake_method)
self.stubs.Set(db, 'instance_update_and_get_original',
server_update)
self.stubs.Set(rpc, 'queue_get_for', queue_get_for)
self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
fake_method)
def _test_create_extra(self, params, no_image=False,
override_controller=None):
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
server = dict(name='server_test', image_ref=image_uuid, flavor_ref=2)
if no_image:
server.pop('image_ref', None)
server.update(params)
body = dict(server=server)
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
if override_controller:
server = override_controller.create(req, body).obj['server']
else:
server = self.controller.create(req, body).obj['server']
def test_create_instance_with_scheduler_hints_disabled(self):
hints = {'a': 'b'}
params = {'scheduler_hints': hints}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertNotIn('scheduler_hints', kwargs)
# self.assertEqual(kwargs['scheduler_hints'], {})
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params,
override_controller=self.no_scheduler_hints_controller)
def test_create_instance_with_scheduler_hints_enabled(self):
hints = {'a': 'b'}
params = {'scheduler_hints': hints}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['scheduler_hints'], hints)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params)
class TestServerCreateRequestXMLDeserializer(test.TestCase):
def setUp(self):
super(TestServerCreateRequestXMLDeserializer, self).setUp()
ext_info = plugins.LoadedExtensionInfo()
controller = servers.ServersController(extension_info=ext_info)
self.deserializer = servers.CreateDeserializer(controller)
def test_request_with_scheduler_hints_and_alternate_namespace_prefix(self):
serial_request = """
<ns2:server xmlns:ns2="http://docs.openstack.org/compute/api/v3"
name="new-server-test"
image_ref="1"
flavor_ref="2">
<ns2:metadata><ns2:meta key="hello">world</ns2:meta></ns2:metadata>
<os:scheduler_hints
xmlns:os="http://docs.openstack.org/compute/ext/scheduler-hints/api/v3">
<hypervisor>xen</hypervisor>
<near>eb999657-dd6b-464e-8713-95c532ac3b18</near>
</os:scheduler_hints>
</ns2:server>
"""
request = self.deserializer.deserialize(serial_request)
expected = {
"server": {
'os-scheduler-hints:scheduler_hints': {
'hypervisor': ['xen'],
'near': ['eb999657-dd6b-464e-8713-95c532ac3b18']
},
"name": "new-server-test",
"image_ref": "1",
"flavor_ref": "2",
"metadata": {
"hello": "world"
}
}
}
self.assertEquals(request['body'], expected)
def test_request_with_scheduler_hints(self):
serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v3"
xmlns:os-scheduler-hints=
"http://docs.openstack.org/compute/ext/scheduler-hints/api/v3"
name="new-server-test" image_ref="1" flavor_ref="1">
<os-scheduler-hints:scheduler_hints>
<different_host>
7329b667-50c7-46a6-b913-cb2a09dfeee0
</different_host>
<different_host>
f31efb24-34d2-43e1-8b44-316052956a39
</different_host>
</os-scheduler-hints:scheduler_hints>
</server>"""
request = self.deserializer.deserialize(serial_request)
expected = {"server": {
"name": "new-server-test",
"image_ref": "1",
"flavor_ref": "1",
"os-scheduler-hints:scheduler_hints": {
"different_host": [
"7329b667-50c7-46a6-b913-cb2a09dfeee0",
"f31efb24-34d2-43e1-8b44-316052956a39",
]
}
}}
self.assertEquals(request['body'], expected)
|
ntt-sic/nova
|
nova/tests/api/openstack/compute/plugins/v3/test_scheduler_hints.py
|
Python
|
apache-2.0
| 12,196 | 0.000246 |
#
# Copyright 2008-2018 Universidad Complutense de Madrid
#
# This file is part of Numina
#
# SPDX-License-Identifier: GPL-3.0+
# License-Filename: LICENSE.txt
#
"""Different methods for combining lists of arrays."""
import numpy
import numina.array._combine as intl_combine
CombineError = intl_combine.CombineError
mean_method = intl_combine.mean_method
def mean(arrays, masks=None, dtype=None, out=None,
zeros=None, scales=None,
weights=None):
"""Combine arrays using the mean, with masks and offsets.
Arrays and masks are a list of array objects. All input arrays
have the same shape. If present, the masks have the same shape
also.
The function returns an array with one more dimension than the
inputs and with size (3, shape). out[0] contains the mean,
out[1] the variance and out[2] the number of points used.
:param arrays: a list of arrays
:param masks: a list of mask arrays, True values are masked
:param dtype: data type of the output
:param out: optional output, with one more axis than the input arrays
:return: mean, variance of the mean and number of points stored
Example:
>>> import numpy
>>> image = numpy.array([[1., 3.], [1., -1.4]])
>>> inputs = [image, image + 1]
>>> mean(inputs)
array([[[ 1.5, 3.5],
[ 1.5, -0.9]],
<BLANKLINE>
[[ 0.5, 0.5],
[ 0.5, 0.5]],
<BLANKLINE>
[[ 2. , 2. ],
[ 2. , 2. ]]])
"""
return generic_combine(intl_combine.mean_method(), arrays, masks=masks,
dtype=dtype, out=out,
zeros=zeros, scales=scales,
weights=weights)
def median(arrays, masks=None, dtype=None, out=None,
zeros=None, scales=None,
weights=None):
"""Combine arrays using the median, with masks.
Arrays and masks are a list of array objects. All input arrays
have the same shape. If present, the masks have the same shape
also.
The function returns an array with one more dimension than the
    inputs and with size (3, shape). out[0] contains the median,
out[1] the variance and out[2] the number of points used.
:param arrays: a list of arrays
:param masks: a list of mask arrays, True values are masked
:param dtype: data type of the output
:param out: optional output, with one more axis than the input arrays
:return: median, variance of the median and number of points stored
"""
return generic_combine(intl_combine.median_method(), arrays, masks=masks,
dtype=dtype, out=out,
zeros=zeros, scales=scales, weights=weights)
def sigmaclip(arrays, masks=None, dtype=None, out=None,
zeros=None, scales=None, weights=None,
low=3., high=3.):
"""Combine arrays using the sigma-clipping, with masks.
Inputs and masks are a list of array objects. All input arrays
have the same shape. If present, the masks have the same shape
also.
The function returns an array with one more dimension than the
inputs and with size (3, shape). out[0] contains the mean,
out[1] the variance and out[2] the number of points used.
:param arrays: a list of arrays
:param masks: a list of mask arrays, True values are masked
:param dtype: data type of the output
:param out: optional output, with one more axis than the input arrays
    :param low: lower rejection threshold, in units of sigma
    :param high: upper rejection threshold, in units of sigma
:return: mean, variance of the mean and number of points stored
"""
return generic_combine(intl_combine.sigmaclip_method(low, high), arrays,
masks=masks, dtype=dtype, out=out,
zeros=zeros, scales=scales, weights=weights)
def minmax(arrays, masks=None, dtype=None, out=None, zeros=None,
scales=None, weights=None, nmin=1, nmax=1):
"""Combine arrays using mix max rejection, with masks.
Inputs and masks are a list of array objects. All input arrays
have the same shape. If present, the masks have the same shape
also.
The function returns an array with one more dimension than the
inputs and with size (3, shape). out[0] contains the mean,
out[1] the variance and out[2] the number of points used.
:param arrays: a list of arrays
:param masks: a list of mask arrays, True values are masked
:param dtype: data type of the output
:param out: optional output, with one more axis than the input arrays
    :param nmin: number of lowest values to reject at each position
    :param nmax: number of highest values to reject at each position
:return: mean, variance of the mean and number of points stored
"""
return generic_combine(intl_combine.minmax_method(nmin, nmax), arrays,
masks=masks, dtype=dtype, out=out,
zeros=zeros, scales=scales, weights=weights)
def quantileclip(arrays, masks=None, dtype=None, out=None,
zeros=None, scales=None, weights=None,
fclip=0.10):
"""Combine arrays using the sigma-clipping, with masks.
Inputs and masks are a list of array objects. All input arrays
have the same shape. If present, the masks have the same shape
also.
The function returns an array with one more dimension than the
inputs and with size (3, shape). out[0] contains the mean,
out[1] the variance and out[2] the number of points used.
:param arrays: a list of arrays
:param masks: a list of mask arrays, True values are masked
:param dtype: data type of the output
:param out: optional output, with one more axis than the input arrays
:param fclip: fraction of points removed on both ends. Maximum is 0.4 (80% of points rejected)
:return: mean, variance of the mean and number of points stored
"""
return generic_combine(intl_combine.quantileclip_method(fclip), arrays,
masks=masks, dtype=dtype, out=out,
zeros=zeros, scales=scales, weights=weights)
def flatcombine(arrays, masks=None, dtype=None, scales=None,
low=3.0, high=3.0, blank=1.0):
"""Combine flat arrays.
:param arrays: a list of arrays
:param masks: a list of mask arrays, True values are masked
:param dtype: data type of the output
    :param low: lower rejection threshold, in units of sigma
    :param high: upper rejection threshold, in units of sigma
:param blank: non-positive values are substituted by this on output
:return: mean, variance of the mean and number of points stored
"""
result = sigmaclip(arrays, masks=masks,
dtype=dtype, scales=scales,
low=low, high=high)
# Substitute values <= 0 by blank
mm = result[0] <= 0
result[0, mm] = blank
# Add values to mask
result[1:2, mm] = 0
return result
def zerocombine(arrays, masks, dtype=None, scales=None):
"""Combine zero arrays.
:param arrays: a list of arrays
:param masks: a list of mask arrays, True values are masked
:param dtype: data type of the output
:param scales:
:return: median, variance of the median and number of points stored
"""
result = median(arrays, masks=masks,
dtype=dtype, scales=scales)
return result
def sum(arrays, masks=None, dtype=None, out=None,
zeros=None, scales=None):
"""Combine arrays by addition, with masks and offsets.
Arrays and masks are a list of array objects. All input arrays
have the same shape. If present, the masks have the same shape
also.
The function returns an array with one more dimension than the
inputs and with size (3, shape). out[0] contains the sum,
out[1] the variance and out[2] the number of points used.
:param arrays: a list of arrays
:param masks: a list of mask arrays, True values are masked
:param dtype: data type of the output
:param out: optional output, with one more axis than the input arrays
:return: sum, variance of the sum and number of points stored
Example:
>>> import numpy
>>> image = numpy.array([[1., 3.], [1., -1.4]])
>>> inputs = [image, image + 1]
>>> sum(inputs)
array([[[ 1.5, 3.5],
[ 1.5, -0.9]],
<BLANKLINE>
[[ 0.5, 0.5],
[ 0.5, 0.5]],
<BLANKLINE>
[[ 2. , 2. ],
[ 2. , 2. ]]])
"""
return generic_combine(intl_combine.sum_method(), arrays, masks=masks,
dtype=dtype, out=out,
zeros=zeros, scales=scales)
def generic_combine(method, arrays, masks=None, dtype=None,
out=None, zeros=None, scales=None, weights=None):
"""Stack arrays using different methods.
:param method: the combination method
:type method: PyCObject
:param arrays: a list of arrays
:param masks: a list of mask arrays, True values are masked
:param dtype: data type of the output
:param zeros:
:param scales:
:param weights:
    :return: combined values, their variance and the number of points stored
"""
arrays = [numpy.asarray(arr, dtype=dtype) for arr in arrays]
if masks is not None:
masks = [numpy.asarray(msk) for msk in masks]
if out is None:
# Creating out if needed
# We need three numbers
try:
outshape = (3,) + tuple(arrays[0].shape)
out = numpy.zeros(outshape, dtype)
except AttributeError:
raise TypeError('First element in arrays does '
'not have .shape attribute')
else:
out = numpy.asanyarray(out)
intl_combine.generic_combine(
method, arrays,
out[0], out[1], out[2],
masks, zeros, scales, weights
)
return out
|
guaix-ucm/numina
|
numina/array/combine.py
|
Python
|
gpl-3.0
| 9,851 | 0.000203 |
"""
Constants used across the ORM in general.
"""
# Separator used to split filter strings apart.
LOOKUP_SEP = '__'
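# For example, a queryset lookup such as "author__name__icontains" is split on
# LOOKUP_SEP into the relation/field/lookup parts ['author', 'name', 'icontains'].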
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/lib/django-1.5/django/db/models/constants.py
|
Python
|
bsd-3-clause
| 118 | 0.008475 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import time
import numpy
import os
import pmt
from gnuradio import gr, gr_unittest
from gnuradio import blocks
class test_multiply_matrix_ff (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
self.multiplier = None
def tearDown (self):
self.tb = None
self.multiplier = None
def run_once(self, X_in, A, tpp=gr.TPP_DONT, A2=None, tags=None, msg_A=None):
""" Run the test for given input-, output- and matrix values.
Every row from X_in is considered an input signal on a port. """
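        # Wires one vector_source_f per row of X_in into the multiplier and one
        # vector_sink_f per output port, runs the flowgraph, then compares the
        # sink contents against A * X_in computed with numpy.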
X_in = numpy.matrix(X_in)
A_matrix = numpy.matrix(A)
(N, M) = A_matrix.shape
self.assertTrue(N == X_in.shape[0])
# Calc expected
Y_out_exp = numpy.matrix(numpy.zeros((M, X_in.shape[1])))
self.multiplier = blocks.multiply_matrix_ff(A, tpp)
if A2 is not None:
self.multiplier.set_A(A2)
A = A2
A_matrix = numpy.matrix(A)
for i in xrange(N):
if tags is None:
these_tags = ()
else:
these_tags = (tags[i],)
self.tb.connect(blocks.vector_source_f(X_in[i].tolist()[0], tags=these_tags), (self.multiplier, i))
sinks = []
for i in xrange(M):
sinks.append(blocks.vector_sink_f())
self.tb.connect((self.multiplier, i), sinks[i])
# Run and check
self.tb.run()
for i in xrange(X_in.shape[1]):
Y_out_exp[:,i] = A_matrix * X_in[:,i]
Y_out = [list(x.data()) for x in sinks]
if tags is not None:
self.the_tags = []
for i in xrange(M):
self.the_tags.append(sinks[i].tags())
self.assertEqual(list(Y_out), Y_out_exp.tolist())
def test_001_t (self):
""" Simplest possible check: N==M, unit matrix """
X_in = (
(1, 2, 3, 4),
(5, 6, 7, 8),
)
A = (
(1, 0),
(0, 1),
)
self.run_once(X_in, A)
def test_002_t (self):
""" Switch check: N==M, flipped unit matrix """
X_in = (
(1, 2, 3, 4),
(5, 6, 7, 8),
)
A = (
(0, 1),
(1, 0),
)
self.run_once(X_in, A)
def test_003_t (self):
""" Average """
X_in = (
(1, 1, 1, 1),
(2, 2, 2, 2),
)
A = (
(0.5, 0.5),
(0.5, 0.5),
)
self.run_once(X_in, A)
def test_004_t (self):
""" Set """
X_in = (
(1, 2, 3, 4),
(5, 6, 7, 8),
)
A1 = (
(1, 0),
(0, 1),
)
A2 = (
(0, 1),
(1, 0),
)
self.run_once(X_in, A1, A2=A2)
def test_005_t (self):
""" Tags """
X_in = (
(1, 2, 3, 4),
(5, 6, 7, 8),
)
A = (
(0, 1), # Flip them round
(1, 0),
)
tag1 = gr.tag_t()
tag1.offset = 0
tag1.key = pmt.intern("in1")
tag1.value = pmt.PMT_T
tag2 = gr.tag_t()
tag2.offset = 0
tag2.key = pmt.intern("in2")
tag2.value = pmt.PMT_T
self.run_once(X_in, A, tpp=gr.TPP_ONE_TO_ONE, tags=(tag1, tag2))
self.assertTrue(pmt.equal(tag1.key, self.the_tags[0][0].key))
self.assertTrue(pmt.equal(tag2.key, self.the_tags[1][0].key))
#def test_006_t (self):
#""" Message passing """
#X_in = (
#(1, 2, 3, 4),
#(5, 6, 7, 8),
#)
#A1 = (
#(1, 0),
#(0, 1),
#)
#msg_A = (
#(0, 1),
#(1, 0),
#)
#self.run_once(X_in, A1, msg_A=msg_A)
if __name__ == '__main__':
#gr_unittest.run(test_multiply_matrix_ff, "test_multiply_matrix_ff.xml")
gr_unittest.run(test_multiply_matrix_ff)
|
douggeiger/gnuradio
|
gr-blocks/python/blocks/qa_multiply_matrix_ff.py
|
Python
|
gpl-3.0
| 4,810 | 0.009356 |
import pyautogui, win32api, win32con, ctypes, autoit
from PIL import ImageOps, Image, ImageGrab
from numpy import *
import os
import time
import cv2
import random
from Bot import *
def main():
bot = Bot()
autoit.win_wait(bot.title, 5)
counter = 0
poitonUse = 0
cycle = True
fullCounter = 0
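    # Main loop sketch: watch own HP, drink a potion when it drops, pick and
    # attack targets with the F-key hotbar, and stop (after a screenshot) once
    # the character is dead.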
while cycle:
hpstatus = bot.checkOwnHp()
print 'hp ' + str(hpstatus)
if hpstatus == 0:
autoit.control_send(bot.title, '', '{F9}', 0)
bot.sleep(0.3,0.6)
print 'Dead'
cv2.imwrite('Dead' + str(int(time.time())) + '.png',bot.getScreen(leftCornerx,leftCornery,x2,fullY2))
cycle = False
if hpstatus == 1:
if poitonUse == 0:
autoit.control_send(bot.title, '', '{F10}', 0)
poitonUse += 1
if poitonUse > 5:
poitonUse = 0
else:
poitonUse = 0
res = bot.findHP();
print 'tgs ' + str(res)
if res == 3:
fullCounter += 1
print 'fc ' + str(fullCounter)
autoit.control_send(bot.title, '', '{F1}', 0)
else:
fullCounter = 0
if fullCounter > 4:
autoit.control_send(bot.title, '', '{ESC}', 0)
bot.sleep(0.3,0.6)
autoit.control_send(bot.title, '', '{F3}', 0)
bot.sleep(0.1,0.3)
autoit.control_send(bot.title, '', '{F1}', 0)
# bot.mouseRotate()
fullCounter = 0
if res > 0:
autoit.control_send(bot.title, '', '{F1}', 0)
counter = 0
if res == 1 or res == 3:
bot.sleep(0.3,0.6)
if res > 1 and res < 3:
bot.sleep(1,3)
if res == 1:
autoit.control_send(bot.title, '', '{F3}', 0)
bot.sleep(0.3,0.6)
autoit.control_send(bot.title, '', '{F2}', 0)
bot.sleep(0.3,0.6)
autoit.control_send(bot.title, '', '{F1}', 0)
else:
fullCounter = 0
if counter < 3:
autoit.control_send(bot.title, '', '{F3}', 0)
bot.sleep(0.5,0.8)
autoit.control_send(bot.title, '', '{F1}', 0)
print 'F3'
if counter > 2:
# bot.findTarget()
autoit.control_send(bot.title, '', '{F7}', 0)
# if counter > 3:
# autoit.control_send(bot.title, '', '{F8}', 0)
# counter = 0
counter += 1
print 'cnt ' + str(counter)
pass
if __name__ == '__main__':
main()
|
oyajiro/l2bot
|
hf/wl.py
|
Python
|
artistic-2.0
| 2,661 | 0.007516 |
# -*- coding: utf-8 -*-
import system_tests
class TestCvePoC(metaclass=system_tests.CaseMeta):
url = "https://github.com/Exiv2/exiv2/issues/208"
filename = "$data_path/2018-01-09-exiv2-crash-001.tiff"
commands = ["$exiv2 " + filename]
retval = [1]
stdout = [""]
stderr = [
"""$exiv2_exception_message """ + filename + """:
$filename: $kerFileContainsUnknownImageType
"""]
|
AlienCowEatCake/ImageViewer
|
src/ThirdParty/Exiv2/exiv2-0.27.5-Source/tests/bugfixes/github/test_CVE_2017_17722.py
|
Python
|
gpl-3.0
| 409 | 0 |
import subprocess
import sys
import os
import time
from collections import namedtuple
sys.path.append(os.path.join(os.getcwd(), "src"))
from utils import settings
from utils import logger
settings.initialize('watcher')
original_plist = '/opt/TopPatch/agent/daemon/com.toppatch.agent.plist'
osx_plist = '/System/Library/LaunchDaemons/com.toppatch.agent.plist'
daemon_label = 'com.toppatch.agent'
cp_command = ['/bin/cp', original_plist, osx_plist]
list_command = ['/bin/launchctl', 'list']
load_command = ['/bin/launchctl', 'load', '-w', osx_plist]
unload_command = ['/bin/launchctl', 'unload', '-w', osx_plist]
start_command = ['/bin/launchctl', 'start', daemon_label]
stop_command = ['/bin/launchctl', 'stop', daemon_label]
check_in_seconds = 60
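# The __main__ loop below polls `launchctl list` every check_in_seconds and
# starts or (re)loads the agent daemon as needed.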
def start_agent():
result = False
try:
process = subprocess.Popen(start_command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
raw_output, error_output = process.communicate()
if raw_output == '' and error_output == '':
logger.log('Agent started.')
result = True
elif 'No such process' in error_output:
logger.log('Agent not found.')
else:
logger.log('Unknown output: "%s"' % error_output)
except Exception as e:
logger.log("Could not start agent.", logger.LogLevel.Error)
logger.log_exception(e)
return result
def restart_agent():
try:
process = subprocess.Popen(stop_command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
raw_output, error_output = process.communicate()
if raw_output == '' and error_output == '':
logger.log('Agent has restarted.')
elif 'No such process' in error_output:
logger.log('Agent not found. Nothing to restart.')
else:
logger.log('Unknown output: "%s"' % error_output)
except Exception as e:
logger.log("Could not start agent.", logger.LogLevel.Error)
logger.log_exception(e)
def load_agent():
try:
process = subprocess.Popen(load_command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
raw_output, error_output = process.communicate()
if raw_output == '' and error_output == '':
logger.log('Agent loaded.')
elif 'Already loaded' in error_output:
logger.log('Agent is already loaded.')
else:
logger.log('Unknown output: "%s"' % error_output)
except Exception as e:
logger.log("Could not load agent.", logger.LogLevel.Error)
logger.log_exception(e)
def unload_agent():
try:
process = subprocess.Popen(unload_command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
raw_output, error_output = process.communicate()
if raw_output == '' and error_output == '':
logger.log('Agent unloaded.')
elif 'Error unloading' in error_output:
logger.log('Agent is not loaded/installed.')
else:
logger.log('Unknown output: "%s"' % error_output)
except Exception as e:
logger.log("Could not load agent.", logger.LogLevel.Error)
logger.log_exception(e)
AgentStatus = namedtuple('AgentStatus', ['loaded', 'running'])
def agent_running_stats():
ps_info = []
running = False
loaded = False
process = subprocess.Popen(list_command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
raw_output, error_output = process.communicate()
for line in raw_output.splitlines():
pid, run, pname = line.split('\t')
ps_info.append((pname, run, pid))
for p in ps_info:
if daemon_label == p[0]:
# p[1] can either be:
# : '0' meaning not running.
# : '-' meaning its running.
loaded = True
if p[1] == '-':
running = True
break
elif p[1] == '0':
running = False
status = AgentStatus(loaded, running)
logger.log(str(status), logger.LogLevel.Debug)
return status
if __name__ == '__main__':
logger.log("Starting watcher daemon.")
while True:
time.sleep(check_in_seconds)
agent_status = agent_running_stats()
if agent_status.loaded:
if agent_status.running:
logger.log("Agent is running.", logger.LogLevel.Debug)
continue
else:
if not start_agent():
load_agent()
else:
load_agent()
|
vFense/vFenseAgent-nix
|
agent/watcher_mac.py
|
Python
|
lgpl-3.0
| 4,731 | 0.001057 |
# When some software has issues and we need to fix it in a
# hackish way, we put it in here. This one day will be empty.
import copy_reg
from twisted.web.client import SchemeNotSupported
from txsocksx.http import SOCKS5Agent as SOCKS5AgentOriginal
def patched_reduce_ex(self, proto):
"""
    This is a hack to overcome a bug in one of Python's core functions. It is
located inside of copy_reg and is called _reduce_ex.
Some background on the issue can be found here:
http://stackoverflow.com/questions/569754/how-to-tell-for-which-object-attribute-pickle
http://stackoverflow.com/questions/2049849/why-cant-i-pickle-this-object
There was also an open bug on the pyyaml trac repo, but it got closed because
they could not reproduce.
http://pyyaml.org/ticket/190
It turned out to be easier to patch the python core library than to monkey
patch yaml.
XXX see if there is a better way. sigh...
"""
_HEAPTYPE = 1 << 9
assert proto < 2
for base in self.__class__.__mro__:
if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE:
break
else:
base = object # not really reachable
if base is object:
state = None
elif base is int:
state = None
else:
if base is self.__class__:
raise TypeError("can't pickle %s objects" % base.__name__)
state = base(self)
args = (self.__class__, base, state)
try:
getstate = self.__getstate__
except AttributeError:
if getattr(self, "__slots__", None):
raise TypeError("a class that defines __slots__ without "
"defining __getstate__ cannot be pickled")
try:
dict = self.__dict__
except AttributeError:
dict = None
else:
dict = getstate()
if dict:
return copy_reg._reconstructor, args, dict
else:
return copy_reg._reconstructor, args
class SOCKS5Agent(SOCKS5AgentOriginal):
"""
This is a quick hack to fix:
https://github.com/habnabit/txsocksx/issues/9
"""
def _getEndpoint(self, scheme_or_uri, host=None, port=None):
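        # Accepts either a parsed URI or an explicit (scheme, host, port)
        # triple; HTTPS endpoints are wrapped in TLS using whichever
        # context-factory hook this version of twisted.web exposes.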
if host is not None:
scheme = scheme_or_uri
else:
scheme = scheme_or_uri.scheme
host = scheme_or_uri.host
port = scheme_or_uri.port
if scheme not in ('http', 'https'):
raise SchemeNotSupported('unsupported scheme', scheme)
endpoint = self.endpointFactory(
host, port, self.proxyEndpoint, **self.endpointArgs)
if scheme == 'https':
if hasattr(self, '_wrapContextFactory'):
tlsPolicy = self._wrapContextFactory(host, port)
elif hasattr(self, '_policyForHTTPS'):
tlsPolicy = self._policyForHTTPS.creatorForNetloc(host, port)
else:
raise NotImplementedError("can't figure out how to make a context factory")
endpoint = self._tlsWrapper(tlsPolicy, endpoint)
return endpoint
|
Karthikeyan-kkk/ooni-probe
|
ooni/utils/hacks.py
|
Python
|
bsd-2-clause
| 3,059 | 0.000654 |
try:
import _pygeapi
except ImportError as e:
e.msg += ' (this module can be imported only from greenev)'
raise
from .evloop import event_loop
|
ateska/striga2-pocs
|
greenev/pyge/__init__.py
|
Python
|
unlicense
| 147 | 0.020408 |
# -*- coding: ISO-8859-15 -*-
from twisted.web import client
from twisted.internet.defer import inlineCallbacks
from core.Uusipuu import UusipuuModule
import urllib, simplejson
class Module(UusipuuModule):
def startup(self):
self.log('google.py loaded')
@inlineCallbacks
def cmd_google(self, user, target, params):
self.log('Querying google for "%s"' % params)
data = yield client.getPage(
'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&%s' %
urllib.urlencode({'q': params.strip()}))
json = simplejson.loads(data)
results = json['responseData']['results']
if not results:
            self.log('No results found matching "%s"' % params)
            self.chanmsg('No results found matching "%s"' % params)
return
self.chanmsg('%s: %s' % \
(results[0]['titleNoFormatting'].encode('utf-8'),
results[0]['url'].encode('utf-8')))
# vim: set et sw=4:
|
desaster/uusipuu
|
modules/google.py
|
Python
|
bsd-2-clause
| 1,007 | 0.005958 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for UpdateCertificateAuthority
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-private-ca
# [START privateca_v1beta1_generated_CertificateAuthorityService_UpdateCertificateAuthority_async]
from google.cloud.security import privateca_v1beta1
async def sample_update_certificate_authority():
# Create a client
client = privateca_v1beta1.CertificateAuthorityServiceAsyncClient()
# Initialize request argument(s)
certificate_authority = privateca_v1beta1.CertificateAuthority()
certificate_authority.type_ = "SUBORDINATE"
certificate_authority.tier = "DEVOPS"
certificate_authority.config.reusable_config.reusable_config = "reusable_config_value"
certificate_authority.key_spec.cloud_kms_key_version = "cloud_kms_key_version_value"
request = privateca_v1beta1.UpdateCertificateAuthorityRequest(
certificate_authority=certificate_authority,
)
# Make the request
operation = client.update_certificate_authority(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
# [END privateca_v1beta1_generated_CertificateAuthorityService_UpdateCertificateAuthority_async]
|
googleapis/python-security-private-ca
|
samples/generated_samples/privateca_v1beta1_generated_certificate_authority_service_update_certificate_authority_async.py
|
Python
|
apache-2.0
| 2,081 | 0.002403 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp import models, fields
class MailMessage(models.Model):
""" Add relation to communication configuration to track generated
e-mails.
"""
_inherit = 'mail.mail'
##########################################################################
# FIELDS #
##########################################################################
communication_config_id = fields.Many2one('partner.communication.config')
|
emgirardin/compassion-modules
|
partner_communication/models/email.py
|
Python
|
agpl-3.0
| 908 | 0 |
__author__ = 'Ralph'
from ui.app import Application
if __name__ == '__main__':
from ui.app import Example
import wx
app = wx.App()
Example(None, title='Example')
app.MainLoop()
# application = Application()
# application.run()
# node1 = ImportARFF()
# node2 = SelectAttributes()
# node3 = SupportVectorMachine()
# node4 = SelectAttributes()
# node5 = ApplyModel()
#
# node1.get_config().set('file_name', '/Users/Ralph/datasets/imagemend/out/prepared/features_prepared.arff')
#
# node2.get_config().set('selector_type', 'subset')
# node2.get_config().set('attributes', ['M', 'F', 'age', 'id'])
#
# node3.get_config().set('kernel_type', 'rbf')
# node3.get_config().set('target', 'diagnosis')
# node3.get_config().set('auto_detect', True)
# node3.get_config().set('performance_measure', 'accuracy')
# node3.get_config().set('n_folds', 2)
# node3.get_config().set('n_grid_folds', 2)
# node3.get_config().set('model_output_dir', '/Users/Ralph/tmp/model')
#
# node4.get_config().set('selector_type', 'single')
# node4.get_config().set('attributes', ['diagnosis'])
#
# Connection(
# # ImportARFF -> SelectAttributes
# node1.get_output_port('output'), node2.get_input_port('input'))
# Connection(
# # SelectAttributes -> SVM
# node2.get_output_port('output'), node3.get_input_port('input'))
# Connection(
# # SelectAttributes -> SelectAttributes
# node2.get_output_port('output'), node4.get_input_port('input'))
# Connection(
# # SelectAttributes -> ApplyModel
# node4.get_output_port('output'), node5.get_input_port('input'))
# Connection(
# # SVM -> ApplyModel
# node3.get_output_port('model'), node5.get_input_port('model'))
#
# node1.execute()
#
# print('predictions: {}'.format(node5.get_output_port('output').get_data()))
|
rbrecheisen/pyminer
|
pyminer/pyminer.py
|
Python
|
apache-2.0
| 1,957 | 0.001533 |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import sys, urllib2
def main():
if len(sys.argv) < 2:
print("Error, usage: {0} <your url>".format(sys.argv[0]))
return 1
url = sys.argv[1]
print(urllib2.urlopen('http://t34.me/api/?u=' + url).read())
return 0
if __name__ == '__main__':
main()
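# Example invocation (added note): python api_python.py http://example.com/some/long/url
# prints the shortened URL returned by the t34.me API.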
|
z0rr0/t34.me
|
configs/api_python.py
|
Python
|
agpl-3.0
| 328 | 0.015244 |
"""
These are filters placed at the end of a tunnel for watching or modifying
the traffic.
"""
##############################################################################
from __future__ import absolute_import
LICENSE = """\
This file is part of pagekite.py.
Copyright 2010-2020, the Beanstalks Project ehf. and Bjarni Runar Einarsson
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU Affero General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your option) any
later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see: <http://www.gnu.org/licenses/>
"""
##############################################################################
import six
import re
import time
import pagekite.logging as logging
from pagekite.compat import *
class TunnelFilter:
"""Base class for watchers/filters for data going in/out of Tunnels."""
FILTERS = ('connected', 'data_in', 'data_out')
IDLE_TIMEOUT = 1800
def __init__(self, ui):
self.sid = {}
self.ui = ui
def clean_idle_sids(self, now=None):
now = now or time.time()
for sid in list(six.iterkeys(self.sid)):
if self.sid[sid]['_ts'] < now - self.IDLE_TIMEOUT:
del self.sid[sid]
def filter_set_sid(self, sid, info):
now = time.time()
if sid not in self.sid:
self.sid[sid] = {}
self.sid[sid].update(info)
self.sid[sid]['_ts'] = now
self.clean_idle_sids(now=now)
def filter_connected(self, tunnel, sid, data):
if sid not in self.sid:
self.sid[sid] = {}
self.sid[sid]['_ts'] = time.time()
return data
def filter_data_in(self, tunnel, sid, data):
if sid not in self.sid:
self.sid[sid] = {}
self.sid[sid]['_ts'] = time.time()
return data
def filter_data_out(self, tunnel, sid, data):
if sid not in self.sid:
self.sid[sid] = {}
self.sid[sid]['_ts'] = time.time()
return data
class TunnelWatcher(TunnelFilter):
"""Base class for watchers/filters for data going in/out of Tunnels."""
FILTERS = ('data_in', 'data_out')
def __init__(self, ui, watch_level=0):
TunnelFilter.__init__(self, ui)
self.watch_level = watch_level
def format_data(self, data, level):
if '\r\n\r\n' in data:
head, tail = data.split('\r\n\r\n', 1)
output = self.format_data(head, level)
output[-1] += '\\r\\n'
output.append('\\r\\n')
if tail:
output.extend(self.format_data(tail, level))
return output
else:
output = data.encode('string_escape').replace('\\n', '\\n\n')
if output.count('\\') > 0.15*len(output):
if level > 2:
output = [['', '']]
count = 0
for d in data:
output[-1][0] += '%2.2x' % ord(d)
output[-1][1] += '%c' % ((ord(d) > 31 and ord(d) < 127) and d or '.')
count += 1
if (count % 2) == 0:
output[-1][0] += ' '
if (count % 20) == 0:
output.append(['', ''])
return ['%-50s %s' % (l[0], l[1]) for l in output]
else:
return ['<< Binary bytes: %d >>' % len(data)]
else:
return output.strip().splitlines()
def now(self):
return ts_to_iso(int(10*time.time())/10.0
).replace('T', ' ').replace('00000', '')
def filter_data_in(self, tunnel, sid, data):
if data and self.watch_level[0] > 0:
self.ui.Notify('===[ INCOMING @ %s / %s ]===' % (self.now(), sid),
color=self.ui.WHITE, prefix=' __')
for line in self.format_data(data, self.watch_level[0]):
self.ui.Notify(line, prefix=' <=', now=-1, color=self.ui.GREEN)
return TunnelFilter.filter_data_in(self, tunnel, sid, data)
def filter_data_out(self, tunnel, sid, data):
if data and self.watch_level[0] > 1:
self.ui.Notify('===[ OUTGOING @ %s / %s ]===' % (self.now(), sid),
color=self.ui.WHITE, prefix=' __')
for line in self.format_data(data, self.watch_level[0]):
self.ui.Notify(line, prefix=' =>', now=-1, color=self.ui.BLUE)
return TunnelFilter.filter_data_out(self, tunnel, sid, data)
class HaproxyProtocolFilter(TunnelFilter):
"""Filter prefixes the HAProxy PROXY protocol info to requests."""
FILTERS = ('connected')
ENABLE = 'proxyproto'
def filter_connected(self, tunnel, sid, data):
info = self.sid.get(sid)
if info:
if not info.get(self.ENABLE, False):
pass
elif info[self.ENABLE] in ("1", True):
remote_ip = info['remote_ip']
if '.' in remote_ip:
          remote_ip = remote_ip.rsplit(':', 1)[-1]  # [-1] also handles plain IPv4 (no ':' present)
data = 'PROXY TCP%s %s 0.0.0.0 %s %s\r\n%s' % (
'4' if ('.' in remote_ip) else '6',
remote_ip, info['remote_port'], info['port'], data or '')
else:
logging.LogError(
'FIXME: Unimplemented PROXY protocol v%s\n' % info[self.ENABLE])
return TunnelFilter.filter_connected(self, tunnel, sid, data)
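# Worked example (added; values illustrative): for an IPv4 client 203.0.113.7
# connecting from source port 54321 to tunnel port 443, the prefix built above
# would be:
#
#     PROXY TCP4 203.0.113.7 0.0.0.0 54321 443\r\n<original request data>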
class HttpHeaderFilter(TunnelFilter):
"""Filter that adds X-Forwarded-For and X-Forwarded-Proto to requests."""
FILTERS = ('data_in')
HTTP_HEADER = re.compile('(?ism)^(([A-Z]+) ([^\n]+) HTTP/\d+\.\d+\s*)$')
DISABLE = 'rawheaders'
def filter_data_in(self, tunnel, sid, data):
info = self.sid.get(sid)
if (info and
info.get('proto') in ('http', 'http2', 'http3', 'websocket') and
not info.get(self.DISABLE, False)):
# FIXME: Check content-length and skip bodies entirely
http_hdr = self.HTTP_HEADER.search(data)
if http_hdr:
data = self.filter_header_data_in(http_hdr, data, info)
return TunnelFilter.filter_data_in(self, tunnel, sid, data)
def filter_header_data_in(self, http_hdr, data, info):
clean_headers = [
r'(?mi)^(X-(PageKite|Forwarded)-(For|Proto|Port):)'
]
add_headers = [
'X-Forwarded-For: %s' % info.get('remote_ip', 'unknown'),
'X-Forwarded-Proto: %s' % (info.get('using_tls') and 'https' or 'http'),
'X-PageKite-Port: %s' % info.get('port', 0)
]
if info.get('rewritehost', False):
add_headers.append('Host: %s' % info.get('rewritehost'))
clean_headers.append(r'(?mi)^(Host:)')
    if http_hdr.group(2).upper() in ('POST', 'PUT'):  # group(2) is the HTTP method
# FIXME: This is a bit ugly
add_headers.append('Connection: close')
clean_headers.append(r'(?mi)^(Connection|Keep-Alive):')
info['rawheaders'] = True
for hdr_re in clean_headers:
data = re.sub(hdr_re, 'X-Old-\\1', data)
return re.sub(self.HTTP_HEADER,
'\\1\n%s\r' % '\r\n'.join(add_headers),
data)
class HttpSecurityFilter(HttpHeaderFilter):
"""Filter that blocks known-to-be-dangerous requests."""
DISABLE = 'trusted'
HTTP_DANGER = re.compile('(?ism)^((get|post|put|patch|delete) '
# xampp paths, anything starting with /adm*
'((?:/+(?:xampp/|security/|licenses/|webalizer/|server-(?:status|info)|adm)'
'|[^\n]*/'
# WordPress admin pages
'(?:wp-admin/(?!admin-ajax|css/)|wp-config\.php'
# Hackzor tricks
'|system32/|\.\.|\.ht(?:access|pass)'
# phpMyAdmin and similar tools
'|(?:php|sql)?my(?:sql)?(?:adm|manager)'
# Setup pages for common PHP tools
'|(?:adm[^\n]*|install[^\n]*|setup)\.php)'
')[^\n]*)'
' HTTP/\d+\.\d+\s*)$')
REJECT = 'PAGEKITE_REJECT_'
def filter_header_data_in(self, http_hdr, data, info):
danger = self.HTTP_DANGER.search(data)
if danger:
self.ui.Notify('BLOCKED: %s %s' % (danger.group(2), danger.group(3)),
color=self.ui.RED, prefix='***')
self.ui.Notify('See https://pagekite.net/support/security/ for more'
' details.')
return self.REJECT+data
else:
return data
|
pagekite/PyPagekite
|
pagekite/proto/filters.py
|
Python
|
agpl-3.0
| 8,412 | 0.011531 |
from .allow_origin import allow_origin_tween_factory # noqa
from .api_headers import api_headers_tween_factory # noqa
from .basic_auth import basic_auth_tween_factory # noqa
|
victorlin/pyramid-handy
|
pyramid_handy/tweens/__init__.py
|
Python
|
mit
| 177 | 0 |
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= standard library imports ========================
from itertools import groupby
import six
# ============= enthought library imports =======================
from pyface.qt.QtGui import QTextEdit, QWidget, QHBoxLayout, QTextFormat, QColor, QPainter, QFrame, \
QSizePolicy, QPainterPath
from traits.trait_errors import TraitError
from traitsui.basic_editor_factory import BasicEditorFactory
from traitsui.qt4.editor import Editor
# ============= local library imports ==========================
from pychron.git_archive.diff_util import extract_line_numbers
def get_ranges(data):
return [[gi[0] for gi in g]
for k, g in groupby(enumerate(data),
lambda i_x: i_x[0] - i_x[1])]
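# Example (added note): get_ranges([1, 2, 3, 7, 8]) -> [[1, 2, 3], [7, 8]],
# i.e. consecutive line numbers are grouped into contiguous ranges.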
class QDiffConnector(QFrame):
_left_y = 0
_right_y = 0
def __init__(self):
super(QDiffConnector, self).__init__()
self.color = QColor(0, 100, 0, 100)
self.setSizePolicy(QSizePolicy(QSizePolicy.Fixed,
QSizePolicy.Ignored))
self.setFixedWidth(30)
def paintEvent(self, event):
qp = QPainter()
qp.begin(self)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(self.color)
qp.setPen(self.color)
rect = event.rect()
x = rect.x()
w = rect.width()
lineheight = 16
print('-------------------')
print('lefts', self.lefts)
print('rights', self.rights)
print('-------------------')
ly = self._left_y + 5
ry = self._right_y + 5
rs=self.rights[:]
# offset=1
for i, l in enumerate(self.lefts):
path = QPainterPath()
sl, el = l[0], l[-1]
try:
r=rs[i]
sr, er = r[0], r[-1]
rs.pop(i)
# offset+=1
except IndexError:
sr, er = l[-1], l[-1]-1
y = ly + lineheight * sl
y2 = ry + lineheight * sr
path.moveTo(x, y)
path.lineTo(x, y + lineheight * (el - sl + 1))
path.lineTo(x + w, y2 + lineheight * (er - sr + 1))
path.lineTo(x + w, y2)
qp.drawPath(path)
for i, r in enumerate(rs):
path = QPainterPath()
sr, er = r[0], r[-1]
# try:
l=self.lefts[i]
sl, el = r[-1], r[-1]-1
# except IndexError:
# sl, el = l[-1]+2, l[-1]+1
# print sl, el
y = ly + lineheight * (sl)
y2 = ry + lineheight * (sr)
path.moveTo(x, y)
path.lineTo(x, y + lineheight * (el - sl + 1))
path.lineTo(x + w, y2 + lineheight * (er - sr + 1))
path.lineTo(x + w, y2)
qp.drawPath(path)
qp.end()
def set_left_y(self, y):
self._left_y += y
def set_right_y(self, y):
self._right_y += y
class LinkedTextEdit(QTextEdit):
linked_widget = None
connector = None
orientation = 'left'
no_update = False
def scrollContentsBy(self, x, y):
if self.linked_widget and not self.no_update:
sb = self.linked_widget.verticalScrollBar()
v = sb.value() - y
self.linked_widget.no_update = True
sb.setSliderPosition(v)
self.linked_widget.no_update = False
if self.connector:
if self.orientation == 'left':
self.connector.set_left_y(y)
else:
self.connector.set_right_y(y)
self.connector.update()
super(LinkedTextEdit, self).scrollContentsBy(x, y)
class QDiffEdit(QWidget):
def __init__(self, parent, *args, **kw):
super(QDiffEdit, self).__init__(*args, **kw)
self.left = LinkedTextEdit()
self.left.orientation = 'left'
self.left.setReadOnly(True)
self.right = LinkedTextEdit()
self.right.orientation = 'right'
self.right.setReadOnly(True)
self.connector = QDiffConnector()
self.left.linked_widget = self.right
self.right.linked_widget = self.left
self.left.connector = self.connector
self.right.connector = self.connector
layout = QHBoxLayout()
layout.setSpacing(0)
layout.addWidget(self.left)
layout.addWidget(self.connector)
layout.addWidget(self.right)
self.setLayout(layout)
def set_left_text(self, txt):
self.left.setText(txt)
def set_right_text(self, txt):
self.right.setText(txt)
def highlight(self, ctrl, lineno):
selection = QTextEdit.ExtraSelection()
selection.cursor = ctrl.textCursor()
selection.format.setBackground(QColor(100, 200, 100))
selection.format.setProperty(
QTextFormat.FullWidthSelection, True)
doc = ctrl.document()
block = doc.findBlockByLineNumber(lineno)
selection.cursor.setPosition(block.position())
ss = ctrl.extraSelections()
ss.append(selection)
ctrl.setExtraSelections(ss)
selection.cursor.clearSelection()
def _clear_selection(self):
for ctrl in (self.left, self.right):
ctrl.setExtraSelections([])
def set_diff(self):
self._clear_selection()
ls, rs = extract_line_numbers(self.left.toPlainText(),
self.right.toPlainText())
for li in ls:
self.highlight(self.left, li)
for ri in rs:
self.highlight(self.right, ri)
self._set_connectors(ls, rs)
def _set_connectors(self, ls, rs):
self.connector.lefts = get_ranges(ls)
self.connector.rights = get_ranges(rs)
self.connector.update()
class _DiffEditor(Editor):
_no_update = False
def init(self, parent):
self.control = self._create_control(parent)
def _create_control(self, parent):
control = QDiffEdit(parent)
# QtCore.QObject.connect(ctrl.left,
# QtCore.SIGNAL('textChanged()'), self.update_left_object)
# QtCore.QObject.connect(ctrl.right,
# QtCore.SIGNAL('textChanged()'), self.update_right_object)
control.left.textChanged.connect(self.update_left_object)
control.right.textChanged.connect(self.update_right_object)
return control
def update_editor(self):
if self.value:
self.control.set_left_text(self.value.left_text)
self.control.set_right_text(self.value.right_text)
self.control.set_diff()
def update_right_object(self):
""" Handles the user entering input data in the edit control.
"""
self._update_object('right')
def update_left_object(self):
""" Handles the user entering input data in the edit control.
"""
self._update_object('left')
def _get_user_left_value(self):
return self._get_user_value('left')
def _get_user_right_value(self):
        return self._get_user_value('right')
def _update_object(self, attr):
if (not self._no_update) and (self.control is not None):
try:
setattr(self.value, '{}_text'.format(attr),
getattr(self, '_get_user_{}_value'.format(attr))())
self.control.set_diff()
if self._error is not None:
self._error = None
self.ui.errors -= 1
self.set_error_state(False)
except TraitError as excp:
pass
def _get_user_value(self, attr):
ctrl = getattr(self.control, attr)
try:
value = ctrl.text()
except AttributeError:
value = ctrl.toPlainText()
value = six.text_type(value)
try:
value = self.evaluate(value)
except:
pass
try:
ret = self.factory.mapping.get(value, value)
except (TypeError, AttributeError):
# The value is probably not hashable.
ret = value
return ret
class DiffEditor(BasicEditorFactory):
klass = _DiffEditor
# ============= EOF =============================================
|
UManPychron/pychron
|
pychron/git_archive/diff_editor.py
|
Python
|
apache-2.0
| 9,035 | 0.001107 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class RouteTable(Resource):
"""Route table resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict
:param routes: Collection of routes contained within a route table.
:type routes: list of :class:`Route
<azure.mgmt.network.v2017_03_01.models.Route>`
:ivar subnets: A collection of references to subnets.
:vartype subnets: list of :class:`Subnet
<azure.mgmt.network.v2017_03_01.models.Subnet>`
:param provisioning_state: The provisioning state of the resource.
Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param etag: Gets a unique read-only string that changes whenever the
resource is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'subnets': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'routes': {'key': 'properties.routes', 'type': '[Route]'},
'subnets': {'key': 'properties.subnets', 'type': '[Subnet]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, id=None, location=None, tags=None, routes=None, provisioning_state=None, etag=None):
super(RouteTable, self).__init__(id=id, location=location, tags=tags)
self.routes = routes
self.subnets = None
self.provisioning_state = provisioning_state
self.etag = etag
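# Minimal example (added; values illustrative) using only the generated
# constructor above:
#
#     rt = RouteTable(location='westus', tags={'env': 'test'}, routes=[])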
|
v-iam/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/route_table.py
|
Python
|
mit
| 2,557 | 0.000782 |
#!/usr/bin/env python3
import socket
HOST = '127.0.0.1' # The server's hostname or IP address
PORT = 65432 # The port used by the server
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((HOST, PORT))
s.sendall(b'Hello, world')
data = s.recv(1024)
print('Received', repr(data))
|
RobMackie/robiverse
|
python/echo_client/echo_client.py
|
Python
|
gpl-2.0
| 323 | 0 |
# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base models for point-cloud based detection."""
from lingvo import compat as tf
from lingvo.core import metrics
from lingvo.core import py_utils
from lingvo.tasks.car import base_decoder
from lingvo.tasks.car import detection_3d_metrics
from lingvo.tasks.car import transform_util
from lingvo.tasks.car.waymo import waymo_ap_metric
from lingvo.tasks.car.waymo import waymo_metadata
import numpy as np
class WaymoOpenDatasetDecoder(base_decoder.BaseDecoder):
"""A decoder to use for decoding a detector model on Waymo."""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'draw_visualizations', False, 'Boolean for whether to draw '
'visualizations. This is independent of laser_sampling_rate.')
p.ap_metric = waymo_ap_metric.WaymoAPMetrics.Params(
waymo_metadata.WaymoMetadata())
p.Define(
'extra_ap_metrics', {},
'Dictionary of extra AP metrics to run in the decoder. The key'
'is the name of the metric and the value is a sub-class of '
'APMetric')
p.Define(
'save_residuals', False,
'If True, this expects the residuals and ground-truth to be available '
'in the decoder output dictionary, and it will save it to the decoder '
'output file. See decode_include_residuals in PointDetectorBase '
'for details.')
return p
def CreateDecoderMetrics(self):
"""Decoder metrics for WaymoOpenDataset."""
p = self.params
waymo_metric_p = p.ap_metric.Copy().Set(cls=waymo_ap_metric.WaymoAPMetrics)
waymo_metrics = waymo_metric_p.Instantiate()
class_names = waymo_metrics.metadata.ClassNames()
# TODO(bencaine,vrv): There's some code smell with this ap_metrics params
# usage. We create local copies of the params to then instantiate them.
# Failing to do this risks users editing the params after construction of
# the object, making each object method call have the potential for side
# effects.
# Create a new dictionary with copies of the params converted to objects
# so we can then add these to the decoder metrics.
extra_ap_metrics = {}
for k, metric_p in p.extra_ap_metrics.items():
extra_ap_metrics[k] = metric_p.Instantiate()
waymo_metric_bev_p = waymo_metric_p.Copy()
waymo_metric_bev_p.box_type = '2d'
waymo_metrics_bev = waymo_metric_bev_p.Instantiate()
# Convert the list of class names to a dictionary mapping class_id -> name.
class_id_to_name = dict(enumerate(class_names))
# TODO(vrv): This uses the same top down transform as for KITTI;
# re-visit these settings since detections can happen all around
# the car.
top_down_transform = transform_util.MakeCarToImageTransform(
pixels_per_meter=32.,
image_ref_x=512.,
image_ref_y=1408.,
flip_axes=True)
decoder_metrics = py_utils.NestedMap({
'top_down_visualization':
(detection_3d_metrics.TopDownVisualizationMetric(
top_down_transform,
image_height=1536,
image_width=1024,
class_id_to_name=class_id_to_name)),
'num_samples_in_batch': metrics.AverageMetric(),
'waymo_metrics': waymo_metrics,
'waymo_metrics_bev': waymo_metrics_bev,
})
self._update_metrics_class_keys = ['waymo_metrics_bev', 'waymo_metrics']
for k, metric in extra_ap_metrics.items():
decoder_metrics[k] = metric
self._update_metrics_class_keys.append(k)
decoder_metrics.mesh = detection_3d_metrics.WorldViewer()
return decoder_metrics
def ProcessOutputs(self, input_batch, model_outputs):
"""Produce additional decoder outputs for WaymoOpenDataset.
Args:
input_batch: A .NestedMap of the inputs to the model.
model_outputs: A .NestedMap of the outputs of the model, including::
- per_class_predicted_bboxes: [batch, num_classes, num_boxes, 7] float
Tensor with per class 3D (7 DOF) bounding boxes.
- per_class_predicted_bbox_scores: [batch, num_classes, num_boxes] float
Tensor with per class, per box scores.
- per_class_valid_mask: [batch, num_classes, num_boxes] masking Tensor
indicating which boxes were still kept after NMS for each class.
Returns:
A NestedMap of additional decoder outputs needed for
PostProcessDecodeOut.
"""
del model_outputs
p = self.params
input_labels = input_batch.labels
input_metadata = input_batch.metadata
source_ids = tf.strings.join([
input_metadata.run_segment,
tf.as_string(input_metadata.run_start_offset)
],
separator='_')
ret = py_utils.NestedMap({
'num_points_in_bboxes': input_batch.labels.bboxes_3d_num_points,
# Ground truth.
'bboxes_3d': input_labels.bboxes_3d,
'bboxes_3d_mask': input_labels.bboxes_3d_mask,
'labels': input_labels.labels,
'label_ids': input_labels.label_ids,
'speed': input_labels.speed,
'acceleration': input_labels.acceleration,
# Fill the following in.
'source_ids': source_ids,
'difficulties': input_labels.single_frame_detection_difficulties,
'unfiltered_bboxes_3d_mask': input_labels.unfiltered_bboxes_3d_mask,
'run_segment': input_metadata.run_segment,
'run_start_offset': input_metadata.run_start_offset,
'pose': input_metadata.pose,
})
if p.draw_visualizations:
laser_sample = self._SampleLaserForVisualization(
input_batch.lasers.points_xyz, input_batch.lasers.points_padding)
ret.update(laser_sample)
return ret
def PostProcessDecodeOut(self, dec_out_dict, dec_metrics_dict):
"""Post-processes the decoder outputs."""
p = self.params
# Update num_samples_in_batch.
batch_size, num_classes, num_boxes, _ = (
dec_out_dict.per_class_predicted_bboxes.shape)
dec_metrics_dict.num_samples_in_batch.Update(batch_size)
# Update decoder output by removing z-coordinate, thus reshaping the bboxes
# to [batch, num_bboxes, 5] to be compatible with
# TopDownVisualizationMetric.
# Indices corresponding to the 2D bbox parameters (x, y, dx, dy, phi).
bbox_2d_idx = np.asarray([1, 1, 0, 1, 1, 0, 1], dtype=np.bool)
bboxes_2d = dec_out_dict.bboxes_3d[..., bbox_2d_idx]
predicted_bboxes = dec_out_dict.per_class_predicted_bboxes[..., bbox_2d_idx]
if p.draw_visualizations and dec_out_dict.points_sampled:
tf.logging.info('Updating sample for top down visualization')
dec_metrics_dict.mesh.Update(
py_utils.NestedMap({
'points_xyz': dec_out_dict.points_xyz,
'points_padding': dec_out_dict.points_padding,
}))
# Flatten our predictions/scores to match the API of the visualization
# The last dimension of flattened_bboxes is 5 due to the mask
# above using bbox_2d_idx.
flattened_bboxes = np.reshape(predicted_bboxes,
[batch_size, num_classes * num_boxes, 5])
flattened_visualization_weights = np.reshape(
dec_out_dict.visualization_weights,
[batch_size, num_classes * num_boxes])
# Create a label id mask for now to maintain compatibility.
# TODO(bencaine): Refactor visualizations to reflect new structure.
flattened_visualization_labels = np.tile(
np.arange(0, num_classes)[np.newaxis, :, np.newaxis],
[batch_size, 1, num_boxes])
flattened_visualization_labels = np.reshape(
flattened_visualization_labels, [batch_size, num_classes * num_boxes])
dec_metrics_dict.top_down_visualization.Update(
py_utils.NestedMap({
'visualization_labels': flattened_visualization_labels,
'predicted_bboxes': flattened_bboxes,
'visualization_weights': flattened_visualization_weights,
'points_xyz': dec_out_dict.points_xyz,
'points_padding': dec_out_dict.points_padding,
'gt_bboxes_2d': bboxes_2d,
'gt_bboxes_2d_weights': dec_out_dict.bboxes_3d_mask,
'labels': dec_out_dict.labels,
'difficulties': dec_out_dict.difficulties,
'source_ids': dec_out_dict.source_ids,
}))
# Update AP metrics.
# Skip zeroth step decoding.
if dec_out_dict.global_step == 0:
return None
# TODO(bencaine/vrv): Refactor to unify Waymo code and KITTI
# Returned values are saved in model_dir/decode_* directories.
output_to_save = []
for batch_idx in range(batch_size):
pred_bboxes = dec_out_dict.per_class_predicted_bboxes[batch_idx]
pred_bbox_scores = dec_out_dict.per_class_predicted_bbox_scores[batch_idx]
# The current API expects a 'height' matrix to be passed for filtering
# detections based on height. This is a KITTI-ism that we need to remove,
# but for now we just give a height of 1. The MinHeight metadata function
# for non-KITTI datasets should have a threshold lower than this value.
heights = np.ones((num_classes, num_boxes)).astype(np.float32)
gt_mask = dec_out_dict.bboxes_3d_mask[batch_idx].astype(bool)
gt_labels = dec_out_dict.labels[batch_idx][gt_mask]
gt_bboxes = dec_out_dict.bboxes_3d[batch_idx][gt_mask]
gt_difficulties = dec_out_dict.difficulties[batch_idx][gt_mask]
gt_num_points = dec_out_dict.num_points_in_bboxes[batch_idx][gt_mask]
# Note that this is not used in the KITTI evaluation.
gt_speed = dec_out_dict.speed[batch_idx][gt_mask]
# TODO(shlens): Update me
for metric_key in self._update_metrics_class_keys:
metric_cls = dec_metrics_dict[metric_key]
metric_cls.Update(
dec_out_dict.source_ids[batch_idx],
py_utils.NestedMap(
groundtruth_labels=gt_labels,
groundtruth_bboxes=gt_bboxes,
groundtruth_difficulties=gt_difficulties,
groundtruth_num_points=gt_num_points,
groundtruth_speed=gt_speed,
detection_scores=pred_bbox_scores,
detection_boxes=pred_bboxes,
detection_heights_in_pixels=heights,
))
# We still want to save all ground truth (even if it was filtered
# in some way) so we use the unfiltered_bboxes_3d_mask here.
gt_save_mask = dec_out_dict.unfiltered_bboxes_3d_mask[batch_idx].astype(
bool)
pd_save_mask = dec_out_dict.per_class_valid_mask[batch_idx] > 0
class_ids = np.tile(np.arange(num_classes)[:, np.newaxis], [1, num_boxes])
saved_results = py_utils.NestedMap(
pose=dec_out_dict.pose[batch_idx],
frame_id=dec_out_dict.source_ids[batch_idx],
bboxes=pred_bboxes[pd_save_mask],
scores=pred_bbox_scores[pd_save_mask],
gt_labels=dec_out_dict.labels[batch_idx][gt_save_mask],
gt_label_ids=dec_out_dict.label_ids[batch_idx][gt_save_mask],
gt_speed=dec_out_dict.speed[batch_idx][gt_save_mask],
gt_acceleration=dec_out_dict.acceleration[batch_idx][gt_save_mask],
class_ids=class_ids[pd_save_mask],
gt_bboxes=dec_out_dict.bboxes_3d[batch_idx][gt_save_mask],
gt_difficulties=dec_out_dict.difficulties[batch_idx][gt_save_mask],
)
if p.save_residuals:
# The leading shapes of these tensors should match bboxes and scores.
# These are the underlying tensors that can are used to compute score
# and bboxes.
saved_results.update({
'bboxes_gt_residuals':
dec_out_dict.per_class_gt_residuals[batch_idx][pd_save_mask],
'bboxes_gt_labels':
dec_out_dict.per_class_gt_labels[batch_idx][pd_save_mask],
'bboxes_residuals':
dec_out_dict.per_class_residuals[batch_idx][pd_save_mask],
'bboxes_logits':
dec_out_dict.per_class_logits[batch_idx][pd_save_mask],
'bboxes_anchor_boxes':
dec_out_dict.per_class_anchor_boxes[batch_idx][pd_save_mask],
})
serialized = self.SaveTensors(saved_results)
output_to_save += [(dec_out_dict.source_ids[batch_idx], serialized)]
return output_to_save
|
tensorflow/lingvo
|
lingvo/tasks/car/waymo/waymo_decoder.py
|
Python
|
apache-2.0
| 13,038 | 0.004295 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# general_hosts.py
#
# Copyright 2016-2020 fritzctl Contributors>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
from . import base
class API_general_hosts(base.API_base):
"""
General Host Information TR64 Object-Oriented API.
Can be instantiated via ``session.getOOAPI("general_hosts")`` or ``session.getOOAPI("urn:dslforum-org:service:Hosts:1")``\ .
Same parameters and attributes as :py:class:`fritzctl.ooapi.base.API_base()`\ .
"""
def getHostByIndex(self,index,ext=True):
"""
Returns the Host associated with the given Index.
:param int index: The Index of the Host
:param bool ext: Optional Flag if information from the AVM Extension should be integrated, defaults to True
:return: Host Information Object
:rtype: Host
:raises AssertionError: if the index is invalid, e.g. not an integer or lower than 0
:raises ValueError: if the index is out-of-bounds
"""
assert isinstance(index,int) and index>=0
d = self.dynapi.GetGenericHostEntry(NewIndex=index)
if ext:
d.update(self.dynapi.callAPI("X_AVM-DE_GetGenericHostEntryExt",NewIndex=index))
d["_ext"]=True
else:
d["_ext"]=False
return Host(self,index,d)
def getHostByMAC(self,mac,ext=True):
"""
Returns the Host associated with the given MAC Address.
:param str mac: MAC Address of the Host
:param bool ext: Optional Flag if information from the AVM Extension should be integrated, defaults to True
:return: Host Information Object
:rtype: Host
:raises AssertionError: if the MAC Address is invalid, e.g. not a string
:raises ValueError: if the MAC Address is unknown
"""
assert isinstance(mac,str)
        d = self.dynapi.GetSpecificHostEntry(NewMACAddress=mac)
d["NewMACAddress"]=mac
if ext:
d.update(self.dynapi.callAPI("X_AVM-DE_GetSpecificHostEntryExt",NewMACAddress=mac))
d["_ext"]=True
else:
d["_ext"]=False
return Host(self,-1,d)
def getHostListLength(self):
"""
Returns the length of the List of all known Hosts.
:return: Number of Entries in the host list.
:rtype: int
"""
return int(self.dynapi.GetHostNumberOfEntries()["NewHostNumberOfEntries"])
def getHostList(self,ext=True):
"""
Returns a list of all hosts.
:param bool ext: Optional Flag if information from the AVM Extension should be integrated, defaults to True
:return: List of Hosts
:rtype: List of :py:class:`Host()`
"""
out = []
for i in range(self.getHostListLength()):
out.append(self.getHostByIndex(i,ext=ext))
return out
def getMacByIndex(self,index):
"""
Returns the MAC Address of the device associated with the given index.
:param int index: Index of the Device to return
:return: MAC Address
:rtype: str
"""
return self.dynapi.GetGenericHostEntry(NewIndex=index)["NewMACAddress"]
def getChangeCounter(self):
"""
Returns the current change counter.
:return: The current change counter
:rtype: int
"""
return int(self.dynapi.callAPI("X_AVM-DE_GetChangeCounter")["NewX_AVM-DE_GetChangeCounter"])
def wakeUp(self,mac):
"""
Sends a WakeOnLAN request to the specified Host.
:param str mac: MAC Address to wake up
:raises AssertionError: if the MAC Address is invalid, e.g. not a string
:raises ValueError: if the MAC Address is unknown
"""
assert isinstance(mac,str)
self.dynapi.callAPI("X_AVM-DE_WakeOnLANByMACAddress",NewMACAddress=mac)
class Host(object):
"""
Host Information and Configuration Class.
:param API_avm_homeauto api: API object to use when querying for data
:param int index: Index this device had when requested via ``GetGenericHostEntry()``\ , may be -1 if unknown
:param dict info: Dictionary containing the TR64 Response with all the data about the device; automatically passed to :py:meth:`loadData()`
:ivar API_avm_homeauto api: stores the supplied API object
:ivar int index: stores the supplied index
:ivar dict info: stores the data in a dictionary
:py:attr:`info` stores a flag if extension data is available in the ``_ext`` key.
:ivar str mac: MAC Address of this Host
:ivar str ip: IP Address of this Host
:ivar str address_source: Source of the Address
:ivar int lease_remaining: Time in second until the DHCP Lease expires
:ivar str interface_type: Type of the interface this Host is connected with
:ivar bool active: Flag if this host is active
:ivar str hostname: Property for reading and writing hostname, see :py:attr:`hostname`
Extension Variables:
:ivar int ethport: Which ethernet port the host is connected with, from 1-4 or 0 if not via LAN
:ivar float speed: Current Connection Speed
:ivar bool updateAvailable: Flag if an update is available, where applicable
:ivar bool updateSuccessful: Flag if the last update was successful, where applicable
:ivar str infourl: URL for getting Information
:ivar str model: Model of the Host
:ivar str url: URL of the Host
"""
def __init__(self,api,index,info):
self.api = api
self.index = index
self.info = info
self.loadData(self.info)
def loadData(self,data):
"""
Populates instance variables with the supplied TR64 response.
This method is automatically called upon construction with the supplied info dict.
Note that the ``_ext`` key must be set to a boolean flag indicating if extension information is contained in the response.
"""
self.mac = data["NewMACAddress"]
self.ip = data["NewIPAddress"]
self.address_source = data["NewAddressSource"]
self.lease_remaining = int(data["NewLeaseTimeRemaining"])
self.interface_type = data["NewInterfaceType"]
self.active = data["NewActive"]=="1"
self._hostname = data["NewHostName"]
if data["_ext"]:
self.ethport = int(data["NewX_AVM-DE_Port"])
self.speed = float(data["NewX_AVM-DE_Speed"])
self.updateAvailable = data["NewX_AVM-DE_UpdateAvailable"]=="1"
self.updateSuccessful = data["NewX_AVM-DE_UpdateSuccessful"]=="succeeded"
self.infourl = data["NewX_AVM-DE_InfoURL"]
self.model = data["NewX_AVM-DE_Model"]
self.url = data["NewX_AVM-DE_URL"]
def reloadData(self):
"""
Reloads the data from the server.
Note that this method will only request extension data if the key ``_ext`` is set to ``True``\ .
"""
d = self.api.dynapi.GetSpecificHostEntry(NewMACAddress=self.mac)
if self.info["_ext"]:
d.update(self.api.dynapi.callAPI("X_AVM-DE_GetSpecificHostEntryExt",NewMACAddress=self.mac))
d["_ext"]=self.info["_ext"]
d["NewMACAddress"]=self.mac
self.info = d
def doUpdate(self):
"""
Requests that the host does an update.
Note that this may not work on every host
"""
self.checkForUpdates()
self.api.dynapi.callAPI("X_AVM-DE_HostDoUpdate",NewMACAddress=self.mac)
def checkForUpdates(self):
"""
Checks for Updates.
Note that this method does not return anything as the underlying API call gives no variables in return.
This method automatically reloads the data to update any update flags that may have changed.
"""
self.api.dynapi.callAPI("X_AVM-DE_HostsCheckUpdate")
self.reloadData()
@property
def autoWOL(self):
"""
Property controlling the Auto-WakeOnLAN Feature.
This Property is not cached and can be written to and read from.
"""
return self.api.dynapi.callAPI("X_AVM-DE_GetAutoWakeOnLANByMACAddress",NewMACAddress=self.mac)["NewAutoWOLEnabled"]=="1"
@autoWOL.setter
def autoWOL(self,value):
self.api.dynapi.callAPI("X_AVM-DE_SetAutoWakeOnLANByMACAddress",NewMACAddress=self.mac,NewAutoWOLEnabled=str(int(value)))
@property
def hostname(self):
"""
Property controlling the hostname of the device.
This property will only update the displayed hostname if it is modified or the data is refreshed.
This property can be read from and written to
"""
return self._hostname
@hostname.setter
def hostname(self,value):
self.api.dynapi.callAPI("X_AVM-DE_SetHostNameByMACAddress",NewMACAddress=self.mac,NewHostName=value)
self.reloadData()
def wakeUp(self):
"""
Sends a WakeOnLAN request to this host and tries to wake it up.
"""
self.api.wakeUp(self.mac)
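# -----------------------------------------------------------------------------
# Hedged usage sketch (added; not part of the original module). How the session
# object is obtained depends on the rest of fritzctl and is assumed here purely
# for illustration; the getOOAPI() call and the Host attributes follow the
# docstrings above.
#
#     session = ...  # a fritzctl session object created elsewhere
#     hosts_api = session.getOOAPI("general_hosts")
#     for host in hosts_api.getHostList(ext=True):
#         print(host.hostname, host.ip, "up" if host.active else "down")
#     hosts_api.wakeUp("00:11:22:33:44:55")
# -----------------------------------------------------------------------------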
|
not-na/fritzctl
|
fritzctl/ooapi/general_hosts.py
|
Python
|
gpl-2.0
| 9,883 | 0.012344 |
# -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2009,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
__version__ = "3.3"
__tabversion__ = "3.2" # Version of table file used
import re, sys, types, copy, os
# This tuple contains known string types
try:
# Python 2.6
StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
# Python 3.0
StringTypes = (str, bytes)
# Extract the code attribute of a function. Different implementations
# are for Python 2/3 compatibility.
if sys.version_info[0] < 3:
def func_code(f):
return f.func_code
else:
def func_code(f):
return f.__code__
# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
def __init__(self,message,s):
self.args = (message,)
self.text = s
# Token class. This class is used to represent the tokens produced.
class LexToken(object):
def __str__(self):
return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
def __repr__(self):
return str(self)
# This object is a stand-in for a logging object created by the
# logging module.
class PlyLogger(object):
def __init__(self,f):
self.f = f
def critical(self,msg,*args,**kwargs):
self.f.write((msg % args) + "\n")
def warning(self,msg,*args,**kwargs):
self.f.write("LEX WARNING: "+ (msg % args) + "\n")
def error(self,msg,*args,**kwargs):
self.f.write("LEX ERROR: " + (msg % args) + "\n")
info = critical
debug = critical
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self,name):
return self
def __call__(self,*args,**kwargs):
return self
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
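# -----------------------------------------------------------------------------
# Illustrative usage sketch (added; see the PLY documentation for the
# authoritative form). A grammar module defines a `tokens` tuple and t_ rules,
# builds a lexer with lex.lex(), feeds it text with input() and iterates:
#
#     import ply.lex as lex
#
#     tokens = ('NUMBER', 'PLUS')
#     t_PLUS   = r'\+'
#     t_ignore = ' \t'
#
#     def t_NUMBER(t):
#         r'\d+'
#         t.value = int(t.value)
#         return t
#
#     def t_error(t):
#         t.lexer.skip(1)
#
#     lexer = lex.lex()
#     lexer.input('1 + 2')
#     for tok in lexer:
#         print(tok.type, tok.value)
# -----------------------------------------------------------------------------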
class Lexer:
def __init__(self):
self.lexre = None # Master regular expression. This is a list of
# tuples (re,findex) where re is a compiled
# regular expression and findex is a list
# mapping regex group numbers to rules
self.lexretext = None # Current regular expression strings
self.lexstatere = {} # Dictionary mapping lexer states to master regexs
self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names
self.lexstate = "INITIAL" # Current lexer state
self.lexstatestack = [] # Stack of lexer states
self.lexstateinfo = None # State information
self.lexstateignore = {} # Dictionary of ignored characters for each state
self.lexstateerrorf = {} # Dictionary of error functions for each state
self.lexreflags = 0 # Optional re compile flags
self.lexdata = None # Actual input data (as a string)
self.lexpos = 0 # Current position in input text
self.lexlen = 0 # Length of the input text
self.lexerrorf = None # Error rule (if any)
self.lextokens = None # List of valid tokens
self.lexignore = "" # Ignored characters
self.lexliterals = "" # Literal characters that can be passed through
self.lexmodule = None # Module
self.lineno = 1 # Current line number
self.lexoptimize = 0 # Optimized mode
def clone(self,object=None):
c = copy.copy(self)
# If the object parameter has been supplied, it means we are attaching the
# lexer to a new object. In this case, we have to rebind all methods in
# the lexstatere and lexstateerrorf tables.
if object:
newtab = { }
for key, ritem in self.lexstatere.items():
newre = []
for cre, findex in ritem:
newfindex = []
for f in findex:
if not f or not f[0]:
newfindex.append(f)
continue
newfindex.append((getattr(object,f[0].__name__),f[1]))
newre.append((cre,newfindex))
newtab[key] = newre
c.lexstatere = newtab
c.lexstateerrorf = { }
for key, ef in self.lexstateerrorf.items():
c.lexstateerrorf[key] = getattr(object,ef.__name__)
c.lexmodule = object
return c
# ------------------------------------------------------------
# writetab() - Write lexer information to a table file
# ------------------------------------------------------------
def writetab(self,tabfile,outputdir=""):
if isinstance(tabfile,types.ModuleType):
return
basetabfilename = tabfile.split(".")[-1]
filename = os.path.join(outputdir,basetabfilename)+".py"
tf = open(filename,"w")
tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
tf.write("_tabversion = %s\n" % repr(__version__))
tf.write("_lextokens = %s\n" % repr(self.lextokens))
tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
tabre = { }
# Collect all functions in the initial state
initial = self.lexstatere["INITIAL"]
initialfuncs = []
for part in initial:
for f in part[1]:
if f and f[0]:
initialfuncs.append(f)
for key, lre in self.lexstatere.items():
titem = []
for i in range(len(lre)):
titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1],self.lexstaterenames[key][i])))
tabre[key] = titem
tf.write("_lexstatere = %s\n" % repr(tabre))
tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))
taberr = { }
for key, ef in self.lexstateerrorf.items():
if ef:
taberr[key] = ef.__name__
else:
taberr[key] = None
tf.write("_lexstateerrorf = %s\n" % repr(taberr))
tf.close()
# ------------------------------------------------------------
# readtab() - Read lexer information from a tab file
# ------------------------------------------------------------
def readtab(self,tabfile,fdict):
if isinstance(tabfile,types.ModuleType):
lextab = tabfile
else:
if sys.version_info[0] < 3:
exec("import %s as lextab" % tabfile)
else:
env = { }
exec("import %s as lextab" % tabfile, env,env)
lextab = env['lextab']
if getattr(lextab,"_tabversion","0.0") != __version__:
raise ImportError("Inconsistent PLY version")
self.lextokens = lextab._lextokens
self.lexreflags = lextab._lexreflags
self.lexliterals = lextab._lexliterals
self.lexstateinfo = lextab._lexstateinfo
self.lexstateignore = lextab._lexstateignore
self.lexstatere = { }
self.lexstateretext = { }
for key,lre in lextab._lexstatere.items():
titem = []
txtitem = []
for i in range(len(lre)):
titem.append((re.compile(lre[i][0],lextab._lexreflags | re.VERBOSE),_names_to_funcs(lre[i][1],fdict)))
txtitem.append(lre[i][0])
self.lexstatere[key] = titem
self.lexstateretext[key] = txtitem
self.lexstateerrorf = { }
for key,ef in lextab._lexstateerrorf.items():
self.lexstateerrorf[key] = fdict[ef]
self.begin('INITIAL')
# ------------------------------------------------------------
# input() - Push a new string into the lexer
# ------------------------------------------------------------
def input(self,s):
# Pull off the first character to see if s looks like a string
c = s[:1]
if not isinstance(c,StringTypes):
raise ValueError("Expected a string")
self.lexdata = s
self.lexpos = 0
self.lexlen = len(s)
# ------------------------------------------------------------
# begin() - Changes the lexing state
# ------------------------------------------------------------
def begin(self,state):
if not state in self.lexstatere:
raise ValueError("Undefined state")
self.lexre = self.lexstatere[state]
self.lexretext = self.lexstateretext[state]
self.lexignore = self.lexstateignore.get(state,"")
self.lexerrorf = self.lexstateerrorf.get(state,None)
self.lexstate = state
# ------------------------------------------------------------
# push_state() - Changes the lexing state and saves old on stack
# ------------------------------------------------------------
def push_state(self,state):
self.lexstatestack.append(self.lexstate)
self.begin(state)
# ------------------------------------------------------------
# pop_state() - Restores the previous state
# ------------------------------------------------------------
def pop_state(self):
self.begin(self.lexstatestack.pop())
# ------------------------------------------------------------
# current_state() - Returns the current lexing state
# ------------------------------------------------------------
def current_state(self):
return self.lexstate
# ------------------------------------------------------------
# skip() - Skip ahead n characters
# ------------------------------------------------------------
def skip(self,n):
self.lexpos += n
# ------------------------------------------------------------
# opttoken() - Return the next token from the Lexer
#
# Note: This function has been carefully implemented to be as fast
# as possible. Don't make changes unless you really know what
# you are doing
# ------------------------------------------------------------
def token(self):
# Make local copies of frequently referenced attributes
lexpos = self.lexpos
lexlen = self.lexlen
lexignore = self.lexignore
lexdata = self.lexdata
while lexpos < lexlen:
# This code provides some short-circuit code for whitespace, tabs, and other ignored characters
if lexdata[lexpos] in lexignore:
lexpos += 1
continue
# Look for a regular expression match
for lexre,lexindexfunc in self.lexre:
m = lexre.match(lexdata,lexpos)
if not m: continue
# Create a token for return
tok = LexToken()
tok.value = m.group()
tok.lineno = self.lineno
tok.lexpos = lexpos
i = m.lastindex
func,tok.type = lexindexfunc[i]
if not func:
# If no token type was set, it's an ignored token
if tok.type:
self.lexpos = m.end()
return tok
else:
lexpos = m.end()
break
lexpos = m.end()
# If token is processed by a function, call it
tok.lexer = self # Set additional attributes useful in token rules
self.lexmatch = m
self.lexpos = lexpos
newtok = func(tok)
# Every function must return a token, if nothing, we just move to next token
if not newtok:
lexpos = self.lexpos # This is here in case user has updated lexpos.
lexignore = self.lexignore # This is here in case there was a state change
break
# Verify type of the token. If not in the token map, raise an error
if not self.lexoptimize:
if not newtok.type in self.lextokens:
raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
func_code(func).co_filename, func_code(func).co_firstlineno,
func.__name__, newtok.type),lexdata[lexpos:])
return newtok
else:
# No match, see if in literals
if lexdata[lexpos] in self.lexliterals:
tok = LexToken()
tok.value = lexdata[lexpos]
tok.lineno = self.lineno
tok.type = tok.value
tok.lexpos = lexpos
self.lexpos = lexpos + 1
return tok
# No match. Call t_error() if defined.
if self.lexerrorf:
tok = LexToken()
tok.value = self.lexdata[lexpos:]
tok.lineno = self.lineno
tok.type = "error"
tok.lexer = self
tok.lexpos = lexpos
self.lexpos = lexpos
newtok = self.lexerrorf(tok)
if lexpos == self.lexpos:
# Error method didn't change text position at all. This is an error.
raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
lexpos = self.lexpos
if not newtok: continue
return newtok
self.lexpos = lexpos
raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
self.lexpos = lexpos + 1
if self.lexdata is None:
raise RuntimeError("No input string given with input()")
return None
# Iterator interface
def __iter__(self):
return self
def next(self):
t = self.token()
if t is None:
raise StopIteration
return t
__next__ = next
# -----------------------------------------------------------------------------
# ==== Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
try:
raise RuntimeError
except RuntimeError:
e,b,t = sys.exc_info()
f = t.tb_frame
while levels > 0:
f = f.f_back
levels -= 1
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist,namelist):
result = []
for f,name in zip(funclist,namelist):
if f and f[0]:
result.append((name, f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist,fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]],n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist,reflags,ldict,toknames):
if not relist: return []
regex = "|".join(relist)
try:
lexre = re.compile(regex,re.VERBOSE | reflags)
# Build the index to function map for the matching engine
lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
lexindexnames = lexindexfunc[:]
for f,i in lexre.groupindex.items():
handle = ldict.get(f,None)
if type(handle) in (types.FunctionType, types.MethodType):
lexindexfunc[i] = (handle,toknames[f])
lexindexnames[i] = f
elif handle is not None:
lexindexnames[i] = f
if f.find("ignore_") > 0:
lexindexfunc[i] = (None,None)
else:
lexindexfunc[i] = (None, toknames[f])
return [(lexre,lexindexfunc)],[regex],[lexindexnames]
except Exception:
m = int(len(relist)/2)
if m == 0: m = 1
llist, lre, lnames = _form_master_re(relist[:m],reflags,ldict,toknames)
rlist, rre, rnames = _form_master_re(relist[m:],reflags,ldict,toknames)
return llist+rlist, lre+rre, lnames+rnames
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s,names):
nonstate = 1
parts = s.split("_")
for i in range(1,len(parts)):
if not parts[i] in names and parts[i] != 'ANY': break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names)
tokenname = "_".join(parts[i:])
return (states,tokenname)
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
def __init__(self,ldict,log=None,reflags=0):
self.ldict = ldict
self.error_func = None
self.tokens = []
self.reflags = reflags
self.stateinfo = { 'INITIAL' : 'inclusive'}
self.files = {}
self.error = 0
if log is None:
self.log = PlyLogger(sys.stderr)
else:
self.log = log
# Get all of the basic information
def get_all(self):
self.get_tokens()
self.get_literals()
self.get_states()
self.get_rules()
# Validate all of the information
def validate_all(self):
self.validate_tokens()
self.validate_literals()
self.validate_rules()
return self.error
# Get the tokens map
def get_tokens(self):
tokens = self.ldict.get("tokens",None)
if not tokens:
self.log.error("No token list is defined")
self.error = 1
return
if not isinstance(tokens,(list, tuple)):
self.log.error("tokens must be a list or tuple")
self.error = 1
return
if not tokens:
self.log.error("tokens is empty")
self.error = 1
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
terminals = {}
for n in self.tokens:
if not _is_identifier.match(n):
self.log.error("Bad token name '%s'",n)
self.error = 1
if n in terminals:
self.log.warning("Token '%s' multiply defined", n)
terminals[n] = 1
# Get the literals specifier
def get_literals(self):
self.literals = self.ldict.get("literals","")
# Validate literals
def validate_literals(self):
try:
for c in self.literals:
if not isinstance(c,StringTypes) or len(c) > 1:
self.log.error("Invalid literal %s. Must be a single character", repr(c))
self.error = 1
continue
except TypeError:
self.log.error("Invalid literals specification. literals must be a sequence of characters")
self.error = 1
def get_states(self):
self.states = self.ldict.get("states",None)
# Build statemap
if self.states:
if not isinstance(self.states,(tuple,list)):
self.log.error("states must be defined as a tuple or list")
self.error = 1
else:
for s in self.states:
if not isinstance(s,tuple) or len(s) != 2:
self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')",repr(s))
self.error = 1
continue
name, statetype = s
if not isinstance(name,StringTypes):
self.log.error("State name %s must be a string", repr(name))
self.error = 1
continue
if not (statetype == 'inclusive' or statetype == 'exclusive'):
self.log.error("State type for state %s must be 'inclusive' or 'exclusive'",name)
self.error = 1
continue
if name in self.stateinfo:
self.log.error("State '%s' already defined",name)
self.error = 1
continue
self.stateinfo[name] = statetype
# Get all of the symbols with a t_ prefix and sort them into various
# categories (functions, strings, error functions, and ignore characters)
def get_rules(self):
tsymbols = [f for f in self.ldict if f[:2] == 't_' ]
# Now build up a list of functions and a list of strings
self.toknames = { } # Mapping of symbols to token names
self.funcsym = { } # Symbols defined as functions
self.strsym = { } # Symbols defined as strings
self.ignore = { } # Ignore strings by state
self.errorf = { } # Error functions by state
for s in self.stateinfo:
self.funcsym[s] = []
self.strsym[s] = []
if len(tsymbols) == 0:
self.log.error("No rules of the form t_rulename are defined")
self.error = 1
return
for f in tsymbols:
t = self.ldict[f]
states, tokname = _statetoken(f,self.stateinfo)
self.toknames[f] = tokname
if hasattr(t,"__call__"):
if tokname == 'error':
for s in states:
self.errorf[s] = t
elif tokname == 'ignore':
line = func_code(t).co_firstlineno
file = func_code(t).co_filename
self.log.error("%s:%d: Rule '%s' must be defined as a string",file,line,t.__name__)
self.error = 1
else:
for s in states:
self.funcsym[s].append((f,t))
elif isinstance(t, StringTypes):
if tokname == 'ignore':
for s in states:
self.ignore[s] = t
if "\\" in t:
self.log.warning("%s contains a literal backslash '\\'",f)
elif tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", f)
self.error = 1
else:
for s in states:
self.strsym[s].append((f,t))
else:
self.log.error("%s not defined as a function or string", f)
self.error = 1
# Sort the functions by line number
for f in self.funcsym.values():
if sys.version_info[0] < 3:
f.sort(lambda x,y: cmp(func_code(x[1]).co_firstlineno,func_code(y[1]).co_firstlineno))
else:
# Python 3.0
f.sort(key=lambda x: func_code(x[1]).co_firstlineno)
# Sort the strings by regular expression length
for s in self.strsym.values():
if sys.version_info[0] < 3:
s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
else:
# Python 3.0
s.sort(key=lambda x: len(x[1]),reverse=True)
# Validate all of the t_rules collected
def validate_rules(self):
for state in self.stateinfo:
# Validate all rules defined by functions
for fname, f in self.funcsym[state]:
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
self.files[file] = 1
tokname = self.toknames[fname]
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = func_code(f).co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
self.error = 1
continue
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
self.error = 1
continue
if not f.__doc__:
self.log.error("%s:%d: No regular expression defined for rule '%s'",file,line,f.__name__)
self.error = 1
continue
try:
c = re.compile("(?P<%s>%s)" % (fname,f.__doc__), re.VERBOSE | self.reflags)
if c.match(""):
self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file,line,f.__name__)
self.error = 1
except re.error:
_etype, e, _etrace = sys.exc_info()
self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file,line,f.__name__,e)
if '#' in f.__doc__:
self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'",file,line, f.__name__)
self.error = 1
# Validate all rules defined by strings
for name,r in self.strsym[state]:
tokname = self.toknames[name]
if tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", name)
self.error = 1
continue
if not tokname in self.tokens and tokname.find("ignore_") < 0:
self.log.error("Rule '%s' defined for an unspecified token %s",name,tokname)
self.error = 1
continue
try:
c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | self.reflags)
if (c.match("")):
self.log.error("Regular expression for rule '%s' matches empty string",name)
self.error = 1
except re.error:
_etype, e, _etrace = sys.exc_info()
self.log.error("Invalid regular expression for rule '%s'. %s",name,e)
if '#' in r:
self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'",name)
self.error = 1
if not self.funcsym[state] and not self.strsym[state]:
self.log.error("No rules defined for state '%s'",state)
self.error = 1
# Validate the error function
efunc = self.errorf.get(state,None)
if efunc:
f = efunc
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
self.files[file] = 1
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = func_code(f).co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
self.error = 1
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
self.error = 1
for f in self.files:
self.validate_file(f)
# -----------------------------------------------------------------------------
# validate_file()
#
# This checks to see if there are duplicated t_rulename() functions or strings
# in the parser input file. This is done using a simple regular expression
# match on each line in the given file.
# -----------------------------------------------------------------------------
def validate_file(self,filename):
import os.path
base,ext = os.path.splitext(filename)
if ext != '.py': return # No idea what the file is. Return OK
try:
f = open(filename)
lines = f.readlines()
f.close()
except IOError:
return # Couldn't find the file. Don't worry about it
fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
counthash = { }
linen = 1
for l in lines:
m = fre.match(l)
if not m:
m = sre.match(l)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
self.log.error("%s:%d: Rule %s redefined. Previously defined on line %d",filename,linen,name,prev)
self.error = 1
linen += 1
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
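# Typical usage (illustrative sketch; the rule names and patterns are made up):
#
#   tokens = ('NUMBER', 'PLUS')
#   t_PLUS   = r'\+'
#   t_ignore = ' \t'
#   def t_NUMBER(t):
#       r'\d+'
#       t.value = int(t.value)
#       return t
#   def t_error(t):
#       t.lexer.skip(1)
#   lexer = lex()            # collects the t_* rules from the caller's namespace
#   lexer.input("1 + 2")
#   while True:
#       tok = lexer.token()
#       if not tok: break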
def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
global lexer
ldict = None
stateinfo = { 'INITIAL' : 'inclusive'}
lexobj = Lexer()
lexobj.lexoptimize = optimize
global token,input
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
if debug:
if debuglog is None:
debuglog = PlyLogger(sys.stderr)
# Get the module dictionary used for the lexer
if object: module = object
if module:
_items = [(k,getattr(module,k)) for k in dir(module)]
ldict = dict(_items)
else:
ldict = get_caller_module_dict(2)
# Collect parser information from the dictionary
linfo = LexerReflect(ldict,log=errorlog,reflags=reflags)
linfo.get_all()
if not optimize:
if linfo.validate_all():
raise SyntaxError("Can't build lexer")
if optimize and lextab:
try:
lexobj.readtab(lextab,ldict)
token = lexobj.token
input = lexobj.input
lexer = lexobj
return lexobj
except ImportError:
pass
# Dump some basic debugging information
if debug:
debuglog.info("lex: tokens = %r", linfo.tokens)
debuglog.info("lex: literals = %r", linfo.literals)
debuglog.info("lex: states = %r", linfo.stateinfo)
# Build a dictionary of valid token names
lexobj.lextokens = { }
for n in linfo.tokens:
lexobj.lextokens[n] = 1
# Get literals specification
if isinstance(linfo.literals,(list,tuple)):
lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
else:
lexobj.lexliterals = linfo.literals
# Get the stateinfo dictionary
stateinfo = linfo.stateinfo
regexs = { }
# Build the master regular expressions
for state in stateinfo:
regex_list = []
# Add rules defined by functions first
for fname, f in linfo.funcsym[state]:
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
regex_list.append("(?P<%s>%s)" % (fname,f.__doc__))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",fname,f.__doc__, state)
# Now add all of the simple rules
for name,r in linfo.strsym[state]:
regex_list.append("(?P<%s>%s)" % (name,r))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",name,r, state)
regexs[state] = regex_list
# Build the master regular expressions
if debug:
debuglog.info("lex: ==== MASTER REGEXS FOLLOW ====")
for state in regexs:
lexre, re_text, re_names = _form_master_re(regexs[state],reflags,ldict,linfo.toknames)
lexobj.lexstatere[state] = lexre
lexobj.lexstateretext[state] = re_text
lexobj.lexstaterenames[state] = re_names
if debug:
for i in range(len(re_text)):
debuglog.info("lex: state '%s' : regex[%d] = '%s'",state, i, re_text[i])
# For inclusive states, we need to add the regular expressions from the INITIAL state
for state,stype in stateinfo.items():
if state != "INITIAL" and stype == 'inclusive':
lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
lexobj.lexstateinfo = stateinfo
lexobj.lexre = lexobj.lexstatere["INITIAL"]
lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
lexobj.lexreflags = reflags
# Set up ignore variables
lexobj.lexstateignore = linfo.ignore
lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
# Set up error functions
lexobj.lexstateerrorf = linfo.errorf
lexobj.lexerrorf = linfo.errorf.get("INITIAL",None)
if not lexobj.lexerrorf:
errorlog.warning("No t_error rule is defined")
# Check state information for ignore and error rules
for s,stype in stateinfo.items():
if stype == 'exclusive':
if not s in linfo.errorf:
errorlog.warning("No error rule is defined for exclusive state '%s'", s)
if not s in linfo.ignore and lexobj.lexignore:
errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
elif stype == 'inclusive':
if not s in linfo.errorf:
linfo.errorf[s] = linfo.errorf.get("INITIAL",None)
if not s in linfo.ignore:
linfo.ignore[s] = linfo.ignore.get("INITIAL","")
# Create global versions of the token() and input() functions
token = lexobj.token
input = lexobj.input
lexer = lexobj
# If in optimize mode, we write the lextab
if lextab and optimize:
lexobj.writetab(lextab,outputdir)
return lexobj
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None,data=None):
if not data:
try:
filename = sys.argv[1]
f = open(filename)
data = f.read()
f.close()
except IndexError:
sys.stdout.write("Reading from standard input (type EOF to end):\n")
data = sys.stdin.read()
if lexer:
_input = lexer.input
else:
_input = input
_input(data)
if lexer:
_token = lexer.token
else:
_token = token
while 1:
tok = _token()
if not tok: break
sys.stdout.write("(%s,%r,%d,%d)\n" % (tok.type, tok.value, tok.lineno,tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regex expression on a function
# when its docstring might need to be set in an alternative way
# -----------------------------------------------------------------------------
def TOKEN(r):
def set_doc(f):
if hasattr(r,"__call__"):
f.__doc__ = r.__doc__
else:
f.__doc__ = r
return f
return set_doc
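# Illustrative usage (sketch):
#   identifier = r'[a-zA-Z_][a-zA-Z0-9_]*'
#   @TOKEN(identifier)
#   def t_ID(t):
#       return t
# is equivalent to setting t_ID.__doc__ = identifier by hand.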
# Alternative spelling of the TOKEN decorator
Token = TOKEN
|
gianina-ingenuity/titanium-branch-deep-linking
|
testbed/x/mobilesdk/osx/5.5.1.GA/common/css/ply/lex.py
|
Python
|
mit
| 40,747 | 0.011633 |
#!/usr/bin/env python
# encoding: utf-8
"""
fuzz.py
Copyright (c) 2011 Adam Cohen
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import unicode_literals
import platform
import warnings
try:
from .StringMatcher import StringMatcher as SequenceMatcher
except ImportError:
#if platform.python_implementation() != "PyPy":
# warnings.warn('Using slow pure-python SequenceMatcher. Install python-Levenshtein to remove this warning')
from difflib import SequenceMatcher
from . import utils
###########################
# Basic Scoring Functions #
###########################
@utils.check_for_none
@utils.check_empty_string
def ratio(s1, s2):
s1, s2 = utils.make_type_consistent(s1, s2)
m = SequenceMatcher(None, s1, s2)
return utils.intr(100 * m.ratio())
@utils.check_for_none
@utils.check_empty_string
def partial_ratio(s1, s2):
""""Return the ratio of the most similar substring
as a number between 0 and 100."""
s1, s2 = utils.make_type_consistent(s1, s2)
if len(s1) <= len(s2):
shorter = s1
longer = s2
else:
shorter = s2
longer = s1
m = SequenceMatcher(None, shorter, longer)
blocks = m.get_matching_blocks()
# each block represents a sequence of matching characters in a string
# of the form (idx_1, idx_2, len)
    # the best partial match will align with at least one of those blocks
# e.g. shorter = "abcd", longer = XXXbcdeEEE
# block = (1,3,3)
# best score === ratio("abcd", "Xbcd")
scores = []
for block in blocks:
long_start = block[1] - block[0] if (block[1] - block[0]) > 0 else 0
long_end = long_start + len(shorter)
long_substr = longer[long_start:long_end]
m2 = SequenceMatcher(None, shorter, long_substr)
r = m2.ratio()
if r > .995:
return 100
else:
scores.append(r)
return utils.intr(100 * max(scores))
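# Illustrative example (hedged): when the shorter string appears nearly intact
# inside the longer one, the score saturates, e.g.
#   partial_ratio("yankees", "new york yankees")  # -> 100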
##############################
# Advanced Scoring Functions #
##############################
def _process_and_sort(s, force_ascii, full_process=True):
"""Return a cleaned string with token sorted."""
# pull tokens
ts = utils.full_process(s, force_ascii=force_ascii) if full_process else s
tokens = ts.split()
# sort tokens and join
sorted_string = u" ".join(sorted(tokens))
return sorted_string.strip()
# Sorted Token
# find all alphanumeric tokens in the string
# sort those tokens and take ratio of resulting joined strings
# controls for unordered string elements
@utils.check_for_none
def _token_sort(s1, s2, partial=True, force_ascii=True, full_process=True):
sorted1 = _process_and_sort(s1, force_ascii, full_process=full_process)
sorted2 = _process_and_sort(s2, force_ascii, full_process=full_process)
if partial:
return partial_ratio(sorted1, sorted2)
else:
return ratio(sorted1, sorted2)
def token_sort_ratio(s1, s2, force_ascii=True, full_process=True):
"""Return a measure of the sequences' similarity between 0 and 100
    but sorting the tokens before comparing.
"""
return _token_sort(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process)
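# Illustrative example (hedged): word order is ignored, e.g.
#   token_sort_ratio("fuzzy wuzzy was a bear", "wuzzy fuzzy was a bear")  # -> 100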
def partial_token_sort_ratio(s1, s2, force_ascii=True, full_process=True):
"""Return the ratio of the most similar substring as a number between
    0 and 100, but sorting the tokens before comparing.
"""
return _token_sort(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process)
@utils.check_for_none
def _token_set(s1, s2, partial=True, force_ascii=True, full_process=True):
"""Find all alphanumeric tokens in each string...
- treat them as a set
- construct two strings of the form:
<sorted_intersection><sorted_remainder>
- take ratios of those two strings
- controls for unordered partial matches"""
p1 = utils.full_process(s1, force_ascii=force_ascii) if full_process else s1
p2 = utils.full_process(s2, force_ascii=force_ascii) if full_process else s2
if not utils.validate_string(p1):
return 0
if not utils.validate_string(p2):
return 0
# pull tokens
tokens1 = set(p1.split())
tokens2 = set(p2.split())
intersection = tokens1.intersection(tokens2)
diff1to2 = tokens1.difference(tokens2)
diff2to1 = tokens2.difference(tokens1)
sorted_sect = " ".join(sorted(intersection))
sorted_1to2 = " ".join(sorted(diff1to2))
sorted_2to1 = " ".join(sorted(diff2to1))
combined_1to2 = sorted_sect + " " + sorted_1to2
combined_2to1 = sorted_sect + " " + sorted_2to1
# strip
sorted_sect = sorted_sect.strip()
combined_1to2 = combined_1to2.strip()
combined_2to1 = combined_2to1.strip()
if partial:
ratio_func = partial_ratio
else:
ratio_func = ratio
pairwise = [
ratio_func(sorted_sect, combined_1to2),
ratio_func(sorted_sect, combined_2to1),
ratio_func(combined_1to2, combined_2to1)
]
return max(pairwise)
def token_set_ratio(s1, s2, force_ascii=True, full_process=True):
return _token_set(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process)
def partial_token_set_ratio(s1, s2, force_ascii=True, full_process=True):
return _token_set(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process)
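# Illustrative example (hedged): repeated tokens collapse into the set, e.g.
#   token_set_ratio("fuzzy was a bear", "fuzzy fuzzy was a bear")  # -> 100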
###################
# Combination API #
###################
# q is for quick
def QRatio(s1, s2, force_ascii=True):
p1 = utils.full_process(s1, force_ascii=force_ascii)
p2 = utils.full_process(s2, force_ascii=force_ascii)
if not utils.validate_string(p1):
return 0
if not utils.validate_string(p2):
return 0
return ratio(p1, p2)
def UQRatio(s1, s2):
return QRatio(s1, s2, force_ascii=False)
# w is for weighted
def WRatio(s1, s2, force_ascii=True):
"""Return a measure of the sequences' similarity between 0 and 100,
using different algorithms.
"""
p1 = utils.full_process(s1, force_ascii=force_ascii)
p2 = utils.full_process(s2, force_ascii=force_ascii)
if not utils.validate_string(p1):
return 0
if not utils.validate_string(p2):
return 0
# should we look at partials?
try_partial = True
unbase_scale = .95
partial_scale = .90
base = ratio(p1, p2)
len_ratio = float(max(len(p1), len(p2))) / min(len(p1), len(p2))
# if strings are similar length, don't use partials
if len_ratio < 1.5:
try_partial = False
# if one string is much much shorter than the other
if len_ratio > 8:
partial_scale = .6
if try_partial:
partial = partial_ratio(p1, p2) * partial_scale
ptsor = partial_token_sort_ratio(p1, p2, full_process=False) \
* unbase_scale * partial_scale
ptser = partial_token_set_ratio(p1, p2, full_process=False) \
* unbase_scale * partial_scale
return utils.intr(max(base, partial, ptsor, ptser))
else:
tsor = token_sort_ratio(p1, p2, full_process=False) * unbase_scale
tser = token_set_ratio(p1, p2, full_process=False) * unbase_scale
return utils.intr(max(base, tsor, tser))
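# Illustrative sketch of the heuristics above: for p1 = "web" and
# p2 = "website of the web developers" the length ratio is roughly 9.7, so the
# partial_* branch is taken with partial_scale lowered to .6 before taking the max.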
def UWRatio(s1, s2):
"""Return a measure of the sequences' similarity between 0 and 100,
using different algorithms. Same as WRatio but preserving unicode.
"""
return WRatio(s1, s2, force_ascii=False)
|
CHBMB/LazyLibrarian
|
lib/fuzzywuzzy/fuzz.py
|
Python
|
gpl-3.0
| 8,419 | 0.00095 |
###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
#Fixme: this test currently fails due to an issue to be fixed
# Checks that the boxes remain in the image when switching images
# Other things to be tested:
# - interaction with boxes : move them around etc...
import os
import numpy
from PyQt4.QtGui import QApplication,QKeyEvent
from PyQt4.QtCore import QEvent,Qt
from ilastik.workflows.counting import CountingWorkflow
from tests.helpers import ShellGuiTestCaseBase
from lazyflow.operators import OpPixelFeaturesPresmoothed
class TestObjectCountingGuiMultiImage(ShellGuiTestCaseBase):
"""
    Run a set of GUI-based tests on the object counting workflow.
Note: These tests are named in order so that simple cases are tried before complex ones.
Additionally, later tests may depend on earlier ones to run properly.
"""
@classmethod
def workflowClass(cls):
return CountingWorkflow
PROJECT_FILE = os.path.split(__file__)[0] + '/test_project.ilp'
SAMPLE_DATA = []
SAMPLE_DATA.append( os.path.split(__file__)[0] + '/1.npy')
SAMPLE_DATA.append( os.path.split(__file__)[0] + '/0.npy')
SAMPLE_DATA.append( os.path.split(__file__)[0] + '/2.npy')
SAMPLE_DATA.append( os.path.split(__file__)[0] + '/3.npy')
SAMPLE_DATA.append( os.path.split(__file__)[0] + '/4.npy')
@classmethod
def setupClass(cls):
# Base class first
super(TestObjectCountingGuiMultiImage, cls).setupClass()
if hasattr(cls, 'SAMPLE_DATA'):
cls.using_random_data = False
else:
cls.using_random_data = True
cls.SAMPLE_DATA = []
cls.SAMPLE_DATA.append(os.path.split(__file__)[0] + '/random_data1.npy')
cls.SAMPLE_DATA.append(os.path.split(__file__)[0] + '/random_data2.npy')
data1 = numpy.random.random((1,200,200,1,1))
data1 *= 256
data2 = numpy.random.random((1,50,100,1,1))
data2 *= 256
numpy.save(cls.SAMPLE_DATA[0], data1.astype(numpy.uint8))
numpy.save(cls.SAMPLE_DATA[1], data2.astype(numpy.uint8))
@classmethod
def teardownClass(cls):
# Call our base class so the app quits!
super(TestObjectCountingGuiMultiImage, cls).teardownClass()
# Clean up: Delete any test files we generated
removeFiles = [ TestObjectCountingGuiMultiImage.PROJECT_FILE ]
if cls.using_random_data:
removeFiles += TestObjectCountingGuiMultiImage.SAMPLE_DATA
for f in removeFiles:
try:
os.remove(f)
except:
pass
def test_1_NewProject(self):
"""
Create a blank project, manipulate few couple settings, and save it.
"""
def impl():
projFilePath = self.PROJECT_FILE
shell = self.shell
# New project
shell.createAndLoadNewProject(projFilePath, self.workflowClass())
workflow = shell.projectManager.workflow
from ilastik.applets.dataSelection.opDataSelection import DatasetInfo
opDataSelection = workflow.dataSelectionApplet.topLevelOperator
for i, dataFile in enumerate(self.SAMPLE_DATA):
# Add a file
info = DatasetInfo()
info.filePath = dataFile
opDataSelection.DatasetGroup.resize(i+1)
opDataSelection.DatasetGroup[i][0].setValue(info)
# Set some features
opFeatures = workflow.featureSelectionApplet.topLevelOperator
opFeatures.FeatureIds.setValue( OpPixelFeaturesPresmoothed.DefaultFeatureIds )
opFeatures.Scales.setValue( [0.3, 0.7, 1, 1.6, 3.5, 5.0, 10.0] )
# sigma: 0.3 0.7 1.0 1.6 3.5 5.0 10.0
selections = numpy.array( [[True, False, False, False, False, False, False],
[True, False, False, False, False, False, False],
[True, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False]] )
opFeatures.SelectionMatrix.setValue(selections)
# Save and close
shell.projectManager.saveProject()
shell.ensureNoCurrentProject(assertClean=True)
# Run this test from within the shell event loop
self.exec_in_shell(impl)
def test_2_ClosedState(self):
"""
Check the state of various shell and gui members when no project is currently loaded.
"""
def impl():
assert self.shell.projectManager is None
assert self.shell.appletBar.count() == 0
# Run this test from within the shell event loop
self.exec_in_shell(impl)
def test_3_OpenProject(self):
def impl():
self.shell.openProjectFile(self.PROJECT_FILE)
assert self.shell.projectManager.currentProjectFile is not None
# Run this test from within the shell event loop
self.exec_in_shell(impl)
# These points are relative to the CENTER of the view
def test_4_AddDotsAndBackground(self):
"""
Add labels and draw them in the volume editor.
"""
def impl():
imageId = 0
workflow = self.shell.projectManager.workflow
countingClassApplet = workflow.countingApplet
self.shell.imageSelectionCombo.setCurrentIndex(imageId)
gui = countingClassApplet.getMultiLaneGui()
self.waitForViews(gui.currentGui().editor.imageViews)
opPix = countingClassApplet.topLevelOperator
# Select the labeling drawer
self.shell.setSelectedAppletDrawer(3)
# Turn off the huds and so we can capture the raw image
viewMenu = gui.currentGui().menus()[0]
viewMenu.actionToggleAllHuds.trigger()
# Select the labeling drawer
self.shell.setSelectedAppletDrawer(3)
# Turn off the huds and so we can capture the raw image
viewMenu = gui.currentGui().menus()[0]
viewMenu.actionToggleAllHuds.trigger()
## Turn off the slicing position lines
## FIXME: This disables the lines without unchecking the position
## box in the VolumeEditorWidget, making the checkbox out-of-sync
#gui.currentGui().editor.navCtrl.indicateSliceIntersection = False
# Do our tests at position 0,0,0
gui.currentGui().editor.posModel.slicingPos = (0,0,0)
assert gui.currentGui()._labelControlUi.liveUpdateButton.isChecked() == False
assert gui.currentGui()._labelControlUi.labelListModel.rowCount() == 2, "Got {} rows".format(gui.currentGui()._labelControlUi.labelListModel.rowCount())
# Select the brush
gui.currentGui()._labelControlUi.paintToolButton.click()
# Let the GUI catch up: Process all events
QApplication.processEvents()
# Draw some arbitrary labels in the view using mouse events.
# Set the brush size
gui.currentGui()._labelControlUi.brushSizeComboBox.setCurrentIndex(1)
gui.currentGui()._labelControlUi.labelListModel.select(0)
imgView = gui.currentGui().editor.imageViews[2]
dot_start_list = [(-14,-20),(6,-8),(10,4), (20,21)]
dot_stop_list = [(-20,-11),(9,-12),(15,-3), (20,21)]
LABEL_START = (-14,-20)
LABEL_STOP = (-14,-21)
LABEL_ERASE_START = (6,-8)
LABEL_ERASE_STOP = (9,-8)
#draw foreground dots
for start,stop in zip(dot_start_list,dot_stop_list):
self.strokeMouseFromCenter( imgView, start,stop )
labelData = opPix.LabelImages[imageId][:].wait()
assert numpy.sum(labelData[labelData==1]) == 4, "Number of foreground dots was {}".format(
numpy.sum(labelData[labelData==1]) )
center = (numpy.array(labelData.shape[:-1]))/2 + 1
true_idx = numpy.array([center + dot for dot in dot_start_list])
idx = numpy.where(labelData)
test_idx = numpy.array((idx[0],idx[1])).transpose()
# This test doesn't require *exact* pixel locations to match due to rounding differences in mouse strokes.
# Instead, we just require them to be close.
# FIXME: This should be fixable by ensuring that the image is properly zoomed to 1-1 scale before the test.
assert numpy.abs(test_idx - true_idx).max() <= 1
# Set the brush size
# Draw background
gui.currentGui()._labelControlUi.labelListModel.select(1)
gui.currentGui()._labelControlUi.brushSizeComboBox.setCurrentIndex(0)
self.strokeMouseFromCenter( imgView, LABEL_START,LABEL_STOP)
#The background in this configuration should override the dots
labelData = opPix.LabelImages[imageId][:].wait()
assert labelData.max() == 2, "Max label value was {}".format( labelData.max() )
assert numpy.sum(labelData[labelData==1]) == 3, "Number of foreground dots was {}".format(
numpy.sum(labelData[labelData==1]) )
#Now select eraser
gui.currentGui()._labelControlUi.eraserToolButton.click()
gui.currentGui()._labelControlUi.brushSizeComboBox.setCurrentIndex(0)
self.strokeMouseFromCenter( imgView, LABEL_ERASE_START,LABEL_ERASE_STOP)
labelData = opPix.LabelImages[imageId][:].wait()
assert numpy.sum(labelData[labelData==1]) == 2, "Number of foreground dots was {}".format(
numpy.sum(labelData[labelData==1]) )
true_idx = numpy.array([center + dot for dot in dot_start_list[2:]])
idx = numpy.where(labelData == 1)
test_idx = numpy.array((idx[0],idx[1])).transpose()
# This test doesn't require *exact* pixel locations to match due to rounding differences in mouse strokes.
# Instead, we just require them to be close.
# FIXME: This should be fixable by ensuring that the image is properly zoomed to 1-1 scale before the test.
assert numpy.abs(test_idx - true_idx).max() <= 1
self.waitForViews([imgView])
# Save the project
saveThread = self.shell.onSaveProjectActionTriggered()
saveThread.join()
# Run this test from within the shell event loop
self.exec_in_shell(impl)
def test_5_AddBox(self):
"""
Add boxes and draw them in the volume editor.
"""
def impl():
workflow = self.shell.projectManager.workflow
countingClassApplet = workflow.countingApplet
gui = countingClassApplet.getMultiLaneGui()
opPix = countingClassApplet.topLevelOperator
# Select the labeling drawer
self.shell.setSelectedAppletDrawer(3)
# Turn off the huds and so we can capture the raw image
viewMenu = gui.currentGui().menus()[0]
viewMenu.actionToggleAllHuds.trigger()
# Select the labeling drawer
self.shell.setSelectedAppletDrawer(3)
# Turn off the huds and so we can capture the raw image
viewMenu = gui.currentGui().menus()[0]
viewMenu.actionToggleAllHuds.trigger()
## Turn off the slicing position lines
## FIXME: This disables the lines without unchecking the position
## box in the VolumeEditorWidget, making the checkbox out-of-sync
#gui.currentGui().editor.navCtrl.indicateSliceIntersection = False
# Do our tests at position 0,0,0
gui.currentGui().editor.posModel.slicingPos = (0,0,0)
assert gui.currentGui()._labelControlUi.liveUpdateButton.isChecked() == False
assert gui.currentGui()._labelControlUi.labelListModel.rowCount() == 2, "Got {} rows".format(gui.currentGui()._labelControlUi.labelListModel.rowCount())
# Select the brush
gui.currentGui()._labelControlUi.paintToolButton.click()
# Let the GUI catch up: Process all events
QApplication.processEvents()
# Draw some arbitrary labels in the view using mouse events.
gui.currentGui()._labelControlUi.AddBoxButton.click()
imgView = gui.currentGui().editor.imageViews[2]
start_box_list=[(-22,-1),(0,1)]
stop_box_list=[(0,10),(50,20)]
for start,stop in zip(start_box_list,stop_box_list):
self.strokeMouseFromCenter( imgView, start,stop)
added_boxes=len(gui.currentGui()._labelControlUi.boxListModel._elements)
assert added_boxes==2," Not all boxes added to the model curr = %d"%added_boxes
start_box_list= [(-128,-128), (128,128)]
stop_box_list = [(128,128), (-128,-128)]
for start,stop in zip(start_box_list,stop_box_list):
self.strokeMouseFromCenter( imgView, start,stop)
added_boxes=len(gui.currentGui()._labelControlUi.boxListModel._elements)
assert added_boxes==4," Not all boxes added to the model curr = %d"%added_boxes
self.waitForViews([imgView])
# Save the project
saveThread = self.shell.onSaveProjectActionTriggered()
saveThread.join()
# Run this test from within the shell event loop
self.exec_in_shell(impl)
def test_6_SwitchImages(self):
"""
        Switch back and forth between a labeled image and an unlabeled one. Boxes should disappear and then reappear.
"""
def impl():
workflow = self.shell.projectManager.workflow
countingClassApplet = workflow.countingApplet
gui = countingClassApplet.getMultiLaneGui()
# Select the second image
self.shell.imageSelectionCombo.setCurrentIndex(2)
gui = countingClassApplet.getMultiLaneGui()
gui.currentGui().editor.posModel.slicingPos = (0,0,0)
self.waitForViews(gui.currentGui().editor.imageViews)
#check that there is no box then add one
added_boxes=gui.currentGui()._labelControlUi.boxListModel._elements
assert len(added_boxes) == 0, " %s no boxes added yet for the new image"%len(added_boxes)
# Select the first image
self.shell.imageSelectionCombo.setCurrentIndex(0)
gui = countingClassApplet.getMultiLaneGui()
gui.currentGui().editor.posModel.slicingPos = (0,0,0)
self.waitForViews(gui.currentGui().editor.imageViews)
#Check that old boxes are still there
added_boxes=len(gui.currentGui()._labelControlUi.boxListModel._elements)
assert added_boxes==4," Not all boxes added to the model curr = %d"%added_boxes
# Select the second image
self.shell.imageSelectionCombo.setCurrentIndex(1)
gui = countingClassApplet.getMultiLaneGui()
gui.currentGui().editor.posModel.slicingPos = (0,0,0)
self.waitForViews(gui.currentGui().editor.imageViews)
# Run this test from within the shell event loop
self.exec_in_shell(impl)
def test_7_AddDotsAndBackground(self):
"""
Add labels and draw them in the volume editor.
"""
def impl():
imageId = 1
workflow = self.shell.projectManager.workflow
countingClassApplet = workflow.countingApplet
self.shell.imageSelectionCombo.setCurrentIndex(imageId)
gui = countingClassApplet.getMultiLaneGui()
self.waitForViews(gui.currentGui().editor.imageViews)
opPix = countingClassApplet.topLevelOperator
# Select the labeling drawer
self.shell.setSelectedAppletDrawer(3)
# Turn off the huds and so we can capture the raw image
viewMenu = gui.currentGui().menus()[0]
viewMenu.actionToggleAllHuds.trigger()
# Select the labeling drawer
self.shell.setSelectedAppletDrawer(3)
# Turn off the huds and so we can capture the raw image
viewMenu = gui.currentGui().menus()[0]
viewMenu.actionToggleAllHuds.trigger()
## Turn off the slicing position lines
## FIXME: This disables the lines without unchecking the position
## box in the VolumeEditorWidget, making the checkbox out-of-sync
#gui.currentGui().editor.navCtrl.indicateSliceIntersection = False
# Do our tests at position 0,0,0
gui.currentGui().editor.posModel.slicingPos = (0,0,0)
assert gui.currentGui()._labelControlUi.liveUpdateButton.isChecked() == False
assert gui.currentGui()._labelControlUi.labelListModel.rowCount() == 2, "Got {} rows".format(gui.currentGui()._labelControlUi.labelListModel.rowCount())
# Select the brush
gui.currentGui()._labelControlUi.paintToolButton.click()
# Let the GUI catch up: Process all events
QApplication.processEvents()
# Draw some arbitrary labels in the view using mouse events.
# Set the brush size
gui.currentGui()._labelControlUi.brushSizeComboBox.setCurrentIndex(1)
gui.currentGui()._labelControlUi.labelListModel.select(0)
imgView = gui.currentGui().editor.imageViews[2]
dot_start_list = [(-25,-20),(9,-15),(15,-3)]
dot_stop_list = [(-25,-11),(9,-12),(15,-3)]
LABEL_START = (-25,-30)
LABEL_STOP = (-25,-20)
LABEL_ERASE_START = (9,-15)
LABEL_ERASE_STOP = (15,-3)
#draw foreground dots
for start,stop in zip(dot_start_list,dot_stop_list):
self.strokeMouseFromCenter( imgView, start,stop )
labelData = opPix.LabelImages[imageId][:].wait()
assert numpy.sum(labelData[labelData==1]) == 3, "Number of foreground dots was {}".format(
numpy.sum(labelData[labelData==1]) )
center = (numpy.array(labelData.shape[:-1]))/2 + 1
true_idx = numpy.array([center + dot for dot in dot_start_list])
idx = numpy.where(labelData)
test_idx = numpy.array((idx[0],idx[1])).transpose()
# This test doesn't require *exact* pixel locations to match due to rounding differences in mouse strokes.
# Instead, we just require them to be close.
# FIXME: This should be fixable by ensuring that the image is properly zoomed to 1-1 scale before the test.
assert numpy.abs(test_idx - true_idx).max() <= 1
# Set the brush size
# Draw background
gui.currentGui()._labelControlUi.labelListModel.select(1)
gui.currentGui()._labelControlUi.brushSizeComboBox.setCurrentIndex(0)
self.strokeMouseFromCenter( imgView, LABEL_START,LABEL_STOP)
#The background in this configuration should override the dots
labelData = opPix.LabelImages[imageId][:].wait()
assert labelData.max() == 2, "Max label value was {}".format( labelData.max() )
assert numpy.sum(labelData[labelData==1]) == 2, "Number of foreground dots was {}".format(
numpy.sum(labelData[labelData==1]) )
assert numpy.sum(labelData[labelData==2]) == 22, "Number of background dots was {}".format(
numpy.sum(labelData[labelData==2]) )
#Now select eraser
gui.currentGui()._labelControlUi.eraserToolButton.click()
self.strokeMouseFromCenter( imgView, LABEL_ERASE_START,LABEL_ERASE_STOP)
labelData = opPix.LabelImages[imageId][:].wait()
assert numpy.sum(labelData[labelData==1]) == 0, "Number of foreground dots was {}".format(
numpy.sum(labelData[labelData==1]) )
self.waitForViews([imgView])
QApplication.processEvents()
LABEL_START = (-128,-128)
LABEL_STOP = (128,128)
LABEL_ERASE_START = (-128,-128)
LABEL_ERASE_STOP = (128,128)
gui.currentGui()._labelControlUi.labelListModel.select(1)
gui.currentGui()._labelControlUi.brushSizeComboBox.setCurrentIndex(0)
self.strokeMouseFromCenter( imgView, LABEL_START,LABEL_STOP)
self.waitForViews([imgView])
labelData = opPix.LabelImages[imageId][:].wait()
# assert numpy.sum(labelData[labelData==2]) > 22, "Number of background dots was {}".format(
# numpy.sum(labelData[labelData==2]) )
gui.currentGui()._labelControlUi.AddBoxButton.click()
self.strokeMouseFromCenter(imgView, LABEL_START, LABEL_STOP)
labelData = opPix.LabelImages[imageId][:].wait()
self.waitForViews([imgView])
gui.currentGui()._labelControlUi.eraserToolButton.click()
self.strokeMouseFromCenter( imgView, LABEL_ERASE_START,LABEL_ERASE_STOP)
labelData = opPix.LabelImages[imageId][:].wait()
# assert numpy.sum(labelData[labelData==2]) == 20, "Number of background dots was {}".format(
# numpy.sum(labelData[labelData==2]) )
# Save the project
saveThread = self.shell.onSaveProjectActionTriggered()
saveThread.join()
# Run this test from within the shell event loop
self.exec_in_shell(impl)
def test_8_UpdateSum(self):
"""
        Click on the interactive mode to see if training has been
        successful in the second image even if the labels are given
        in the first one
"""
def impl():
workflow = self.shell.projectManager.workflow
countingClassApplet = workflow.countingApplet
gui = countingClassApplet.getMultiLaneGui()
clicked=False
def toggle(clicked):
clicked= not clicked
gui.currentGui()._labelControlUi.liveUpdateButton.click()
return clicked
SVROptions=gui.currentGui()._labelControlUi.SVROptions
#Test each one of the counting modality which is registered
for el in range(SVROptions.count()):
#if clicked:
# clicked=toggle(clicked)
SVROptions.setCurrentIndex(el)
#clicked=toggle(clicked)
imgView = gui.currentGui().editor.imageViews[2]
# FIXME: somehow this triggers computation of the density but
# this value is not updated
gui.currentGui().labelingDrawerUi.DensityButton.click()
self.waitForViews([imgView])
density = gui.currentGui().op.OutputSum[...].wait()
# Check that the predicted count is in a fine range
assert density[0]>70,"Density value is too low: {0:.2f}".format(density[0])
assert density[0]<150,"Density value is too high: {0:.2f}".format(density[0])
#if clicked:
#clicked=toggle(clicked)
# Run this test from within the shell event loop
self.exec_in_shell(impl)
def test_9_CheckDensity(self):
"""
Test if the different operators produce the same density
"""
def impl():
workflow = self.shell.projectManager.workflow
countingClassApplet = workflow.countingApplet
gui = countingClassApplet.getMultiLaneGui()
self.shell.imageSelectionCombo.setCurrentIndex(0)
gui.currentGui().editor.posModel.slicingPos = (0,0,0)
self.waitForViews(gui.currentGui().editor.imageViews)
operatorDensity = numpy.sum(gui.currentGui().op.Density[...].wait())
sumDensity = gui.currentGui().op.OutputSum[...].wait()
gui.currentGui().labelingDrawerUi.liveUpdateButton.setChecked(False)
displayedDensity = gui.currentGui()._labelControlUi.CountText.text()
while str(displayedDensity) == ' -- --':
gui.currentGui().labelingDrawerUi.DensityButton.click()
displayedDensity = gui.currentGui()._labelControlUi.CountText.text()
self.waitForViews(gui.currentGui().editor.imageViews)
displayedDensity = float(str(displayedDensity))
            assert abs(displayedDensity - operatorDensity) < 1E-1, "Density mismatch: the displayed Density {} is not\
equal to the internal density from the Operator {}".format(displayedDensity, operatorDensity)
assert abs(operatorDensity - sumDensity) < 1E-1, "Density mismatch: the Sum operator {} does not return the same\
result as using numpy.sum {}".format(operatorDensity, sumDensity)
self.exec_in_shell(impl)
def test_6_CheckBox(self):
"""
        Check that the density value written to each box matches the value
        obtained by summing the Density operator output over the box region
"""
def impl():
workflow = self.shell.projectManager.workflow
countingClassApplet = workflow.countingApplet
gui = countingClassApplet.getMultiLaneGui()
self.shell.imageSelectionCombo.setCurrentIndex(0)
gui.currentGui().editor.posModel.slicingPos = (0,0,0)
self.waitForViews(gui.currentGui().editor.imageViews)
boxes = gui.currentGui()._labelControlUi.boxListModel._elements
boxList = gui.currentGui().boxController._currentBoxesList
for box, boxHandle in zip(boxes, boxList):
start = boxHandle.getStart()
stop = boxHandle.getStop()
slicing = [slice(s1, s2) for s1, s2 in zip(start, stop)]
val = numpy.sum(gui.currentGui().op.Density[slicing[1:3]].wait())
val2 = float(box.density)
assert abs(val - val2) < 1E-3, "The value written to the box {} differs from the one gotten via the\
operator {}".format(val, val2)
self.exec_in_shell(impl)
def test_11_CheckBoxes(self):
"""
        Check that two boxes covering the same region report the same density,
        and that a box covering the whole image matches the total predicted
        count for that image
"""
def impl():
workflow = self.shell.projectManager.workflow
countingClassApplet = workflow.countingApplet
gui = countingClassApplet.getMultiLaneGui()
self.shell.imageSelectionCombo.setCurrentIndex(0)
gui.currentGui().editor.posModel.slicingPos = (0,0,0)
self.waitForViews(gui.currentGui().editor.imageViews)
boxes = gui.currentGui()._labelControlUi.boxListModel._elements
fullBoxVal = float(boxes[2]._density)
fullBoxVal2 = float(boxes[3]._density)
            assert abs(fullBoxVal - fullBoxVal2) < 1E-5, "Box mismatch: {:.2f} is not close to\
            {:.2f}".format(fullBoxVal, fullBoxVal2)
self.shell.imageSelectionCombo.setCurrentIndex(1)
gui.currentGui().labelingDrawerUi.DensityButton.click()
boxes = gui.currentGui()._labelControlUi.boxListModel._elements
fullBoxVal = float(boxes[0]._density)
density = gui.currentGui().op.OutputSum[...].wait()
assert density[0] == fullBoxVal, "Box mismatch: {} != {}".format(density[0], fullBoxVal)
            #FIXME: this test is disabled because of an incompatibility
            # between the coordinates which are passed when drawing the box with
            # strokeMouseFromCenter and the coordinates of the boxes.
            # It should check that the density of the entire image is equal to the density of a box which covers the whole image
# Run this test from within the shell event loop
#self.exec_in_shell(impl)
def _switchToImg(self,img_number):
# Select the second image
self.shell.imageSelectionCombo.setCurrentIndex(img_number)
workflow = self.shell.projectManager.workflow
countingClassApplet = workflow.countingApplet
gui = countingClassApplet.getMultiLaneGui()
gui.currentGui().editor.posModel.slicingPos = (0,0,0)
self.waitForViews(gui.currentGui().editor.imageViews)
return gui,gui.currentGui(),gui.currentGui().editor.imageViews[2]
#FIXME Currently not working, for some reason the ctrl modifier has no effect here
# def test_8_MoveBoxAroundAndDelete(self):
# """
    #     Try to move a box around and delete it, to see if training is still
    #     successful in the second image even if the labels are given
    #     in the first one
# """
# def impl():
# gui,currentGui,imgView = self._switchToImg(0)
# gui.currentGui()._labelControlUi.AddBoxButton.click()
# start_box_list=[(-50,-50)]
# stop_box_list=[(150,150)]
# for start,stop in zip(start_box_list,stop_box_list):
# QApplication.processEvents()
# self.strokeMouseFromCenter( imgView, start,stop,Qt.NoModifier,1)
# QApplication.processEvents()
# import time
# time.sleep(3)
# #if clicked:
# #clicked=toggle(clicked)
# # Run this test from within the shell event loop
# self.exec_in_shell(impl)
if __name__ == "__main__":
from tests.helpers.shellGuiTestCaseBase import run_shell_nosetest
run_shell_nosetest(__file__)
|
ilastikdev/ilastik
|
tests/test_applets/objectCounting/testObjectCountingMultiImageGui.py
|
Python
|
gpl-3.0
| 31,053 | 0.010756 |
from __future__ import division, print_function, absolute_import
import time
from collections import defaultdict
import numpy as np
try:
import scipy.optimize
from scipy.optimize.optimize import rosen, rosen_der, rosen_hess
from scipy.optimize import leastsq
except ImportError:
pass
from . import test_functions as funcs
from .common import Benchmark
from .lsq_problems import extract_lsq_problems
class _BenchOptimizers(Benchmark):
"""a framework for benchmarking the optimizer
Parameters
----------
function_name : string
fun : callable
der : callable
function that returns the derivative (jacobian, gradient) of fun
hess : callable
function that returns the hessian of fun
minimizer_kwargs : kwargs
additional keywords passed to the minimizer. e.g. tol, maxiter
"""
def __init__(self, function_name, fun, der=None, hess=None,
**minimizer_kwargs):
self.function_name = function_name
self.fun = fun
self.der = der
self.hess = hess
self.minimizer_kwargs = minimizer_kwargs
if "tol" not in minimizer_kwargs:
minimizer_kwargs["tol"] = 1e-4
self.results = []
def reset(self):
self.results = []
def add_result(self, result, t, name):
"""add a result to the list"""
result.time = t
result.name = name
if not hasattr(result, "njev"):
result.njev = 0
if not hasattr(result, "nhev"):
result.nhev = 0
self.results.append(result)
def print_results(self):
"""print the current list of results"""
results = self.average_results()
results = sorted(results, key=lambda x: (x.nfail, x.mean_time))
if not results:
return
print("")
print("=========================================================")
print("Optimizer benchmark: %s" % (self.function_name))
print("dimensions: %d, extra kwargs: %s" % (results[0].ndim, str(self.minimizer_kwargs)))
print("averaged over %d starting configurations" % (results[0].ntrials))
print(" Optimizer nfail nfev njev nhev time")
print("---------------------------------------------------------")
for res in results:
print("%11s | %4d | %4d | %4d | %4d | %.6g" %
(res.name, res.nfail, res.mean_nfev, res.mean_njev, res.mean_nhev, res.mean_time))
def average_results(self):
"""group the results by minimizer and average over the runs"""
grouped_results = defaultdict(list)
for res in self.results:
grouped_results[res.name].append(res)
averaged_results = dict()
for name, result_list in grouped_results.items():
newres = scipy.optimize.OptimizeResult()
newres.name = name
newres.mean_nfev = np.mean([r.nfev for r in result_list])
newres.mean_njev = np.mean([r.njev for r in result_list])
newres.mean_nhev = np.mean([r.nhev for r in result_list])
newres.mean_time = np.mean([r.time for r in result_list])
newres.ntrials = len(result_list)
newres.nfail = len([r for r in result_list if not r.success])
try:
newres.ndim = len(result_list[0].x)
except TypeError:
newres.ndim = 1
averaged_results[name] = newres
return averaged_results.values()
def bench_run(self, x0, methods=None, **minimizer_kwargs):
"""do an optimization test starting at x0 for all the optimizers"""
kwargs = self.minimizer_kwargs
if methods is None:
methods = ["COBYLA", 'Powell',
'L-BFGS-B', 'BFGS', 'CG', 'TNC', 'SLSQP',
"Newton-CG", 'dogleg', 'trust-ncg']
fonly_methods = ["COBYLA", 'Powell']
for method in fonly_methods:
if method not in methods:
continue
t0 = time.time()
res = scipy.optimize.minimize(self.fun, x0, method=method,
**kwargs)
t1 = time.time()
self.add_result(res, t1-t0, method)
gradient_methods = ['L-BFGS-B', 'BFGS', 'CG', 'TNC', 'SLSQP']
if self.der is not None:
for method in gradient_methods:
if method not in methods:
continue
t0 = time.time()
res = scipy.optimize.minimize(self.fun, x0, method=method,
jac=self.der, **kwargs)
t1 = time.time()
self.add_result(res, t1-t0, method)
hessian_methods = ["Newton-CG", 'dogleg', 'trust-ncg']
if self.hess is not None:
for method in hessian_methods:
if method not in methods:
continue
t0 = time.time()
res = scipy.optimize.minimize(self.fun, x0, method=method,
jac=self.der, hess=self.hess,
**kwargs)
t1 = time.time()
self.add_result(res, t1-t0, method)
class BenchSmoothUnbounded(Benchmark):
"""Benchmark the optimizers with smooth, unbounded, functions"""
params = [
['rosenbrock', 'rosenbrock_tight',
'simple_quadratic', 'asymmetric_quadratic',
'sin_1d', 'booth', 'beale', 'LJ'],
["COBYLA", 'Powell',
'L-BFGS-B', 'BFGS', 'CG', 'TNC', 'SLSQP',
"Newton-CG", 'dogleg', 'trust-ncg'],
["mean_nfev", "mean_time"]
]
param_names = ["test function", "solver", "result type"]
def setup(self, func_name, method_name, ret_val):
b = getattr(self, 'run_' + func_name)(methods=[method_name])
results = b.average_results()
result = None
for r in results:
if r.name == method_name:
result = getattr(r, ret_val)
break
if result is None:
raise NotImplementedError()
self.result = result
def track_all(self, func_name, method_name, ret_val):
return self.result
def run_rosenbrock(self, methods=None):
b = _BenchOptimizers("Rosenbrock function",
fun=rosen, der=rosen_der, hess=rosen_hess)
for i in range(10):
b.bench_run(np.random.uniform(-3, 3, 3), methods=methods)
return b
def run_rosenbrock_tight(self, methods=None):
b = _BenchOptimizers("Rosenbrock function",
fun=rosen, der=rosen_der, hess=rosen_hess,
tol=1e-8)
for i in range(10):
b.bench_run(np.random.uniform(-3, 3, 3), methods=methods)
return b
def run_simple_quadratic(self, methods=None):
s = funcs.SimpleQuadratic()
# print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
b = _BenchOptimizers("simple quadratic function",
fun=s.fun, der=s.der, hess=s.hess)
for i in range(10):
b.bench_run(np.random.uniform(-2, 2, 3), methods=methods)
return b
def run_asymmetric_quadratic(self, methods=None):
s = funcs.AsymmetricQuadratic()
# print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
b = _BenchOptimizers("function sum(x**2) + x[0]",
fun=s.fun, der=s.der, hess=s.hess)
for i in range(10):
b.bench_run(np.random.uniform(-2, 2, 3), methods=methods)
return b
def run_sin_1d(self, methods=None):
fun = lambda x: np.sin(x[0])
der = lambda x: np.array([np.cos(x[0])])
b = _BenchOptimizers("1d sin function",
fun=fun, der=der, hess=None)
for i in range(10):
b.bench_run(np.random.uniform(-2, 2, 1), methods=methods)
return b
def run_booth(self, methods=None):
s = funcs.Booth()
# print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
b = _BenchOptimizers("Booth's function",
fun=s.fun, der=s.der, hess=None)
for i in range(10):
b.bench_run(np.random.uniform(0, 10, 2), methods=methods)
return b
def run_beale(self, methods=None):
s = funcs.Beale()
# print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
b = _BenchOptimizers("Beale's function",
fun=s.fun, der=s.der, hess=None)
for i in range(10):
b.bench_run(np.random.uniform(0, 10, 2), methods=methods)
return b
def run_LJ(self, methods=None):
s = funcs.LJ()
# print "checking gradient", scipy.optimize.check_grad(s.get_energy, s.get_gradient,
# np.random.uniform(-2,2,3*4))
natoms = 4
b = _BenchOptimizers("%d atom Lennard Jones potential" % (natoms),
fun=s.fun, der=s.der, hess=None)
for i in range(10):
b.bench_run(np.random.uniform(-2, 2, natoms*3), methods=methods)
return b
class BenchLeastSquares(Benchmark):
"""Class for benchmarking nonlinear least squares solvers."""
problems = extract_lsq_problems()
params = [
list(problems.keys()),
["average time", "nfev", "success"]
]
param_names = [
"problem", "result type"
]
def track_all(self, problem_name, result_type):
problem = self.problems[problem_name]
if problem.lb is not None or problem.ub is not None:
raise NotImplementedError
ftol = 1e-5
if result_type == 'average time':
n_runs = 10
t0 = time.time()
for _ in range(n_runs):
leastsq(problem.fun, problem.x0, Dfun=problem.jac, ftol=ftol,
full_output=True)
return (time.time() - t0) / n_runs
x, cov_x, info, message, ier = leastsq(
problem.fun, problem.x0, Dfun=problem.jac,
ftol=ftol, full_output=True
)
if result_type == 'nfev':
return info['nfev']
elif result_type == 'success':
return int(problem.check_answer(x, ftol))
else:
raise NotImplementedError
|
WillieMaddox/scipy
|
benchmarks/benchmarks/optimize.py
|
Python
|
bsd-3-clause
| 10,537 | 0.000949 |
def foo(x):
pass
x = 42
y = 42
z = 42
foo(x, y, <caret>)
|
siosio/intellij-community
|
python/testData/multipleArgumentsCompletion/noExceptionIfMoreArgumentsThanParameters.py
|
Python
|
apache-2.0
| 61 | 0.065574 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# Jean Gabes, naparuba@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
# Grégory Starck, g.starck@gmail.com
# Sebastien Coavoux, s.coavoux@free.fr
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from alignak_test import *
class TestStarInGroups(AlignakTest):
def setUp(self):
self.setup_with_file('etc/alignak_star_in_hostgroups.cfg')
# If we reach a good start, we are ok :)
# the bug was that an * hostgroup expand get all host_name != ''
# without looking at register 0 or not
def test_star_in_groups(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print "Get the hosts and services"
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "TEST")
self.assertIsNot(svc, None)
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "TEST_HNAME_STAR")
self.assertIsNot(svc, None)
if __name__ == '__main__':
unittest.main()
|
ddurieux/alignak
|
test/test_star_in_hostgroups.py
|
Python
|
agpl-3.0
| 3,007 | 0.000665 |
## This file is part of PyGaze - the open-source toolbox for eye tracking
##
## PyGaze is a Python module for easily creating gaze contingent experiments
## or other software (as well as non-gaze contingent experiments/software)
## Copyright (C) 2012-2013 Edwin S. Dalmaijer
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>
#
# version: 0.4 (25-03-2013)
# MAIN
DUMMYMODE = True # False for gaze contingent display, True for dummy mode (using mouse or joystick)
LOGFILENAME = 'default' # logfilename, without path
LOGFILE = LOGFILENAME[:] # .txt; adding path before logfilename is optional; logs responses (NOT eye movements, these are stored in an EDF file!)
TRIALS = 5
# DISPLAY
# used in libscreen, for the *_display functions. The values may be adjusted,
# but not the constants' names
SCREENNR = 0 # number of the screen used for displaying experiment
DISPTYPE = 'pygame' # either 'psychopy' or 'pygame'
DISPSIZE = (1920, 1080) # resolution
SCREENSIZE = (34.5, 19.7) # physical display size in cm
MOUSEVISIBLE = False # mouse visibility
BGC = (125,125,125) # backgroundcolour
FGC = (0,0,0) # foregroundcolour
FULLSCREEN = False
# SOUND
# defaults used in libsound. The values may be adjusted, but not the constants'
# names
SOUNDOSCILLATOR = 'sine' # 'sine', 'saw', 'square' or 'whitenoise'
SOUNDFREQUENCY = 440 # Herz
SOUNDLENGTH = 100 # milliseconds (duration)
SOUNDATTACK = 0 # milliseconds (fade-in)
SOUNDDECAY = 5 # milliseconds (fade-out)
SOUNDBUFFERSIZE = 1024 # increase if playback is choppy
SOUNDSAMPLINGFREQUENCY = 48000 # samples per second
SOUNDSAMPLESIZE = -16 # determines bit depth (negative is signed)
SOUNDCHANNELS = 2 # 1 = mono, 2 = stereo
# INPUT
# used in libinput. The values may be adjusted, but not the constant names.
MOUSEBUTTONLIST = None # None for all mouse buttons; list of numbers for buttons of choice (e.g. [1,3] for buttons 1 and 3)
MOUSETIMEOUT = None # None for no timeout, or a value in milliseconds
KEYLIST = None # None for all keys; list of keynames for keys of choice (e.g. ['space','9',':'] for space, 9 and ; keys)
KEYTIMEOUT = 1 # None for no timeout, or a value in milliseconds
JOYBUTTONLIST = None # None for all joystick buttons; list of button numbers (start counting at 0) for buttons of choice (e.g. [0,3] for buttons 0 and 3 - may be reffered to as 1 and 4 in other programs)
JOYTIMEOUT = None # None for no timeout, or a value in milliseconds
# EYETRACKER
# general
TRACKERTYPE = 'smi' # either 'smi', 'eyelink' or 'dummy' (NB: if DUMMYMODE is True, trackertype will be set to dummy automatically)
SACCVELTHRESH = 35 # degrees per second, saccade velocity threshold
SACCACCTHRESH = 9500 # degrees per second, saccade acceleration threshold
# EyeLink only
# SMI only
SMIIP = '127.0.0.1'
SMISENDPORT = 4444
SMIRECEIVEPORT = 5555
# FRL
# Used in libgazecon.FRL. The values may be adjusted, but not the constant names.
FRLSIZE = 200 # pixles, FRL-size
FRLDIST = 125 # distance between fixation point and FRL
FRLTYPE = 'gauss' # 'circle', 'gauss', 'ramp' or 'raisedCosine'
FRLPOS = 'center' # 'center', 'top', 'topright', 'right', 'bottomright', 'bottom', 'bottomleft', 'left', or 'topleft'
# CURSOR
# Used in libgazecon.Cursor. The values may be adjusted, but not the constants' names
CURSORTYPE = 'cross' # 'rectangle', 'ellipse', 'plus' (+), 'cross' (X), 'arrow'
CURSORSIZE = 20 # pixels, either an integer value or a tuple for width and height (w,h)
CURSORCOLOUR = 'pink' # colour name (e.g. 'red'), a tuple RGB-triplet (e.g. (255, 255, 255) for white or (0,0,0) for black), or a RGBA-value (e.g. (255,0,0,255) for red)
CURSORFILL = True # True for filled cursor, False for non filled cursor
CURSORPENWIDTH = 3 # cursor edge width in pixels (only if cursor is not filled)
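# Hedged usage sketch (an assumption about the usual PyGaze example layout,
# where the accompanying experiment script imports this module wholesale):
#
#     from constants import *
#     from pygaze import libscreen
#
#     disp = libscreen.Display(disptype=DISPTYPE, dispsize=DISPSIZE,
#                              fgc=FGC, bgc=BGC)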
|
esdalmaijer/PyGaze
|
examples/simple_experiment/constants.py
|
Python
|
gpl-3.0
| 4,461 | 0.014795 |
import requests


def send_simple_message():
return requests.post(
"https://api.mailgun.net/v3/sandbox049ff464a4d54974bb0143935f9577ef.mailgun.org/messages",
auth=("api", "key-679dc79b890e700f11f001a6bf86f4a1"),
data={"from": "Mailgun Sandbox <postmaster@sandbox049ff464a4d54974bb0143935f9577ef.mailgun.org>",
"to": "nick <nicorellius@gmail.com>",
"subject": "Hello nick",
"text": "Congratulations nick, you just sent an email with Mailgun! You are truly awesome! You can see a record of this email in your logs: https://mailgun.com/cp/log . You can send up to 300 emails/day from this sandbox server. Next, you should add your own domain so you can send 10,000 emails/month for free."})
# cURL command to send mail with API key
# curl -s --user 'api:key-679dc79b890e700f11f001a6bf86f4a1' \
# https://api.mailgun.net/v3/mail.pdxpixel.com/messages \
# -F from='Excited User <mailgun@pdxpixel.com>' \
# -F to=nick@pdxpixel.com \
# -F subject='Hello' \
# -F text='Testing some Mailgun awesomness!'
|
nicorellius/pdxpixel
|
pdxpixel/core/mailgun.py
|
Python
|
mit
| 1,073 | 0.002796 |
import os
from pyramid.config import Configurator
from pyramid.renderers import JSONP
from pyramid.settings import aslist
from citedby import controller
from citedby.controller import cache_region as controller_cache_region
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
config = Configurator(settings=settings)
config.add_renderer('jsonp', JSONP(param_name='callback', indent=4))
def add_controller(request):
es = os.environ.get(
'ELASTICSEARCH_HOST',
settings.get('elasticsearch_host', '127.0.0.1:9200')
)
es_index = os.environ.get(
'ELASTICSEARCH_INDEX',
settings.get('elasticsearch_index', 'citations')
)
return controller.controller(
aslist(es),
sniff_on_connection_fail=True,
timeout=600
).set_base_index(es_index)
config.add_route('index', '/')
config.add_route('status', '/_status/')
config.add_route('citedby_pid', '/api/v1/pid/')
config.add_route('citedby_doi', '/api/v1/doi/')
config.add_route('citedby_meta', '/api/v1/meta/')
config.add_request_method(add_controller, 'controller', reify=True)
# Cache Settings Config
memcached_host = os.environ.get(
'MEMCACHED_HOST',
settings.get('memcached_host', None)
)
memcached_expiration_time = os.environ.get(
'MEMCACHED_EXPIRATION_TIME',
settings.get('memcached_expiration_time', 2592000) # a month cache
)
    if memcached_host is not None:
cache_config = {}
cache_config['expiration_time'] = int(memcached_expiration_time)
cache_config['arguments'] = {'url': memcached_host, 'binary': True}
controller_cache_region.configure('dogpile.cache.pylibmc', **cache_config)
else:
controller_cache_region.configure('dogpile.cache.null')
config.scan()
return config.make_wsgi_app()
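# Hedged illustration (the real decorated callables live in citedby.controller):
# the dogpile region configured above is typically applied to expensive lookups
# through its decorator, e.g.
#
#     @controller_cache_region.cache_on_arguments()
#     def fetch_citations(pid):
#         ...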
|
scieloorg/citedby
|
citedby/__init__.py
|
Python
|
bsd-2-clause
| 1,974 | 0.000507 |
# Ridiculously simple test of the os.startfile function for Windows.
#
# empty.vbs is an empty file (except for a comment), which does
# nothing when run with cscript or wscript.
#
# A possible improvement would be to have empty.vbs do something that
# we can detect here, to make sure that not only the os.startfile()
# call succeeded, but also the script actually has run.
import unittest
from test import support
import os
import sys
from os import path
startfile = support.get_attribute(os, 'startfile')
class TestCase(unittest.TestCase):
def test_nonexisting(self):
self.assertRaises(OSError, startfile, "nonexisting.vbs")
def test_empty(self):
# We need to make sure the child process starts in a directory
# we're not about to delete. If we're running under -j, that
# means the test harness provided directory isn't a safe option.
# See http://bugs.python.org/issue15526 for more details
with support.change_cwd(path.dirname(sys.executable)):
empty = path.join(path.dirname(__file__), "empty.vbs")
startfile(empty)
startfile(empty, "open")
if __name__ == "__main__":
unittest.main()
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/python/lib/test/test_startfile.py
|
Python
|
gpl-3.0
| 1,193 | 0.000838 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import shutil
from profile_creators import profile_generator
from profile_creators import small_profile_extender
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story
class Typical25ProfileSharedState(shared_page_state.SharedDesktopPageState):
"""Shared state associated with a profile generated from 25 navigations.
Generates a shared profile on initialization.
"""
def __init__(self, test, finder_options, story_set):
super(Typical25ProfileSharedState, self).__init__(
test, finder_options, story_set)
generator = profile_generator.ProfileGenerator(
small_profile_extender.SmallProfileExtender,
'small_profile')
self._out_dir, self._owns_out_dir = generator.Run(finder_options)
if self._out_dir:
finder_options.browser_options.profile_dir = self._out_dir
else:
finder_options.browser_options.dont_override_profile = True
def TearDownState(self):
"""Clean up generated profile directory."""
super(Typical25ProfileSharedState, self).TearDownState()
if self._owns_out_dir:
shutil.rmtree(self._out_dir)
class Typical25Page(page_module.Page):
def __init__(self, url, page_set, run_no_page_interactions,
shared_page_state_class=shared_page_state.SharedDesktopPageState):
super(Typical25Page, self).__init__(
url=url, page_set=page_set,
shared_page_state_class=shared_page_state_class)
self._run_no_page_interactions = run_no_page_interactions
def RunPageInteractions(self, action_runner):
if self._run_no_page_interactions:
return
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollPage()
class Typical25PageSet(story.StorySet):
""" Pages designed to represent the median, not highly optimized web """
def __init__(self, run_no_page_interactions=False,
page_class=Typical25Page):
super(Typical25PageSet, self).__init__(
archive_data_file='data/typical_25.json',
cloud_storage_bucket=story.PARTNER_BUCKET)
urls_list = [
# Why: Alexa games #48
'http://www.nick.com/games',
# Why: Alexa sports #45
'http://www.rei.com/',
# Why: Alexa sports #50
'http://www.fifa.com/',
# Why: Alexa shopping #41
'http://www.gamestop.com/ps3',
# Why: Alexa shopping #25
'http://www.barnesandnoble.com/u/books-bestselling-books/379003057/',
# Why: Alexa news #55
('http://www.economist.com/news/science-and-technology/21573529-small-'
'models-cosmic-phenomena-are-shedding-light-real-thing-how-build'),
# Why: Alexa news #67
'http://www.theonion.com',
'http://arstechnica.com/',
# Why: Alexa home #10
'http://allrecipes.com/Recipe/Pull-Apart-Hot-Cross-Buns/Detail.aspx',
'http://www.html5rocks.com/en/',
'http://www.mlb.com/',
# pylint: disable=line-too-long
'http://gawker.com/5939683/based-on-a-true-story-is-a-rotten-lie-i-hope-you-never-believe',
'http://www.imdb.com/title/tt0910970/',
'http://www.flickr.com/search/?q=monkeys&f=hp',
'http://money.cnn.com/',
'http://www.nationalgeographic.com/',
'http://premierleague.com',
'http://www.osubeavers.com/',
'http://walgreens.com',
'http://colorado.edu',
('http://www.ticketmaster.com/JAY-Z-and-Justin-Timberlake-tickets/artist/'
'1837448?brand=none&tm_link=tm_homeA_rc_name2'),
# pylint: disable=line-too-long
'http://www.theverge.com/2013/3/5/4061684/inside-ted-the-smartest-bubble-in-the-world',
'http://www.airbnb.com/',
'http://www.ign.com/',
# Why: Alexa health #25
'http://www.fda.gov',
]
for url in urls_list:
self.AddStory(
page_class(url, self, run_no_page_interactions))
|
highweb-project/highweb-webcl-html5spec
|
tools/perf/page_sets/typical_25.py
|
Python
|
bsd-3-clause
| 4,012 | 0.004487 |
import os
from unittest import TestCase
from click.testing import CliRunner
from regparser.commands.clear import clear
from regparser.index import entry
class CommandsClearTests(TestCase):
def setUp(self):
self.cli = CliRunner()
def test_no_errors_when_clear(self):
"""Should raise no errors when no cached files are present"""
with self.cli.isolated_filesystem():
self.cli.invoke(clear)
def test_deletes_fr_cache(self):
with self.cli.isolated_filesystem():
open('fr_cache.sqlite', 'w').close()
self.assertTrue(os.path.exists('fr_cache.sqlite'))
# flag must be present
self.cli.invoke(clear)
self.assertTrue(os.path.exists('fr_cache.sqlite'))
self.cli.invoke(clear, ['--http-cache'])
self.assertFalse(os.path.exists('fr_cache.sqlite'))
def test_deletes_index(self):
with self.cli.isolated_filesystem():
entry.Entry('aaa', 'bbb').write('ccc')
entry.Entry('bbb', 'ccc').write('ddd')
self.assertEqual(1, len(entry.Entry("aaa")))
self.assertEqual(1, len(entry.Entry("bbb")))
self.cli.invoke(clear)
self.assertEqual(0, len(entry.Entry("aaa")))
self.assertEqual(0, len(entry.Entry("bbb")))
def test_deletes_can_be_focused(self):
"""If params are provided to delete certain directories, only those
directories should get removed"""
with self.cli.isolated_filesystem():
to_delete = ['delroot/aaa/bbb', 'delroot/aaa/ccc',
'root/delsub/aaa', 'root/delsub/bbb']
to_keep = ['root/othersub/aaa', 'root/aaa',
'top-level-file', 'other-root/aaa']
for path in to_delete + to_keep:
entry.Entry(*path.split('/')).write('')
self.cli.invoke(clear, ['delroot', 'root/delsub'])
self.assertItemsEqual(['top-level-file', 'root', 'other-root'],
list(entry.Entry()))
self.assertItemsEqual(['othersub', 'aaa'],
list(entry.Entry('root')))
self.assertItemsEqual(['aaa'],
list(entry.Entry('other-root')))
|
cmc333333/regulations-parser
|
tests/commands_clear_tests.py
|
Python
|
cc0-1.0
| 2,301 | 0 |
from util import app
import hashlib
import os
phase2_url = '/phase2-%s/' % os.environ.get('PHASE2_TOKEN')
admin_password = u'adminpass'
admin_hash = hashlib.sha1(admin_password.encode('utf-8')).hexdigest()
session_key = 'sessionkey'
admin_session_key = 'adminsessionkey'
def init_data(redis):
redis.set('user:test:password', hashlib.sha1(b'test').hexdigest())
redis.set('user:admin:password', admin_hash)
redis.set('user:test:1', 'Buy groceries')
redis.set('user:test:2', 'Clean the patio')
redis.set('user:test:3', 'Take over the world')
redis.rpush('items:test', 1, 2, 3)
redis.set('session:%s' % session_key, 'test')
redis.set('session:%s' % admin_session_key, 'admin')
return app
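# Hedged note on the fixture data above: passwords are stored as hex SHA-1
# digests, so a credential check in the app under test amounts to comparing
# digests, e.g.
#
#     submitted = hashlib.sha1(b'adminpass').hexdigest()
#     assert submitted == admin_hash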
def test_home(app):
rv = app.get(phase2_url)
assert b'Sign In' in rv.data
assert rv.status_code == 200
def test_404(app):
rv = app.get(phase2_url + 'asdf')
assert rv.status_code == 404
def test_get_405(app):
rv = app.get(phase2_url + 'login/')
assert rv.status_code == 405
def test_403s(app):
"""These should return 403 instead of 404."""
for url in ('dashboard/', 'dashboard/test/1/', 'dashboard/abc/def/'):
rv = app.get(phase2_url + url)
assert rv.status_code == 403
rv = app.get(phase2_url + url, headers={'Cookie': 'session=asdf'})
assert rv.status_code == 403
def test_post_405(app):
"""Be sure this returns 405, instead of 404 or 403."""
for url in ('', 'dashboard/', 'dashboard/test/1/', 'dashboard/abc/def/'):
rv = app.post(phase2_url + url)
assert rv.status_code == 405
def test_bad_login(app):
url = phase2_url + 'login/'
init_data(app.application.redis)
rv = app.post(url)
assert 'dashboard' not in rv.headers.get('Location')
assert rv.status_code == 303
rv = app.post(url, data={'username': 'abcdef', 'password': 'abcdef'})
assert 'dashboard' not in rv.headers.get('Location')
assert rv.status_code == 303
rv = app.post(url, data={'username': 'test'})
assert 'dashboard' not in rv.headers.get('Location')
assert rv.status_code == 303
rv = app.post(url, data={'username': 'test', 'password': 'abcdef'})
assert 'dashboard' not in rv.headers.get('Location')
assert rv.status_code == 303
def test_good_login(app):
url = phase2_url + 'login/'
init_data(app.application.redis)
rv = app.post(url, data={'username': 'test', 'password': 'test'})
assert rv.status_code == 303
assert 'session=' in rv.headers.get('Set-Cookie')
assert 'dashboard' in rv.headers.get('Location')
rv = app.post(url, data={'username': 'admin', 'password': admin_password})
assert rv.status_code == 303
assert 'session=' in rv.headers.get('Set-Cookie')
assert 'dashboard' in rv.headers.get('Location')
def test_dashboard(app):
url = phase2_url + 'dashboard/'
init_data(app.application.redis)
rv = app.get(url, headers={'Cookie': 'session=%s' % session_key})
assert b'Buy groceries' in rv.data
assert b'Take over the world' in rv.data
assert rv.status_code == 200
def test_item_404(app):
url = phase2_url + 'dashboard/'
init_data(app.application.redis)
rv = app.get(url + 'abcdef/0/', headers={
'Cookie': 'session=%s' % session_key})
assert rv.status_code == 404
rv = app.get(url + 'test/0/', headers={
'Cookie': 'session=%s' % session_key})
assert rv.status_code == 404
rv = app.get(url + 'admin/1/', headers={
'Cookie': 'session=%s' % session_key})
assert rv.status_code == 404
def test_solution(app):
url = phase2_url + 'dashboard/admin/password/'
init_data(app.application.redis)
rv = app.get(url, headers={'Cookie': 'session=%s' % session_key})
assert admin_hash.encode('utf-8') in rv.data
assert rv.status_code == 200
def test_admin_dashboard(app):
url = phase2_url + 'dashboard/'
init_data(app.application.redis)
rv = app.get(url, headers={'Cookie': 'session=%s' % admin_session_key})
assert b'Challenge complete!' in rv.data
assert rv.status_code == 200
|
nickfrostatx/polyrents-challenge
|
tests/test_phase2.py
|
Python
|
mit
| 4,200 | 0 |
import unittest
from mock import patch, Mock
from the_ark import rhino_client
__author__ = 'chaley'
rhino_client_obj = None
class UtilsTestCase(unittest.TestCase):
def setUp(self):
self.rhino_client_obj = rhino_client.RhinoClient('test_name',
'url', 'brand',
'branch', 'build_id',
'user',
'rhino_client_url')
def test_set_log(self):
self.rhino_client_obj.set_log("file_path", "link_text")
self.assertEqual('file_path',
self.rhino_client_obj.test_data['result_url'])
self.assertEqual('link_text',
self.rhino_client_obj.test_data['result_text'])
@patch('requests.get')
def test_get(self, requests_get):
r = Mock()
r.json.return_value = {"stuff": "stuff"}
requests_get.return_value = r
response = self.rhino_client_obj.get('test_id')
self.assertEqual({"stuff": "stuff"}, response)
@patch('requests.post')
def test_post(self, requests_post):
request_json = Mock()
request_json.status_code = 201
requests_post.return_value = request_json
self.rhino_client_obj.post()
self.assertEqual(True, self.rhino_client_obj.posted)
@patch('requests.post')
def test_post_fail(self, requests_post):
request_json = Mock()
request_json.status_code = 400
requests_post.return_value = request_json
self.assertRaises(Exception, self.rhino_client_obj.post)
@patch('requests.put')
def test_put(self, requests_put):
self.rhino_client_obj.test_data['test_id'] = 156465465
self.rhino_client_obj.posted = True
request_json = Mock()
request_json.status_code = 201
request_json.json.return_value = {"stuff": "stuff"}
requests_put.return_value = request_json
self.rhino_client_obj.put()
self.assertEqual(True, self.rhino_client_obj.posted)
def test_put_posted_false(self):
self.assertRaises(Exception, self.rhino_client_obj.put)
@patch('requests.put')
def test_put_status_false(self, requests_put):
self.rhino_client_obj.test_data['test_id'] = 156465465
self.rhino_client_obj.posted = True
request_json = Mock()
request_json.status_code = 500
requests_put.return_value = request_json
self.assertRaises(rhino_client.RhinoClientException,
self.rhino_client_obj.put)
@patch('requests.post')
def test_send_test_post(self, requests_post):
request_json = Mock()
request_json.status_code = 201
requests_post.return_value = request_json
self.rhino_client_obj.send_test("status")
self.assertEqual(True, self.rhino_client_obj.posted)
@patch('requests.put')
def test_send_test_put(self, requests_put):
self.rhino_client_obj.test_data['test_id'] = 156465465
self.rhino_client_obj.posted = True
request_json = Mock()
request_json.status_code = 201
requests_put.return_value = request_json
self.rhino_client_obj.send_test("status")
self.assertEqual(True, self.rhino_client_obj.posted)
if __name__ == '__main__':
unittest.main()
|
meltmedia/the-ark
|
tests/test_rhino_client.py
|
Python
|
apache-2.0
| 3,445 | 0.00029 |
import zipfile
import imghdr
from django import forms
from .models import Image, ImageBatchUpload, Album
class AlbumAdminForm(forms.ModelForm):
class Meta:
model = Album
fields = '__all__'
def clean(self):
cleaned_data = self.cleaned_data
if cleaned_data.get('authorized_users') is None:
pass
else:
if cleaned_data.get('all_users') and cleaned_data.get('authorized_users').count() != 0:
cleaned_data['all_users'] = False
return cleaned_data
class ImageAdminForm(forms.ModelForm):
class Meta:
model = Image
fields = ('public', 'title', 'image', 'albums', 'user')
def clean_image(self):
image = self.cleaned_data['image']
if image is None:
return image
elif not imghdr.what(image):
raise forms.ValidationError(u"The file is not an image file")
else:
return image
class ImageBatchUploadAdminForm(forms.ModelForm):
class Meta:
model = ImageBatchUpload
fields = ('public', 'title', 'zip_file', 'albums', 'user')
def clean_zip_file(self):
image_zip = self.cleaned_data['zip_file']
if image_zip is None:
return image_zip
elif not zipfile.is_zipfile(image_zip):
raise forms.ValidationError(u"The file is not a zip file")
else:
return image_zip
|
dsimandl/teamsurmandl
|
gallery/forms.py
|
Python
|
mit
| 1,431 | 0.002096 |
import CatalogItem
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from otp.otpbase import OTPLocalizer
from direct.interval.IntervalGlobal import *
from direct.gui.DirectGui import *
class CatalogNametagItem(CatalogItem.CatalogItem):
sequenceNumber = 0
def makeNewItem(self, nametagStyle):
self.nametagStyle = nametagStyle
CatalogItem.CatalogItem.makeNewItem(self)
def getPurchaseLimit(self):
return 1
def reachedPurchaseLimit(self, avatar):
if self in avatar.onOrder or self in avatar.mailboxContents or self in avatar.onGiftOrder or self in avatar.awardMailboxContents or self in avatar.onAwardOrder:
return 1
if avatar.nametagStyle == self.nametagStyle:
return 1
return 0
def getAcceptItemErrorText(self, retcode):
if retcode == ToontownGlobals.P_ItemAvailable:
return TTLocalizer.CatalogAcceptNametag
return CatalogItem.CatalogItem.getAcceptItemErrorText(self, retcode)
def saveHistory(self):
return 1
def getTypeName(self):
return TTLocalizer.NametagTypeName
def getName(self):
if self.nametagStyle == 100:
name = TTLocalizer.UnpaidNameTag
else:
name = TTLocalizer.NametagFontNames[self.nametagStyle]
if TTLocalizer.NametagReverse:
name = TTLocalizer.NametagLabel + name
else:
name = name + TTLocalizer.NametagLabel
return name
if self.nametagStyle == 0:
name = TTLocalizer.NametagPaid
elif self.nametagStyle == 1:
name = TTLocalizer.NametagAction
elif self.nametagStyle == 2:
name = TTLocalizer.NametagFrilly
def recordPurchase(self, avatar, optional):
if avatar:
avatar.b_setNametagStyle(self.nametagStyle)
return ToontownGlobals.P_ItemAvailable
def getPicture(self, avatar):
frame = self.makeFrame()
if self.nametagStyle == 100:
inFont = ToontownGlobals.getToonFont()
else:
inFont = ToontownGlobals.getNametagFont(self.nametagStyle)
nameTagDemo = DirectLabel(parent=frame, relief=None, pos=(0, 0, 0.24), scale=0.5, text=base.localAvatar.getName(), text_fg=(1.0, 1.0, 1.0, 1), text_shadow=(0, 0, 0, 1), text_font=inFont, text_wordwrap=9)
self.hasPicture = True
return (frame, None)
def output(self, store = -1):
return 'CatalogNametagItem(%s%s)' % (self.nametagStyle, self.formatOptionalData(store))
def compareTo(self, other):
return self.nametagStyle - other.nametagStyle
def getHashContents(self):
return self.nametagStyle
def getBasePrice(self):
return 500
cost = 500
if self.nametagStyle == 0:
cost = 600
elif self.nametagStyle == 1:
cost = 600
elif self.nametagStyle == 2:
cost = 600
elif self.nametagStyle == 100:
cost = 50
return cost
def decodeDatagram(self, di, versionNumber, store):
CatalogItem.CatalogItem.decodeDatagram(self, di, versionNumber, store)
self.nametagStyle = di.getUint16()
def encodeDatagram(self, dg, store):
CatalogItem.CatalogItem.encodeDatagram(self, dg, store)
dg.addUint16(self.nametagStyle)
def isGift(self):
return 0
def getBackSticky(self):
itemType = 1
numSticky = 4
return (itemType, numSticky)
|
Spiderlover/Toontown
|
toontown/catalog/CatalogNametagItem.py
|
Python
|
mit
| 3,528 | 0.001701 |
# This file is part of VoltDB.
# Copyright (C) 2008-2016 VoltDB Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with VoltDB. If not, see <http://www.gnu.org/licenses/>.
import time
@VOLT.Command(
bundles = VOLT.AdminBundle(),
description = 'Pause the VoltDB cluster and switch it to admin mode.',
options = (
VOLT.BooleanOption('-w', '--wait', 'waiting',
'wait for all DR and Export transactions to be externally processed',
default = False)
)
)
def pause(runner):
# Check the STATUS column. runner.call_proc() detects and aborts on errors.
status = runner.call_proc('@Pause', [], []).table(0).tuple(0).column_integer(0)
if status <> 0:
runner.error('The cluster has failed to pause with status: %d' % status)
return
runner.info('The cluster is paused.')
if runner.opts.waiting:
status = runner.call_proc('@Quiesce', [], []).table(0).tuple(0).column_integer(0)
if status <> 0:
runner.error('The cluster has failed to quiesce with status: %d' % status)
return
runner.info('The cluster is quiesced.')
# check the dr stats
partition_min_host = dict()
partition_min = dict()
partition_max = dict()
check_dr(runner, partition_min_host, partition_min, partition_max)
# check the export stats twice because they are periodic
export_tables_with_data = dict()
check_dr(runner, partition_min_host, partition_min, partition_max)
last_table_stat_time = 0
last_table_stat_time = check_export(runner, export_tables_with_data, last_table_stat_time)
if not partition_min and last_table_stat_time == 1:
# there are no outstanding export or dr transactions
runner.info('All export and DR transactions have been processed.')
return
# after 10 seconds notify admin of what transactions have not drained
notifyInterval = 10
# have to get two samples of table stats because the cached value could be from before Quiesce
while True:
time.sleep(1)
if partition_min:
check_dr(runner, partition_min_host, partition_min, partition_max)
if last_table_stat_time > 1:
curr_table_stat_time = check_export(runner, export_tables_with_data, last_table_stat_time)
if last_table_stat_time == 1 or curr_table_stat_time > last_table_stat_time:
# have a new sample from table stat cache or there are no tables
if not export_tables_with_data and not partition_min:
runner.info('All export and DR transactions have been processed.')
return
notifyInterval -= 1
if notifyInterval == 0:
notifyInterval = 10
if last_table_stat_time > 1 and export_tables_with_data:
print_export_pending(runner, export_tables_with_data)
if partition_min:
print_dr_pending(runner, partition_min_host, partition_min, partition_max)
def get_stats(runner, component):
retry = 5
while True:
response = runner.call_proc('@Statistics', [VOLT.FastSerializer.VOLTTYPE_STRING,
VOLT.FastSerializer.VOLTTYPE_INTEGER], [component, 0])
status = response.status()
if status <> 1 and "timeout" in response.statusString:
if retry == 0:
runner.error('Unable to collect DR or export statistics from the cluster')
else:
                time.sleep(1)
retry -= 1
continue
if status <> 1:
runner.error("Unexpected response to @Statistics %s: %s" % (component, resp))
return response
def check_dr(runner, partition_min_host, partition_min, partition_max):
resp = get_stats(runner, 'DRPRODUCER')
partition_data = resp.table(0)
for pid in partition_min:
# reset all min values to find the new min
if pid in partition_max:
partition_min[pid] = partition_max[pid]
for r in partition_data.tuples():
pid = r[3]
hostname = str(r[2])
if str(r[8]) == 'None':
last_queued = -1
else:
last_queued = r[8]
if str(r[9]) == 'None':
last_acked = -1
else:
last_acked = r[9]
# check TOTALBYTES
if r[5] > 0:
# track the highest seen drId for each partition
# use last queued to get the upper bound
if pid in partition_max:
partition_max[pid] = max(last_queued, partition_max[pid])
else:
partition_max[pid] = last_queued
if pid in partition_min:
if last_acked < partition_min[pid]:
# this replica is farther behind
partition_min[pid] = last_acked
else:
partition_min_host[pid] = set()
partition_min[pid] = last_acked
partition_min_host[pid].add(hostname)
else:
# this hostname's partition has an empty InvocationBufferQueue
if pid in partition_min:
# it was not empty on a previous call
partition_min_host[pid].discard(hostname)
if not partition_min_host[pid]:
del partition_min_host[pid]
del partition_min[pid]
if pid in partition_max:
if partition_max[pid] > last_acked:
runner.warning("DR Producer reports no data for partition %i on host %s but last acked drId (%i) does not match other hosts last acked drId (%s)" % (pid, hostname, last_acked, partition_max[pid]))
partition_max[pid] = max(last_acked, partition_max[pid])
else:
partition_max[pid] = last_acked
def print_dr_pending(runner, partition_min_host, partition_min, partition_max):
runner.info('The following partitions have pending DR transactions that the consumer cluster has not processed:')
summaryline = " Partition %i needs acknowledgements for drIds %i to %i on hosts: %s."
for pid in partition_min_host:
runner.info(summaryline % (pid, partition_min[pid]+1, partition_max[pid], ', '.join(partition_min_host[pid])))
def check_export(runner, export_tables_with_data, last_collection_time):
resp = get_stats(runner, 'TABLE')
export_tables = 0
collection_time = 0
if not resp.table_count() > 0:
# this is an empty database and we don't need to wait for export to drain
return 1
else:
tablestats = resp.table(0)
firsttuple = tablestats.tuple(0)
if firsttuple.column(0) == last_collection_time:
# this statistic is the same cached set as the last call
return last_collection_time
else:
collection_time = firsttuple.column(0)
for r in tablestats.tuples():
# first look for streaming (export) tables
if str(r[6]) == 'StreamedTable':
pendingData = r[8]
tablename = str(r[5])
pid = r[4]
hostname = str(r[2])
if pendingData > 0:
if not tablename in export_tables_with_data:
export_tables_with_data[tablename] = dict()
tabledata = export_tables_with_data[tablename]
if not hostname in tabledata:
tabledata[hostname] = set()
tabledata[hostname].add(pid)
else:
if tablename in export_tables_with_data:
tabledata = export_tables_with_data[tablename]
if hostname in tabledata:
tabledata[hostname].discard(pid)
if not tabledata[hostname]:
del tabledata[hostname]
if not export_tables_with_data[tablename]:
del export_tables_with_data[tablename]
return collection_time
def print_export_pending(runner, export_tables_with_data):
runner.info('The following export tables have unacknowledged transactions:')
summaryline = " %s needs acknowledgements on host(s) %s for partition(s) %s."
for table in export_tables_with_data:
pidlist = set()
hostlist = list(export_tables_with_data[table].keys())
for host in hostlist:
pidlist = pidlist | export_tables_with_data[table][host]
partlist = reduce(lambda a,x: a+","+str(x), list(pidlist), "")[1:]
runner.info(summaryline % (table, ', '.join(hostlist), partlist))
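# Hedged usage note: this command module is normally exercised through the
# voltadmin CLI rather than imported directly, e.g.
#
#     voltadmin pause --wait
#
# which pauses the cluster, quiesces it, and keeps polling the DR and export
# statistics gathered above until every pending transaction has drained.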
|
paulmartel/voltdb
|
lib/python/voltcli/voltadmin.d/pause.py
|
Python
|
agpl-3.0
| 9,312 | 0.00494 |
#!/usr/bin/env python3
import os, sys, glob, pickle, subprocess
sys.path.insert(0, os.path.dirname(__file__))
from clang import cindex
sys.path = sys.path[1:]
def configure_libclang():
llvm_libdirs = ['/usr/lib/llvm-3.2/lib', '/usr/lib64/llvm']
try:
libdir = subprocess.check_output(['llvm-config', '--libdir']).decode('utf-8').strip()
llvm_libdirs.insert(0, libdir)
except OSError:
pass
for d in llvm_libdirs:
if not os.path.exists(d):
continue
files = glob.glob(os.path.join(d, 'libclang.so*'))
if len(files) != 0:
cindex.Config.set_library_file(files[0])
return
class Call:
def __init__(self, cursor, decl):
self.ident = cursor.displayname.decode('utf-8')
self.filename = cursor.location.file.name.decode('utf-8')
ex = cursor.extent
self.start_line = ex.start.line
self.start_column = ex.start.column
self.end_line = ex.end.line
self.end_column = ex.end.column
self.decl_filename = decl.location.file.name.decode('utf-8')
class Definition:
def __init__(self, cursor):
self.ident = cursor.spelling.decode('utf-8')
self.display = cursor.displayname.decode('utf-8')
self.filename = cursor.location.file.name.decode('utf-8')
ex = cursor.extent
self.start_line = ex.start.line
self.start_column = ex.start.column
self.end_line = ex.end.line
self.end_column = ex.end.column
def process_diagnostics(tu):
diagnostics = tu.diagnostics
haserr = False
for d in diagnostics:
sys.stderr.write('{0}\n'.format(d.format.decode('utf-8')))
if d.severity > cindex.Diagnostic.Warning:
haserr = True
if haserr:
sys.exit(1)
def walk_cursors(tu, files):
proc = list(tu.cursor.get_children())
while len(proc) > 0:
cursor = proc[0]
proc = proc[1:]
if cursor.location.file is None:
continue
fname = cursor.location.file.name.decode('utf-8')
if fname in files:
yield cursor
proc += list(cursor.get_children())
def newer(a, b):
try:
return os.stat(a).st_mtime > os.stat(b).st_mtime
except:
return True
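# The mtime-based cache-invalidation idiom used below, shown in a minimal
# self-contained form (the names here are hypothetical, for illustration only):
#
#     cache_path = '.' + os.path.basename(source) + '.cache'
#     if newer(source, cache_path):
#         data = expensive_parse(source)
#         with open(cache_path, 'wb') as fo:
#             pickle.dump(data, fo)
#     else:
#         with open(cache_path, 'rb') as fo:
#             data = pickle.load(fo)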
def scan_libgit2_glib(cflags, files, git2dir):
files = [os.path.abspath(f) for f in files]
dname = os.path.dirname(__file__)
allcalls = {}
l = 0
if not os.getenv('SILENT'):
sys.stderr.write('\n')
i = 0
for f in files:
if not os.getenv('SILENT'):
name = os.path.basename(f)
if len(name) > l:
l = len(name)
perc = int((i / len(files)) * 100)
sys.stderr.write('[{0: >3}%] Processing ... {1}{2}\r'.format(perc, name, ' ' * (l - len(name))))
i += 1
astf = os.path.join(dname, '.' + os.path.basename(f) + '.cache')
if not newer(f, astf):
with open(astf, 'rb') as fo:
calls = pickle.load(fo)
else:
tu = cindex.TranslationUnit.from_source(f, cflags)
process_diagnostics(tu)
calls = {}
for cursor in walk_cursors(tu, files):
if cursor.kind == cindex.CursorKind.CALL_EXPR or \
cursor.kind == cindex.CursorKind.DECL_REF_EXPR:
cdecl = cursor.get_referenced()
if cdecl.kind != cindex.CursorKind.FUNCTION_DECL:
continue
if (not cdecl is None) and (not cdecl.location.file is None):
fdefname = cdecl.location.file.name.decode('utf-8')
if fdefname.startswith(git2dir):
call = Call(cursor, cdecl)
if call.ident in calls:
calls[call.ident].append(call)
else:
calls[call.ident] = [call]
with open(astf, 'wb') as fo:
pickle.dump(calls, fo)
for k in calls:
if k in allcalls:
allcalls[k] += calls[k]
else:
allcalls[k] = list(calls[k])
if not os.getenv('SILENT'):
sys.stderr.write('\r[100%] Processing ... done{0}\n'.format(' ' * (l - 4)))
return allcalls
def scan_libgit2(cflags, git2dir):
tu = cindex.TranslationUnit.from_source(git2dir + '.h', cflags)
process_diagnostics(tu)
headers = glob.glob(os.path.join(git2dir, '*.h'))
defs = {}
objapi = ['lookup', 'lookup_prefix', 'free', 'id', 'owner']
objderiv = ['commit', 'tree', 'tag', 'blob']
ignore = set()
for deriv in objderiv:
for api in objapi:
ignore.add('git_' + deriv + '_' + api)
for cursor in walk_cursors(tu, headers):
if cursor.kind == cindex.CursorKind.FUNCTION_DECL:
deff = Definition(cursor)
if not deff.ident in ignore:
defs[deff.ident] = deff
return defs
configure_libclang()
pos = sys.argv.index('--')
cflags = sys.argv[1:pos]
files = sys.argv[pos+1:]
incdir = os.getenv('LIBGIT2_INCLUDE_DIR')
defs = scan_libgit2(cflags, incdir)
calls = scan_libgit2_glib(cflags, files, incdir)
notused = {}
perfile = {}
nperfile = {}
for d in defs:
o = defs[d]
if not d in calls:
notused[d] = defs[d]
if not o.filename in nperfile:
nperfile[o.filename] = [o]
else:
nperfile[o.filename].append(o)
if not o.filename in perfile:
perfile[o.filename] = [o]
else:
perfile[o.filename].append(o)
ss = [notused[f] for f in notused]
ss.sort(key=lambda x: '{0} {1}'.format(os.path.basename(x.filename), x.ident))
lastf = None
keys = list(perfile.keys())
keys.sort()
for filename in keys:
b = os.path.basename(filename)
f = perfile[filename]
n_perfile = len(f)
if filename in nperfile:
n_nperfile = len(nperfile[filename])
else:
n_nperfile = 0
perc = int(((n_perfile - n_nperfile) / n_perfile) * 100)
print('\n File {0}, coverage {1}% ({2} out of {3}):'.format(b, perc, n_perfile - n_nperfile, n_perfile))
cp = list(f)
cp.sort(key=lambda x: "{0} {1}".format(not x.ident in calls, x.ident))
for d in cp:
if d.ident in calls:
print(' \033[32m✓ {0}\033[0m'.format(d.display))
else:
print(' \033[31m✗ {0}\033[0m'.format(d.display))
perc = int(((len(defs) - len(notused)) / len(defs)) * 100)
print('\nTotal coverage: {0}% ({1} functions out of {2} are being called)\n'.format(perc, len(defs) - len(notused), len(defs)))
# vi:ts=4:et
|
chergert/libgit2-glib
|
tools/coverage.py
|
Python
|
lgpl-2.1
| 6,733 | 0.003864 |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/bjornl/ros/workspaces/bjorn_ws/install/include".split(';') if "/home/bjornl/ros/workspaces/bjorn_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rosserial_mbed"
PROJECT_SPACE_DIR = "/home/bjornl/ros/workspaces/bjorn_ws/install"
PROJECT_VERSION = "0.7.6"
|
blutjens/perc_neuron_ros_ur10
|
pn_ros/bjorn_ws/build/rosserial/rosserial_mbed/catkin_generated/pkg.installspace.context.pc.py
|
Python
|
gpl-3.0
| 511 | 0.001957 |
# -*- coding: utf-8 -*-
'''
Funimation|Now Add-on
Copyright (C) 2016 Funimation|Now
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import logging;
import re;
import xbmc;
import os;
import xbmcgui;
from resources.lib.modules import utils;
logger = logging.getLogger('funimationnow');
EXIT_CODE = 2;
SUCCESS_CODE = 3;
EXPIRE_CODE = 4;
HOME_SCREEN_CODE = 5;
BACK_CODE = 6;
LOGOUT_CODE = 7;
REST_CODE = 8;
SEARCH_WINDOW = 100100;
HOME_WINDOW = 110101;
QUEUE_WINDOW = 110102;
ALL_WINDOW = 110103;
SIMALCAST_WINDOW = 110104;
GENRE_WINDOW = 110105;
SETTINGS_WINDOW = 110106;
HELP_WINDOW = 110107;
LOGOUT_WINDOW = 110108;
func = dict({
SEARCH_WINDOW: 'search',
HOME_WINDOW: 'home',
QUEUE_WINDOW: 'queue',
ALL_WINDOW: 'all',
SIMALCAST_WINDOW: 'simalcast',
GENRE_WINDOW: 'genres',
SETTINGS_WINDOW: 'settings',
HELP_WINDOW: 'help',
LOGOUT_WINDOW: 'logout',
});
def chooser(landing_page, parent, child, controlID):
result = EXIT_CODE;
logger.debug(controlID);
logger.debug(child);
logger.debug(func.get(controlID));
try:
result = globals()[func.get(controlID)](landing_page, parent, child, controlID);
except Exception as inst:
logger.error(inst);
landing_page.result_code = result;
return result;
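# Minimal illustration of the dispatch idiom used above (add-on specifics
# aside): the `func` table maps a window id to a handler name, which is looked
# up in globals() and invoked, e.g.
#
#     handler = globals()[func.get(SEARCH_WINDOW)]   # -> the search() function
#     result = handler(landing_page, parent, child, SEARCH_WINDOW);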
def home(landing_page, parent, child, controlID):
RESULT_CODE = REST_CODE;
if child == HOME_WINDOW:
RESULT_CODE = REST_CODE;
else:
RESULT_CODE = HOME_SCREEN_CODE;
return RESULT_CODE;
def search(landing_page, parent, child, controlID):
RESULT_CODE = REST_CODE;
if child == SEARCH_WINDOW:
pass;
else:
try:
from resources.lib.gui.searchgui import search;
search(landing_page);
except Exception as inst:
logger.error(inst);
return RESULT_CODE;
def queue(landing_page, parent, child, controlID):
RESULT_CODE = REST_CODE;
if child == QUEUE_WINDOW:
pass;
else:
try:
from resources.lib.gui.watchlistgui import watchlist;
mnavset = dict({
'width': 95,
'title': 'MY QUEUE',
'params': 'id=myqueue&title=My Queue',
'target': 'longlist',
'path': 'longlist/myqueue/'
});
watchlist(landing_page, mnavset);
except Exception as inst:
logger.error(inst);
return RESULT_CODE;
def all(landing_page, parent, child, controlID):
RESULT_CODE = REST_CODE;
if child == ALL_WINDOW:
pass;
else:
try:
from resources.lib.gui.genreselectgui import genreselect;
mnavset = dict({
'width': 140,
'title': 'RECENTLY ADDED',
'params': 'id=shows&title=All Shows&showGenres=true',
'target': 'longlist',
'path': 'longlist/content/',
'offset': 0,
'limit': 144
});
genreselect(landing_page, mnavset);
except Exception as inst:
logger.error(inst);
return RESULT_CODE;
def simalcast(landing_page, parent, child, controlID):
RESULT_CODE = REST_CODE;
if child == SIMALCAST_WINDOW:
pass;
else:
try:
from resources.lib.gui.audioselectgui import audioselect;
mnavset = dict({
'width': 108,
'title': 'SIMULDUBS',
#'params': 'id=simulcasts&title=Simulcasts',
'params': 'id=broadcast-dubs&title=Broadcast Dubs',
'target': 'longlist',
'path': 'longlist/content/'
});
audioselect(landing_page, mnavset);
except Exception as inst:
logger.error(inst);
return RESULT_CODE;
def genres(landing_page, parent, child, controlID):
RESULT_CODE = REST_CODE;
if child == GENRE_WINDOW:
pass;
else:
try:
from resources.lib.gui.genreshowsgui import genreshows;
mnavset = dict({
'width': 140,
'title': 'RECENTLY ADDED',
'params': 'id=genres&title=Genres&role=b',
'target': 'longlist',
'path': 'longlist/genres/',
'offset': 0,
'limit': 144
});
genreshows(landing_page, mnavset);
except Exception as inst:
logger.error(inst);
return RESULT_CODE;
def settings(landing_page, parent, child, controlID):
RESULT_CODE = REST_CODE;
try:
#xbmc.executebuiltin('Addon.OpenSettings(%s)' % utils.getAddonInfo('id'));
utils.addon.openSettings();
utils.lock();
utils.sleep(2000);
utils.unlock();
addon_data = xbmc.translatePath(utils.getAddonInfo('profile')).decode('utf-8');
tokens = xbmc.translatePath(os.path.join(addon_data, 'tokens.db'));
if not os.path.exists(tokens):
RESULT_CODE = LOGOUT_CODE;
except Exception as inst:
logger.error(inst);
return RESULT_CODE;
def help(landing_page, parent, child, controlID):
RESULT_CODE = REST_CODE;
try:
from resources.lib.gui.helpmenugui import helpmenu;
helpmenu();
except Exception as inst:
logger.error(inst);
return RESULT_CODE;
def logout(landing_page, parent, child, controlID):
RESULT_CODE = LOGOUT_CODE;
from resources.lib.modules import cleardata;
logger.debug('Running Cleanup Script');
try:
cleardata.cleanup();
except:
pass;
return RESULT_CODE;
|
Protocol-X/script.video.funimationnow
|
resources/lib/modules/menunav.py
|
Python
|
gpl-3.0
| 6,384 | 0.01911 |
import AnimatedProp
from direct.actor import Actor
from direct.interval.IntervalGlobal import *
from toontown.effects.Splash import *
from toontown.effects.Ripples import *
import random
class FishAnimatedProp(AnimatedProp.AnimatedProp):
def __init__(self, node):
AnimatedProp.AnimatedProp.__init__(self, node)
parent = node.getParent()
self.fish = Actor.Actor(node, copy=0)
self.fish.reparentTo(parent)
self.fish.setTransform(node.getTransform())
node.clearMat()
self.fish.loadAnims({'jump': 'phase_4/models/props/SZ_fish-jump',
'swim': 'phase_4/models/props/SZ_fish-swim'})
self.splashSfxList = (loader.loadSfx('phase_4/audio/sfx/TT_splash1.ogg'), loader.loadSfx('phase_4/audio/sfx/TT_splash2.ogg'))
self.node = self.fish
self.geom = self.fish.getGeomNode()
self.exitRipples = Ripples(self.geom)
self.exitRipples.setBin('fixed', 25, 1)
self.exitRipples.setPosHprScale(-0.3, 0.0, 1.24, 0.0, 0.0, 0.0, 0.7, 0.7, 0.7)
self.splash = Splash(self.geom, wantParticles=0)
self.splash.setPosHprScale(-1, 0.0, 1.23, 0.0, 0.0, 0.0, 0.7, 0.7, 0.7)
randomSplash = random.choice(self.splashSfxList)
self.track = Sequence(FunctionInterval(self.randomizePosition), Func(self.node.unstash), Parallel(self.fish.actorInterval('jump'), Sequence(Wait(0.25), Func(self.exitRipples.play, 0.75)), Sequence(Wait(1.14), Func(self.splash.play), SoundInterval(randomSplash, volume=0.8, node=self.node))), Wait(1), Func(self.node.stash), Wait(4 + 10 * random.random()), name=self.uniqueName('Fish'))
def delete(self):
self.exitRipples.destroy()
del self.exitRipples
self.splash.destroy()
del self.splash
del self.track
self.fish.removeNode()
del self.fish
del self.node
del self.geom
def randomizePosition(self):
x = 5 * (random.random() - 0.5)
y = 5 * (random.random() - 0.5)
h = 360 * random.random()
self.geom.setPos(x, y, 0)
self.geom.setHpr(h, 0, 0)
def enter(self):
AnimatedProp.AnimatedProp.enter(self)
self.track.loop()
def exit(self):
AnimatedProp.AnimatedProp.exit(self)
self.track.finish()
self.splash.stop()
self.exitRipples.stop()
|
Spiderlover/Toontown
|
toontown/hood/FishAnimatedProp.py
|
Python
|
mit
| 2,348 | 0.002129 |
"""
viscount.task.models
Task models
"""
from ..core import db
from ..utils import JSONSerializer
class TaskInputFile(JSONSerializer, db.Model):
__tablename__ = 'tasks_input_files'
id = db.Column(db.Integer, primary_key=True)
task_id = db.Column(db.Integer, db.ForeignKey('tasks.id'), nullable=False)
file_type_id = db.Column(db.Integer, db.ForeignKey('file_types.id'), nullable=False)
name = db.Column(db.String(255), nullable=False, primary_key=True)
description = db.Column(db.Text, nullable=False)
class TaskOutputFile(JSONSerializer, db.Model):
__tablename__ = 'tasks_output_files'
id = db.Column(db.Integer, primary_key=True)
task_id = db.Column(db.Integer, db.ForeignKey('tasks.id'), nullable=False)
file_type_id = db.Column(db.Integer, db.ForeignKey('file_types.id'), nullable=False)
name = db.Column(db.String(255), nullable=False, primary_key=True)
description = db.Column(db.Text, nullable=False)
class TaskJSONSerializer(JSONSerializer):
__json_modifiers__ = {
'events': lambda events, _: [dict(id=event.id) for event in events],
'inputs': lambda inputs, _: [dict(id=input.id) for input in inputs],
'outputs': lambda outputs, _: [dict(id=output.id) for output in outputs],
'task_instances': lambda task_instances, _: [dict(id=task_instance.id) for task_instance in task_instances],
}
class Task(TaskJSONSerializer, db.Model):
__tablename__ = 'tasks'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(32), unique=True)
owner_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
description = db.Column(db.Text, index=False, unique=False, nullable=False)
source_file = db.Column(db.Integer, db.ForeignKey('files.id'))
events = db.relationship('Event', backref='task', lazy='dynamic')
inputs = db.relationship('TaskInputFile', backref='task', lazy='dynamic')
outputs = db.relationship('TaskOutputFile', backref='task', lazy='dynamic')
task_instances = db.relationship('WorkflowTaskInstance', backref='task', lazy='dynamic')
def __repr__(self):
return '<Task %r>' % (self.name)
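# Hedged illustration of what the __json_modifiers__ above produce: related
# collections are flattened to lists of id dicts, so a serialized Task looks
# roughly like
#
#     {'id': 1, 'name': 'align-reads', 'description': '...',
#      'inputs': [{'id': 3}, {'id': 7}], 'outputs': [{'id': 9}],
#      'events': [], 'task_instances': [{'id': 2}]}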
|
dacb/viscount
|
viscount/tasks/models.py
|
Python
|
bsd-2-clause
| 2,077 | 0.01637 |
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from django.conf import settings
from django.db import models
from django.core.urlresolvers import reverse
from django.db.models.signals import pre_save
from django.utils import timezone
from django.utils.text import slugify
class CareerManager(models.Manager):
def active(self, *args, **kwargs):
return super(CareerManager, self).filter(draft = False).filter(published_at__lte = timezone.now())
@python_2_unicode_compatible
class Career(models.Model):
FULLTIME = 'Full-time'
PARTTIME = 'Part-time'
INTERNSHIP = 'Internship'
RESEARCH = 'Research'
ROLE_CATEGORY_CHOICES = (
(FULLTIME, 'Full-time'),
(PARTTIME, 'Part-time'),
(INTERNSHIP, 'Internship'),
(RESEARCH, 'Research'),
)
role_category = models.CharField(
max_length=12,
choices=ROLE_CATEGORY_CHOICES,
default=FULLTIME,
)
# Role
role = models.CharField(max_length = 120)
# Location
city = models.CharField(max_length=255)
# Plain text and urlify slug
career_slug = models.SlugField(unique = True)
career_offer_title = models.CharField(max_length=255, default="")
career_offer_description = models.TextField(default="")
career_experience = models.TextField(default="")
career_terms = models.TextField(default="")
    # Time and meta stuff
draft = models.BooleanField(default = False)
published_at = models.DateField(auto_now = False, auto_now_add = False)
updated = models.DateTimeField(auto_now = True, auto_now_add = False)
timestamp = models.DateTimeField(auto_now = False, auto_now_add = True)
objects = CareerManager()
def __unicode__(self):
return self.role
def __str__(self):
return self.role
def get_absolute_url(self):
return reverse('careers:detail', kwargs = {'slug':self.career_slug})
class Meta:
ordering = ["-timestamp", "-updated"]
def create_slug(instance, new_slug = None):
    career_slug = slugify(instance.role)
if new_slug is not None:
career_slug = new_slug
qs = Career.objects.filter(career_slug = career_slug).order_by("-id")
exists = qs.exists()
if exists:
new_slug = "%s-%s" %(career_slug, qs.first().id)
        return create_slug(instance, new_slug = new_slug)
return career_slug
def pre_save_post_receiver(sender, instance, *args, **kwargs):
if not instance.career_slug:
instance.career_slug = create_slug(instance)
pre_save.connect(pre_save_post_receiver, sender = Career)
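# Hedged sketch of the intended behaviour of the pre_save hook above: saving a
# Career without a slug derives one from the role, and a clash appends the id
# of the already-existing row, e.g.
#
#     Career(role='Data Engineer', ...).save()  # career_slug -> 'data-engineer'
#     Career(role='Data Engineer', ...).save()  # career_slug -> 'data-engineer-<id>'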
|
neldom/qessera
|
careers/models.py
|
Python
|
mit
| 2,634 | 0.015186 |
def task_hello():
"""hello py """
def python_hello(times, text, targets):
with open(targets[0], "a") as output:
output.write(times * text)
return {'actions': [(python_hello, [3, "py!\n"])],
'targets': ["hello.txt"],
}
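# Usage note (hedged): running `doit hello` next to this dodo file appends
# three "py!" lines to hello.txt; the [3, "py!\n"] list supplies the `times`
# and `text` arguments of python_hello, and `targets` names the file produced.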
|
gh0std4ncer/doit
|
doc/tutorial/tutorial_02.py
|
Python
|
mit
| 276 | 0 |
from numpy import *
import numpy as np
from matplotlib.pyplot import *
import scipy.constants as sc
import copy
import scipy.integrate as integ
# test sun/earth with hw5(1.989e30,5.972e24,149.6e6,0.0167,1000)
def hw5(m1, m2, a, e, tmax, tstep=0.001, tplot=0.025, method='leapfrog'):
if method != 'leapfrog' and method != 'odeint':
print("That's not a method")
return()
# initialize commonly used variables
period = sqrt((4*(pi**2)*(a**3)) / (sc.G*(m1 + m2)))
dt = period*tstep
# initialize objects at time 0
q = m1 / m2
r0 = (1-e)*a/(1+q)
v0 = (1/(1+q))*sqrt((1+e)/(1-e))*sqrt(sc.G*(m1+m2)/a)
rv = array([r0, 0, 0, v0, -q*r0, 0, 0, -q*v0])
# set up figure
figure(1)
gca().set_aspect('equal')
xlim([-2*a, 2*a])
ylim([-2*a, 2*a])
rv_list = []
if method == 'leapfrog':
timeCounter = 0
frameCounter = 0
while timeCounter < tmax:
# plot positions if tplot time has passed
if frameCounter >= tplot:
frameCounter = 0
rv_list.append(copy.deepcopy(rv))
# calc positions
rv[0] = rv[0] + rv[2]*dt
rv[1] = rv[1] + rv[3]*dt
rv[4] = rv[4] + rv[6]*dt
rv[5] = rv[5] + rv[7]*dt
# calc acceleration
r = array([rv[0] - rv[4], rv[1] - rv[5]])
force = ((sc.G*m1*m2)/(np.linalg.norm(r)**2))*(r/np.linalg.norm(r))
# calc velocity
rv[2] = rv[2] - (force[0]/m1)*dt
rv[3] = rv[3] - (force[1]/m1)*dt
rv[6] = rv[6] + (force[0]/m2)*dt
rv[7] = rv[7] + (force[1]/m2)*dt
# increment counters
timeCounter += tstep
frameCounter += tstep
# plot final position
rv_list.append(copy.deepcopy(rv))
rv_list_plot = rv_list
else:
# odeint
rv_list = integ.odeint(deriv, rv, arange(0, tmax*period, dt), (m1, m2))
# needed to calculate using tstep, but we want to plot
# using tplot,
        t_interval = int(round(tplot / tstep))  # slice step must be an integer
rv_list_plot = rv_list[::t_interval]
# plot
for i in range(len(rv_list_plot)):
plot(rv_list_plot[i][0],rv_list_plot[i][1],'bo')
plot(rv_list_plot[i][4],rv_list_plot[i][5],'go')
draw()
def deriv(rv, dt, m1, m2):
# calc position deriv
rv_copy = zeros(8)
rv_copy[0] = rv[2]
rv_copy[1] = rv[3]
rv_copy[4] = rv[6]
rv_copy[5] = rv[7]
# calc velocity deriv
r = array([rv[0] - rv[4], rv[1] - rv[5]])
force = ((sc.G*m1*m2)/(np.linalg.norm(r)**2))*(r/np.linalg.norm(r))
rv_copy[2] = - (force[0]/m1)
rv_copy[3] = - (force[1]/m1)
rv_copy[6] = + (force[0]/m2)
rv_copy[7] = + (force[1]/m2)
return rv_copy
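# Worked check (hedged): the orbital period computed above is Kepler's third
# law,
#     P = sqrt(4*pi**2 * a**3 / (G*(m1 + m2))) = 2*pi*sqrt(a**3 / (G*(m1 + m2)))
# With SI units the Sun-Earth test in the docstring needs a in metres
# (a = 1.496e11 m rather than 149.6e6), which gives P ~= 3.16e7 s, about 365
# days.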
|
smorad/ast119
|
hw5.py
|
Python
|
gpl-2.0
| 2,857 | 0.00805 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# FetchGNX design notes documentation build configuration file, created by
# sphinx-quickstart on Fri Oct 9 13:29:00 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import cloud_sptheme as csp
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'FetchGNX design notes'
copyright = '2015, Stephen Leach'
author = 'Stephen Leach'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.8.3'
# The full version, including alpha/beta/rc tags.
release = '0.8.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'cloud'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {"defaultcollapsed":True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [csp.get_theme_dir()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'FetchGNXdesignnotesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'FetchGNXdesignnotes.tex', 'FetchGNX design notes Documentation',
'Stephen Leach', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'fetchgnxdesignnotes', 'FetchGNX design notes Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'FetchGNXdesignnotes', 'FetchGNX design notes Documentation',
author, 'FetchGNXdesignnotes', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
Spicery/ginger
|
apps/fetchgnx/design/conf.py
|
Python
|
gpl-3.0
| 9,404 | 0.005955 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-09 23:22
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('player', '0002_auto_20160505_0350'),
]
operations = [
migrations.RenameField(
model_name='player',
old_name='username',
new_name='email',
),
]
|
betonreddit/betonreddit
|
betonreddit/player/migrations/0003_auto_20160509_2322.py
|
Python
|
apache-2.0
| 427 | 0 |
#!/usr/bin/env python
import sys
import textwrap
try:
import virtualenv # @UnresolvedImport
except:
from .lib import virtualenv # @Reimport
from . import snippits
__version__ = "0.9.1"
def file_search_dirs():
dirs = []
for d in virtualenv.file_search_dirs():
if "vootstrap" not in d:
dirs.append(d)
return dirs
def make_parser():
parser = virtualenv.ConfigOptionParser(
usage="usage: %prog [OPTIONS] OUTFILE",
version=__version__,
formatter=virtualenv.UpdatingDefaultsHelpFormatter())
parser.add_option(
"-v", "--verbose",
action="count",
dest="verbose",
default=0,
help="Increase verbosity")
parser.add_option(
"-q", "--quiet",
action="count",
dest="quiet",
default=0,
help="Decrease verbosity")
parser.add_option(
"-p", "--python",
dest="python",
metavar="PYTHON_EXE",
help="The Python interpreter to use, e.g., --python=python2.5 will "
"use the python2.5 interpreter to create the new environment. The "
"default is the interpreter that virtualenv was installed with (%s)"
% sys.executable)
parser.add_option(
"--clear",
dest="clear",
action="store_true",
help="Clear out the non-root install and start from scratch")
parser.add_option(
"--no-site-packages",
dest="no_site_packages",
action="store_true",
help="Don't give access to the global site-packages dir to the "
"virtual environment (default; deprecated)")
parser.add_option(
"--system-site-packages",
dest="system_site_packages",
action="store_true",
help="Give access to the global site-packages dir to the "
"virtual environment")
parser.add_option(
"--unzip-setuptools",
dest="unzip_setuptools",
action="store_true",
help="Unzip Setuptools or Distribute when installing it")
parser.add_option(
"--relocatable",
dest="relocatable",
action="store_true",
help="Make an EXISTING virtualenv environment relocatable. "
"This fixes up scripts and makes all .pth files relative")
parser.add_option(
"--distribute",
"--use-distribute",
dest="use_distribute",
action="store_true",
help="Use Distribute instead of Setuptools. Set environ variable "
"VIRTUALENV_DISTRIBUTE to make it the default ")
parser.add_option(
"--extra-search-dir",
dest="search_dirs",
action="append",
default=['.'],
help="Directory to look for setuptools/distribute/pip distributions "
"in. You can add any number of additional --extra-search-dir paths.")
parser.add_option(
"--never-download",
dest="never_download",
action="store_true",
help="Never download anything from the network. Instead, virtualenv "
"will fail if local distributions of setuptools/distribute/pip are "
"not present.")
parser.add_option(
"--prompt",
dest="prompt",
help="Provides an alternative prompt prefix for this environment")
parser.add_option("--install-requirements",
default=False,
action="store_true",
dest="install_requirements",
help="Install requirements.txt after vootstrapping")
parser.add_option(
"--path",
action="append",
dest="path",
help="Directory to add to vootstrapped sys.path. You can add any "
"number of additional --path paths. Relative directories are relative "
"to the vootstrapped directory")
return parser
def adjust_options(options):
out_str = "def adjust_options(options, args):\n"
opts = [
"verbose",
"quiet",
"python",
"clear",
"no_site_packages",
"system_site_packages",
"unzip_setuptools",
"relocatable",
"use_distribute",
"search_dirs",
"never_download",
"prompt"
]
for opt in opts:
out_str += " options.%s = %s\n" % (opt, getattr(options, opt))
out_str += snippits.ADJUST_OPTIONS_ARGS
return textwrap.dedent(out_str)
def after_install(options):
if not (options.install_requirements or options.path):
return ""
out_str = snippits.AFTER_INSTALL_PREFIX
if options.path:
out_str += snippits.AFTER_INSTALL_PATH(options.path)
if options.install_requirements:
out_str += snippits.AFTER_INSTALL_REQUIREMENTS
return textwrap.dedent(out_str)
def vootify(options):
return virtualenv.create_bootstrap_script(
adjust_options(options) +
after_install(options)
)
def main():
parser = make_parser()
(options, args) = parser.parse_args()
if not len(args):
parser.print_help()
return 1
with open(args[0], "w") as outfile:
outfile.write(vootify(options))
return 0
if __name__ == "__main__":
exit_code = main()
    if exit_code:
sys.exit(exit_code)
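# Example invocation (assumption: assumes a console-script entry point named
# 'vootstrap' wired to main(); the module itself only defines main() above):
#   vootstrap --path=src --install-requirements voot.py
# This writes 'voot.py', a virtualenv bootstrap script that adds ./src to the new
# environment's sys.path and pip-installs requirements.txt after creation.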
|
tonyczeh/vootstrap
|
vootstrap/__init__.py
|
Python
|
mit
| 5,176 | 0.001546 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
# Import sile objects
from .sile import *
from sisl._internal import set_module
from sisl import Geometry
__all__ = ['moldenSile']
@set_module("sisl.io")
class moldenSile(Sile):
""" Molden file object """
@sile_fh_open()
def write_supercell(self, sc):
""" Writes the supercell to the contained file """
# Check that we can write to the file
sile_raise_write(self)
# Write the number of atoms in the geometry
self._write('[Molden Format]\n')
# Sadly, MOLDEN does not read this information...
@sile_fh_open()
def write_geometry(self, geometry, fmt='.8f'):
""" Writes the geometry to the contained file """
# Check that we can write to the file
sile_raise_write(self)
# Be sure to write the supercell
self.write_supercell(geometry.sc)
# Write in ATOM mode
self._write('[Atoms] Angs\n')
# Write out the cell information in the comment field
# This contains the cell vectors in a single vector (3 + 3 + 3)
# quantities, plus the number of supercells (3 ints)
fmt_str = '{{0:2s}} {{1:4d}} {{2:4d}} {{3:{0}}} {{4:{0}}} {{5:{0}}}\n'.format(fmt)
for ia, a, _ in geometry.iter_species():
self._write(fmt_str.format(a.symbol, ia, a.Z, *geometry.xyz[ia, :]))
def ArgumentParser(self, p=None, *args, **kwargs):
""" Returns the arguments that is available for this Sile """
newkw = Geometry._ArgumentParser_args_single()
newkw.update(kwargs)
return self.read_geometry().ArgumentParser(p, *args, **newkw)
add_sile('molf', moldenSile, case=False, gzip=True)
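# Minimal usage sketch (assumption, not part of the original module): writing a
# geometry to a Molden file through this sile. The bulk-silicon geometry and the
# output filename are illustrative placeholders.
#
#   import sisl
#   geom = sisl.geom.diamond(5.43, sisl.Atom('Si'))
#   sisl.get_sile('out.molf', mode='w').write_geometry(geom)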
|
zerothi/sisl
|
sisl/io/molden.py
|
Python
|
mpl-2.0
| 1,873 | 0.001068 |
#!/usr/bin/env python
from __future__ import print_function
import boto3
import time
from botocore.exceptions import ClientError
from datetime import datetime
def get_unix_timestamp():
"""
Generate a Unix timestamp string.
"""
d = datetime.now()
t = time.mktime(d.timetuple())
return str(int(t))
def lambda_handler(event, context):
"""
Create EBS AMI for instances identified by the filter.
"""
if not 'DryRun' in event:
event['DryRun'] = False
if not 'Filters' in event:
event['Filters'] = [{
'Name': 'tag-key',
'Values': ['ops:snapshot']
}]
ec2 = boto3.resource('ec2')
# Iterate through instances identified by the filter.
for instance in ec2.instances.filter(Filters=event['Filters']):
instance_name = instance.instance_id
instance_tags = []
# If a Name tag is available, use it to identify the instance
# instead of the instance_id.
for tag in instance.tags:
if tag['Key'] == 'Name' and tag['Value'] != '':
instance_name = tag['Value']
else:
instance_tags.append(tag)
try:
# Create the AMI
image_name = instance_name + '-' + get_unix_timestamp()
image = instance.create_image(
Name=image_name,
NoReboot=True,
DryRun=event['DryRun']
)
print('Started image creation: ' + image_name)
image_tags = [{'Key': 'ops:retention', 'Value': '30'}] + instance_tags
image.create_tags(
Tags=image_tags,
DryRun=event['DryRun']
)
except ClientError as e:
            if e.response['Error']['Code'] == 'DryRunOperation':
                pass
            else:
                # re-raise unexpected API errors instead of silently skipping the instance
                raise
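# Local invocation sketch (assumption, not part of the original handler): run the
# handler once with DryRun=True so no AMIs are actually created. Requires AWS
# credentials and a default region to be configured in the environment.
if __name__ == '__main__':
    lambda_handler({'DryRun': True}, None)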
|
meredith-digops/awsops
|
amicreation/amicreation.py
|
Python
|
mit
| 1,866 | 0.001608 |
#!/usr/bin/env python
from __future__ import print_function
import argparse
import numpy as N
import os
import sys
def parse_args(args):
p = argparse.ArgumentParser()
p.add_argument('-i', '--input-files', default=[sys.stdin], nargs="+",
type=argparse.FileType('rt'),
                   help='input file(s); read from stdin if omitted')
p.add_argument('-d', '--decorate',default=False,action='store_true'
                   ,help='put the stat name before the value (e.g. mean:1)')
g = p.add_mutually_exclusive_group()
g.add_argument('-A','--all-stats',action='store_true',default=False)
h = p.add_argument_group('stat')
h.add_argument('-a', '--mean', action='store_true', default=False)
h.add_argument('-D', '--median', action='store_true', default=False)
h.add_argument('-s', '--standard_deviation',action='store_true',default=False)
h.add_argument('-v', '--variance', action='store_true', default=False)
h.add_argument('-m', '--min', action='store_true', default=False)
h.add_argument('-M', '--max', action='store_true', default=False)
if not args:
p.print_help()
sys.exit(0)
return p.parse_args(args)
def main():
args = parse_args(sys.argv[1:])
for input_file in args.input_files:
vals = [float(x) for x in input_file.read().split(os.linesep) if x]
a = N.array(vals)
s = []
for (name,value,f) in [('mean', args.mean, N.mean)
, ('median', args.median, N.median)
, ('standard_deviation', args.standard_deviation
, N.std)
, ('variance', args.variance, N.var)
, ('min', args.min, N.amin)
, ('max', args.max, N.amax)]:
if not args.all_stats and not value:
continue
r = f(a)
if args.decorate:
s.append('{}:{}'.format(name,r))
else:
s.append('{}'.format(r))
print(' '.join(s))
if __name__=='__main__':
main()
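# Example invocation (assumption, illustrating the CLI defined above):
#   printf '1\n2\n3\n4\n' | ./stats.py -A -d
# prints every statistic for the piped values, each prefixed with its name
# (e.g. "mean:2.5 median:2.5 ...").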
|
melrief/Hadoop-Log-Tools
|
hadoop/util/stats.py
|
Python
|
apache-2.0
| 1,993 | 0.024586 |
"""
This module implements atom/bond/structure-wise descriptors calculated from a
pretrained MEGNet model
"""
import os
from typing import Dict, Union
import numpy as np
from tensorflow.keras.models import Model
from megnet.models import GraphModel, MEGNetModel
from megnet.utils.typing import StructureOrMolecule
DEFAULT_MODEL = os.path.join(os.path.dirname(__file__), "../../mvl_models/mp-2019.4.1/formation_energy.hdf5")
class MEGNetDescriptor:
"""
MEGNet descriptors. This class takes a trained model and
then compute the intermediate outputs as structure features
"""
def __init__(self, model_name: Union[str, GraphModel, MEGNetModel] = DEFAULT_MODEL, use_cache: bool = True):
"""
Args:
model_name (str or MEGNetModel): trained model. If it is
str, then only models in mvl_models are used.
use_cache (bool): whether to use cache for structure
graph calculations
"""
if isinstance(model_name, str):
model = MEGNetModel.from_file(model_name)
elif isinstance(model_name, GraphModel):
model = model_name
else:
raise ValueError("model_name only support str or GraphModel object")
layers = model.layers
important_prefix = ["meg", "set", "concatenate"]
all_names = [i.name for i in layers if any(i.name.startswith(j) for j in important_prefix)]
if any(i.startswith("megnet") for i in all_names):
self.version = "v2"
else:
self.version = "v1"
valid_outputs = [i.output for i in layers if any(i.name.startswith(j) for j in important_prefix)]
outputs = []
valid_names = []
for i, j in zip(all_names, valid_outputs):
if isinstance(j, list):
for k, l in enumerate(j):
valid_names.append(i + f"_{k}")
outputs.append(l)
else:
valid_names.append(i)
outputs.append(j)
full_model = Model(inputs=model.inputs, outputs=outputs)
model.model = full_model
self.model = model
self.valid_names = valid_names
self._cache: Dict[str, float] = {}
self.use_cache = use_cache
def _predict_structure(self, structure: StructureOrMolecule) -> np.ndarray:
graph = self.model.graph_converter.convert(structure)
inp = self.model.graph_converter.graph_to_input(graph)
return self.model.predict(inp)
def _predict_feature(self, structure: StructureOrMolecule) -> np.ndarray:
if not self.use_cache:
return self._predict_structure(structure)
s = str(structure)
if s in self._cache:
return self._cache[s]
result = self._predict_structure(structure)
self._cache[s] = result
return result
def _get_features(self, structure: StructureOrMolecule, prefix: str, level: int, index: int = None) -> np.ndarray:
name = prefix
if level is not None:
name = f"{prefix}_{level}"
if index is not None:
name += f"_{index}"
if name not in self.valid_names:
raise ValueError(f"{name} not in original megnet model")
ind = self.valid_names.index(name)
out_all = self._predict_feature(structure)
return out_all[ind][0]
def _get_updated_prefix_level(self, prefix: str, level: int):
mapping = {
"meg_net_layer": ["megnet", level - 1],
"set2_set": ["set2set_atom" if level == 1 else "set2set_bond", None],
"concatenate": ["concatenate", None],
}
if self.version == "v2":
return mapping[prefix][0], mapping[prefix][1] # type: ignore
return prefix, level
def get_atom_features(self, structure: StructureOrMolecule, level: int = 3) -> np.ndarray:
"""
Get megnet atom features from structure
Args:
structure: pymatgen structure or molecule
level: int, indicating the block number of megnet, starting
from 1
Returns:
nxm atomic feature matrix
"""
prefix, level = self._get_updated_prefix_level("meg_net_layer", level)
return self._get_features(structure, prefix=prefix, level=level, index=0)
def get_bond_features(self, structure: StructureOrMolecule, level: int = 3) -> np.ndarray:
"""
Get bond features at megnet block level
Args:
structure: pymatgen structure
level: int
Returns:
n_bond x m bond feature matrix
"""
prefix, level = self._get_updated_prefix_level("meg_net_layer", level)
return self._get_features(structure, prefix=prefix, level=level, index=1)
def get_global_features(self, structure: StructureOrMolecule, level: int = 2) -> np.ndarray:
"""
Get state features at megnet block level
Args:
structure: pymatgen structure or molecule
level: int
Returns:
1 x m_g global feature vector
"""
prefix, level = self._get_updated_prefix_level("meg_net_layer", level)
return self._get_features(structure, prefix=prefix, level=level, index=2)
def get_set2set(self, structure: StructureOrMolecule, ftype: str = "atom") -> np.ndarray:
"""
Get set2set output as features
Args:
structure (StructureOrMolecule): pymatgen structure
or molecule
ftype (str): atom or bond
Returns:
feature matrix, each row is a vector for an atom
or bond
"""
mapping = {"atom": 1, "bond": 2}
prefix, level = self._get_updated_prefix_level("set2_set", level=mapping[ftype])
return self._get_features(structure, prefix=prefix, level=level)
def get_structure_features(self, structure: StructureOrMolecule) -> np.ndarray:
"""
Get structure level feature vector
Args:
structure (StructureOrMolecule): pymatgen structure
or molecule
Returns:
one feature vector for the structure
"""
prefix, level = self._get_updated_prefix_level("concatenate", level=1)
return self._get_features(structure, prefix=prefix, level=level)
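# Minimal usage sketch (assumption, not part of the original module): computing
# features with the default pretrained formation-energy model. The Li2O structure
# below is an illustrative placeholder.
#
#   from pymatgen.core import Lattice, Structure
#   from megnet.utils.descriptor import MEGNetDescriptor
#
#   structure = Structure.from_spacegroup(
#       "Fm-3m", Lattice.cubic(4.6), ["Li", "O"],
#       [[0.25, 0.25, 0.25], [0, 0, 0]])
#   desc = MEGNetDescriptor()                             # loads the default MP model
#   atom_feats = desc.get_atom_features(structure)        # n_atoms x m matrix
#   struct_feat = desc.get_structure_features(structure)  # single feature vector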
|
materialsvirtuallab/megnet
|
megnet/utils/descriptor.py
|
Python
|
bsd-3-clause
| 6,396 | 0.002502 |
#!/usr/bin/env python
import sys
import json
import logging
from logging import warning, error, info
from math import pi, degrees
from PyQt4 import Qt, QtCore, QtGui
from connection import Connection
arrow_points = (
Qt.QPoint(-1, -4),
Qt.QPoint(1, -4),
Qt.QPoint(1, 4),
Qt.QPoint(4, 4),
Qt.QPoint(0, 12),
Qt.QPoint(-4, 4),
Qt.QPoint(-1, 4)
)
class PlotGroup:
def __init__(self, color=Qt.Qt.black, symbol='cross'):
self.color = color
self.symbol = symbol
self.data = []
class XYPlot(Qt.QWidget):
def __init__(self):
Qt.QWidget.__init__(self)
# little dance to make the background white.
p = self.palette()
p.setColor(self.backgroundRole(), Qt.Qt.white)
self.setPalette(p)
self.setAutoFillBackground(True)
# map scale
self._scale = 1.0
self.symbol_size = 5.0
self._symbol_scale = self.symbol_size/self._scale
self._offset_x = 400
self._offset_y = 300
self.messages = []
self.groups = []
def translate(self, x, y):
self._offset_x += x
self._offset_y += y
self.update()
def scale(self, s):
self._scale *= s
self._symbol_scale = self.symbol_size/self._scale
self.update()
def drawArrow(self, qp, x, y, angle):
qp.save()
qp.translate(x, y)
qp.rotate(angle)
qp.scale(self._symbol_scale*0.5, self._symbol_scale*0.5)
qp.drawPolygon(*arrow_points)
qp.restore()
def drawCross(self, qp, x, y):
qp.save()
qp.translate(x, y)
qp.scale(self._symbol_scale, self._symbol_scale)
qp.drawLine(-1, -1, 1, 1)
qp.drawLine(-1, 1, 1, -1)
qp.restore()
def drawPlus(self, qp, x, y):
qp.save()
qp.translate(x, y)
qp.scale(self._symbol_scale, self._symbol_scale)
qp.drawLine(-1, 0, 1, 0)
qp.drawLine(0, -1, 0, 1)
qp.restore()
def drawModel(self, qp, x, y, angle, steer):
# all the units are x10 because there is some rounding(?)
# issue where lines don't joint correctly when using
# the meter units directly.
# there is a scale(0.1,0.1) further down to put things
# back to the correct size.
        Lf = 16 # length of chassis from middle to front axle
Lb = 23 # length of chassis from middle to back axle
Wa = 13 # half axle length
Lw = 10 # wheel length
qp.save()
qp.translate(x,y)
qp.rotate(angle)
#qp.scale(self._symbol_scale, self._symbol_scale)
qp.scale(0.1, 0.1)
qp.drawLine(0, -Lb, 0, Lf) # main body
qp.save() # begin rear end
qp.translate(0.0, -Lb)
qp.drawLine(-Wa, 0.0, Wa, 0.0) # rear axle
qp.drawLine(-Wa,-Lw, -Wa, Lw) #left wheel
qp.drawLine(Wa, -Lw, Wa, Lw) # right wheel
qp.restore()
qp.translate(0.0, Lf) # begin front end
qp.drawLine(-Wa, 0.0, Wa, 0.0) # front axle
qp.save() # begin left wheel
qp.translate(-Wa, 0.0)
qp.rotate(-steer)
qp.drawLine(0.0, -Lw, 0.0, Lw)
qp.restore()
        qp.save() # begin right wheel
qp.translate(Wa, 0.0)
qp.rotate(-steer)
qp.drawLine(0.0, -Lw, 0.0, Lw)
qp.restore()
qp.restore()
def paintGrid(self, qp):
pass
def paintEvent(self, e):
#print self.offset_x, self.offset_y, self.s
qp = QtGui.QPainter()
qp.begin(self)
qp.setRenderHint(QtGui.QPainter.Antialiasing, True)
line_y = 20
for line in self.messages:
qp.drawText(20, line_y, line)
line_y += 20
qp.translate(self._offset_x, self._offset_y)
qp.scale(self._scale, -self._scale)
#qp.translate(200, 200)
qp.setBrush(Qt.Qt.black)
qp.setPen(Qt.Qt.black)
self.drawCross(qp, 0, 0)
for group in self.groups:
if group.symbol == 'arrow':
qp.setBrush(group.color)
qp.setPen(Qt.Qt.NoPen)
for v in group.data:
self.drawArrow(qp, v[0], v[1], v[2])
elif group.symbol == 'cross':
qp.setBrush(Qt.Qt.NoBrush)
qp.setPen(group.color)
for v in group.data:
self.drawCross(qp, v[0], v[1])
elif group.symbol == 'model':
pen = Qt.QPen()
pen.setWidth(self._symbol_scale)
pen.setColor(group.color)
qp.setBrush(group.color)
qp.setPen(pen)
for v in group.data:
#print("Draw model %0.2f %0.2f %0.2f %0.2f" % (v[0:4]))
self.drawModel(qp, v[0], v[1], v[2], v[3])
qp.end()
def add_plot_group(self, g):
self.groups.append(g)
#def update(self):
class MapPlot(XYPlot):
def __init__(self):
XYPlot.__init__(self)
self.current_pos = PlotGroup(color=Qt.Qt.blue, symbol='model')
self.add_plot_group(self.current_pos)
self.waypoint_group = PlotGroup(color=Qt.Qt.black, symbol='cross')
self.add_plot_group(self.waypoint_group)
self.scale(12)
def on_msg(self, msg):
try:
#t = msg[u'state'][u'time']
current = (msg[u'state'][u'x'],
msg[u'state'][u'y'],
degrees(msg[u'state'][u'yaw']),
degrees(msg[u'controls'][u'steer']))
waypoints = msg[u'waypoint_control'][u'points']
except KeyError:
logging.error("Invalid message.")
else:
self.current_pos.data = [current]
self.waypoint_group.data = waypoints
self.update()
class MainWindow(Qt.QWidget):
def __init__(self):
Qt.QWidget.__init__(self)
self.grid = Qt.QGridLayout()
self.setLayout(self.grid)
self.plot = MapPlot()
self.grid.addWidget(self.plot, 0, 0)
self.connection = Connection('localhost', 60212, self.update)
def update(self, msg):
self.plot.on_msg(msg)
def keyPressEvent(self, e):
if e.key() == Qt.Qt.Key_Escape:
self.close()
elif e.key() == Qt.Qt.Key_A:
self.plot.scale(2)
elif e.key() == Qt.Qt.Key_Z:
self.plot.scale(0.5)
elif e.key() == Qt.Qt.Key_Up:
self.plot.translate(0, 10)
elif e.key() == Qt.Qt.Key_Down:
self.plot.translate(0, -10)
elif e.key() == Qt.Qt.Key_Left:
self.plot.translate(10, 0)
elif e.key() == Qt.Qt.Key_Right:
self.plot.translate(-10, 0)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
app = Qt.QApplication([])
demo = MainWindow()
demo.resize(800, 600)
demo.show()
sys.exit(app.exec_())
|
zwarren/morse-car-controller
|
user/map.py
|
Python
|
mit
| 7,092 | 0.006204 |
""" Example of reasoning about the approximate node completeness. """
from tulip import *
from tulipgui import *
import tulippaths as tp
# Load graph
graphFile = '../data/514_4hops.tlp'
graph = tlp.loadGraph(graphFile)
# Compute completeness for each node label
completeness = tp.utils.getApproximateAnnotationCompleteness(graph)
# Tally completeness
numComplete = 0
numAlmostComplete = 0
numIncomplete = 0
for node in graph.getNodes():
currCompleteness = completeness[node]
if currCompleteness <= 1.0 and currCompleteness > 0.75:
numComplete += 1
elif currCompleteness <= 0.75 and currCompleteness > 0.25:
numAlmostComplete += 1
else:
graph.delNode(node)
numIncomplete += 1
print('num complete, num almost complete, num incomplete')
print((str(numComplete) + ', ' + str(numAlmostComplete) + ', ' + str(numIncomplete)))
nodeLinkView = tlpgui.createNodeLinkDiagramView(graph)
|
visdesignlab/TulipPaths
|
demos/simpleNodeCompleteness.py
|
Python
|
mit
| 928 | 0.002155 |
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from BTrees.OOBTree import OOBTree
from cStringIO import StringIO
import MaKaC.webinterface.pages.tracks as tracks
import MaKaC.webinterface.pages.conferences as conferences
import MaKaC.webinterface.urlHandlers as urlHandlers
import MaKaC.webinterface.common.abstractFilters as abstractFilters
import MaKaC.review as review
from MaKaC.webinterface.rh.conferenceBase import RHTrackBase
from MaKaC.webinterface.rh.base import RHModificationBaseProtected
from MaKaC.errors import MaKaCError, FormValuesError
from MaKaC.PDFinterface.conference import TrackManagerAbstractToPDF, TrackManagerAbstractsToPDF
from indico.core.config import Config
import MaKaC.common.filters as filters
import MaKaC.webinterface.common.contribFilters as contribFilters
from MaKaC.webinterface.common.contribStatusWrapper import ContribStatusList
from MaKaC.PDFinterface.conference import ContribsToPDF
from MaKaC.webinterface.mail import GenericMailer, GenericNotification
from MaKaC.i18n import _
from MaKaC.abstractReviewing import ConferenceAbstractReview
from MaKaC.paperReviewing import Answer
from MaKaC.webinterface.common.tools import cleanHTMLHeaderFilename
from MaKaC.webinterface.rh.abstractModif import _AbstractWrapper
from MaKaC.webinterface.common.abstractNotificator import EmailNotificator
from indico.web.flask.util import send_file
class RHTrackModifBase( RHTrackBase, RHModificationBaseProtected ):
def _checkParams( self, params ):
RHTrackBase._checkParams( self, params )
def _checkProtection( self ):
RHModificationBaseProtected._checkProtection( self )
class RHTrackModification( RHTrackModifBase ):
def _process( self ):
p = tracks.WPTrackModification( self, self._track )
return p.display()
class RHTrackDataModification( RHTrackModifBase ):
def _process( self ):
p = tracks.WPTrackDataModification( self, self._track )
return p.display()
class RHTrackPerformDataModification(RHTrackModifBase):
def _checkParams(self,params):
RHTrackModifBase._checkParams(self,params)
self._cancel=params.has_key("cancel")
def _process(self):
if self._cancel:
self._redirect(urlHandlers.UHTrackModification.getURL(self._track))
else:
params=self._getRequestParams()
self._track.setTitle(params["title"])
self._track.setDescription(params["description"])
self._track.setCode(params["code"])
self._redirect(urlHandlers.UHTrackModification.getURL(self._track))
class RHTrackCoordination( RHTrackModifBase ):
def _checkProtection(self):
RHTrackModifBase._checkProtection(self)
if not self._conf.hasEnabledSection("cfa"):
raise MaKaCError( _("You cannot access this option because \"Abstracts\" was disabled"))
def _process( self ):
p = tracks.WPTrackModifCoordination( self, self._track )
return p.display()
class TrackCoordinationError( MaKaCError ):
pass
class RHTrackAbstractsBase( RHTrackModifBase ):
"""Base class for the areas accessible with track coordination privileges.
"""
def _checkProtection( self, checkCFADisabled=True ):
"""
"""
if not self._target.canCoordinate( self.getAW() ):
if self._getUser() == None:
self._checkSessionUser()
else:
raise TrackCoordinationError("You don't have rights to coordinate this track")
if checkCFADisabled and not self._conf.hasEnabledSection("cfa"):
raise MaKaCError( _("You cannot access this option because \"Abstracts\" was disabled"))
class _TrackAbstractFilterField( filters.FilterField ):
def __init__( self, track, values, showNoValue=True ):
self._track = track
filters.FilterField.__init__(self,track.getConference(),values,showNoValue)
class _StatusFilterField( _TrackAbstractFilterField ):
"""
"""
_id = "status"
def satisfies( self, abstract ):
"""
"""
s = tracks.AbstractStatusTrackViewFactory().getStatus( self._track, abstract )
return s.getId() in self.getValues()
class _ContribTypeFilterField( _TrackAbstractFilterField, abstractFilters.ContribTypeFilterField ):
"""
"""
_id = "type"
def __init__( self, track, values, showNoValue=True ):
_TrackAbstractFilterField.__init__( self, track, values, showNoValue )
def satisfies( self, abstract ):
"""
"""
return abstractFilters.ContribTypeFilterField.satisfies(self, abstract)
class _MultipleTrackFilterField(_TrackAbstractFilterField):
_id = "multiple_tracks"
def satisfies( self, abstract ):
return len( abstract.getTrackList() )>1
class _CommentsTrackFilterField(_TrackAbstractFilterField, abstractFilters.CommentFilterField):
_id = "comment"
def __init__( self, track, values, showNoValue=True ):
_TrackAbstractFilterField.__init__( self, track, values, showNoValue )
def satisfies( self, abstract ):
"""
"""
return abstractFilters.CommentFilterField.satisfies(self, abstract)
class _AccContribTypeFilterField(_TrackAbstractFilterField,abstractFilters.AccContribTypeFilterField):
"""
"""
_id = "acc_type"
def __init__(self,track,values,showNoValue=True):
_TrackAbstractFilterField.__init__(self,track,values,showNoValue)
def satisfies(self,abstract):
astv = tracks.AbstractStatusTrackViewFactory().getStatus( self._track, abstract )
if astv.__class__ in [tracks._ASTrackViewAccepted,\
tracks._ASTrackViewPA]:
if astv.getContribType() is None or astv.getContribType()=="":
return self._showNoValue
return astv.getContribType() in self._values
else:
return self._showNoValue
class TrackAbstractsFilterCrit(filters.FilterCriteria):
_availableFields = { \
_ContribTypeFilterField.getId(): _ContribTypeFilterField, \
_StatusFilterField.getId(): _StatusFilterField, \
_MultipleTrackFilterField.getId(): _MultipleTrackFilterField, \
_CommentsTrackFilterField.getId(): _CommentsTrackFilterField,
_AccContribTypeFilterField.getId(): _AccContribTypeFilterField }
def __init__(self,track,crit={}):
self._track = track
filters.FilterCriteria.__init__(self,track.getConference(),crit)
def _createField(self,klass,values ):
return klass(self._track,values)
def satisfies( self, abstract ):
for field in self._fields.values():
if not field.satisfies( abstract ):
return False
return True
class _TrackAbstractsSortingField( filters.SortingField ):
def __init__( self, track ):
self._track = track
filters.SortingField.__init__( self )
class _ContribTypeSF( _TrackAbstractsSortingField, abstractFilters.ContribTypeSortingField ):
_id = "type"
def __init__( self, track ):
_TrackAbstractsSortingField.__init__( self, track )
def compare( self, a1, a2 ):
return abstractFilters.ContribTypeSortingField.compare( self, a1, a2 )
class _StatusSF( _TrackAbstractsSortingField ):
_id = "status"
def compare( self, a1, a2 ):
statusA1 = tracks.AbstractStatusTrackViewFactory().getStatus( self._track, a1 )
statusA2 = tracks.AbstractStatusTrackViewFactory().getStatus( self._track, a2 )
return cmp( statusA1.getLabel(), statusA2.getLabel() )
class _NumberSF( _TrackAbstractsSortingField ):
_id = "number"
def compare( self, a1, a2 ):
try:
a = int(a1.getId())
b = int(a2.getId())
except:
a = a1.getId()
b = a2.getId()
return cmp( a, b )
class _DateSF( _TrackAbstractsSortingField ):
_id = "date"
def compare( self, a1, a2 ):
return cmp( a2.getSubmissionDate(), a1.getSubmissionDate() )
class TrackAbstractsSortingCrit( filters.SortingCriteria ):
"""
"""
_availableFields = { _ContribTypeSF.getId(): _ContribTypeSF, \
_StatusSF.getId(): _StatusSF, \
_NumberSF.getId(): _NumberSF, \
_DateSF.getId(): _DateSF }
def __init__( self, track, crit=[] ):
"""
"""
self._track = track
filters.SortingCriteria.__init__( self, crit )
def _createField( self, fieldKlass ):
"""
"""
return fieldKlass( self._track )
class RHTrackAbstractList( RHTrackAbstractsBase ):
def _checkParams( self, params ):
RHTrackAbstractsBase._checkParams( self, params )
self._filterUsed = params.has_key( "OK" ) #this variable is true when the
# filter has been used
filter = {}
ltypes = []
if not self._filterUsed:
for type in self._conf.getContribTypeList():
ltypes.append(type)
else:
for id in self._normaliseListParam(params.get("selTypes",[])):
ltypes.append(self._conf.getContribTypeById(id))
filter["type"]=ltypes
lstatus=[]
if not self._filterUsed:
sl = tracks.AbstractStatusTrackViewFactory().getStatusList()
for statusKlass in sl:
lstatus.append( statusKlass.getId() )
filter["status"] = self._normaliseListParam( params.get("selStatus", lstatus) )
ltypes = []
if not self._filterUsed:
for type in self._conf.getContribTypeList():
ltypes.append( type )
else:
for id in self._normaliseListParam(params.get("selAccTypes",[])):
ltypes.append(self._conf.getContribTypeById(id))
filter["acc_type"]=ltypes
if params.has_key("selMultipleTracks"):
filter["multiple_tracks"] = ""
if params.has_key("selOnlyComment"):
filter["comment"] = ""
self._criteria = TrackAbstractsFilterCrit( self._track, filter )
typeShowNoValue,accTypeShowNoValue=True,True
if self._filterUsed:
typeShowNoValue = params.has_key("typeShowNoValue")
accTypeShowNoValue= params.has_key("accTypeShowNoValue")
self._criteria.getField("type").setShowNoValue( typeShowNoValue )
self._criteria.getField("acc_type").setShowNoValue(accTypeShowNoValue)
self._sortingCrit = TrackAbstractsSortingCrit( self._track, [params.get( "sortBy", "number").strip()] )
self._selectAll = params.get("selectAll", None)
self._msg = params.get("directAbstractMsg","")
self._order = params.get("order","down")
def _process( self ):
p = tracks.WPTrackModifAbstracts( self, self._track, self._msg, self._filterUsed, self._order )
return p.display( filterCrit= self._criteria, \
sortingCrit = self._sortingCrit, \
selectAll = self._selectAll )
class RHTrackAbstractBase( RHTrackAbstractsBase ):
def _checkParams( self, params ):
RHTrackModifBase._checkParams( self, params )
absId = params.get( "abstractId", "" ).strip()
if absId == "":
raise MaKaCError( _("Abstract identifier not specified"))
self._abstract = self._track.getAbstractById( absId )
if self._abstract == None:
raise MaKaCError( _("The abstract with id %s does not belong to the track with id %s")%(absId, self._track.getId()))
class RHTrackAbstract( RHTrackAbstractBase ):
def _process( self ):
p = tracks.WPTrackAbstractModif( self, self._track, self._abstract )
return p.display()
class RHTrackAbstractDirectAccess( RHTrackAbstractBase ):
def _checkParams(self, params):
self._params = params
RHTrackBase._checkParams(self, params)
self._abstractId = params.get("abstractId","")
self._abstractExist = False
try:
abstract = self._track.getAbstractById(self._abstractId)
self._abstractExist = True
RHTrackAbstractBase._checkParams(self, params)
except KeyError:
pass
def _process( self ):
if self._abstractExist:
p = tracks.WPTrackAbstractModif( self, self._track, self._abstract )
return p.display()
else:
url = urlHandlers.UHTrackModifAbstracts.getURL(self._track)
#url.addParam("directAbstractMsg","There is no abstract number %s in this track"%self._abstractId)
self._redirect(url)
return
class RHTrackAbstractSetStatusBase(RHTrackAbstractBase):
""" This is the base class for the accept/reject functionality for the track coordinators """
def _checkProtection(self):
RHTrackAbstractBase._checkProtection(self)
if not self._abstract.getConference().getConfAbstractReview().canReviewerAccept():
raise MaKaCError(_("The acceptance or rejection of abstracts is not allowed. Only the managers of the conference can perform this action."))
def _checkParams(self, params):
RHTrackAbstractBase._checkParams(self, params)
self._action = params.get("accept", None)
if self._action:
self._typeId = params.get("type", "")
self._session=self._conf.getSessionById(params.get("session", ""))
else:
self._action = params.get("reject", None)
self._comments = params.get("comments", "")
self._doNotify = params.has_key("notify")
def _notifyStatus(self, status):
wrapper = _AbstractWrapper(status)
tpl = self._abstract.getOwner().getNotifTplForAbstract(wrapper)
if self._doNotify and tpl:
n = EmailNotificator()
self._abstract.notify(n, self._getUser())
class RHTrackAbstractAccept(RHTrackAbstractSetStatusBase):
def _process(self):
if self._action:
cType = self._abstract.getConference().getContribTypeById(self._typeId)
self._abstract.accept(self._getUser(), self._track, cType, self._comments, self._session)
self._notifyStatus(review.AbstractStatusAccepted(self._abstract, None, self._track, cType))
self._redirect(urlHandlers.UHTrackAbstractModif.getURL( self._track, self._abstract ))
else:
p = tracks.WPTrackAbstractAccept(self, self._track, self._abstract)
return p.display(**self._getRequestParams())
class RHTrackAbstractReject(RHTrackAbstractSetStatusBase):
def _process(self):
if self._action:
self._abstract.reject(self._getUser(), self._comments)
self._notifyStatus(review.AbstractStatusRejected(self._abstract, None, None))
self._redirect(urlHandlers.UHTrackAbstractModif.getURL( self._track, self._abstract ))
else:
p = tracks.WPTrackAbstractReject(self, self._track, self._abstract)
return p.display(**self._getRequestParams())
class RHTrackAbstractPropBase(RHTrackAbstractBase):
""" Base class for propose to accept/reject classes """
def _checkParams(self,params):
RHTrackAbstractBase._checkParams(self,params)
self._action = ""
self._comment = params.get("comment","")
self._answers = []
if params.has_key("OK"):
self._action = "GO"
# get answers and make the list
scaleLower = self._target.getConference().getConfAbstractReview().getScaleLower()
scaleHigher = self._target.getConference().getConfAbstractReview().getScaleHigher()
numberOfAnswers = self._target.getConference().getConfAbstractReview().getNumberOfAnswers()
c = 0
for question in self._target.getConference().getConfAbstractReview().getReviewingQuestions():
c += 1
if not params.has_key("RB_"+str(c)):
raise FormValuesError(_("Please, reply to all the reviewing questions. Question \"%s\" is missing the answer.")%question.getText())
rbValue = int(params.get("RB_"+str(c),scaleLower))
newId = self._target.getConference().getConfAbstractReview().getNewAnswerId()
newAnswer = Answer(newId, rbValue, numberOfAnswers, question)
newAnswer.calculateRatingValue(scaleLower, scaleHigher)
self._answers.append(newAnswer)
elif params.has_key("CANCEL"):
self._action="CANCEL"
class RHTrackAbstractPropToAccept( RHTrackAbstractPropBase ):
def _checkParams(self,params):
RHTrackAbstractPropBase._checkParams(self,params)
self._contribType = params.get("contribType",self._abstract.getContribType())
if params.has_key("OK"):
ctId = ""
if self._abstract.getContribType():
ctId = self._abstract.getContribType().getId()
ctId = params.get("contribType",ctId)
self._contribType = self._abstract.getConference().getContribTypeById(ctId)
def _process( self ):
url = urlHandlers.UHTrackAbstractModif.getURL( self._track, self._abstract )
if self._action == "CANCEL":
self._redirect( url )
elif self._action == "GO":
r = self._getUser()
self._abstract.proposeToAccept( r, self._track, self._contribType, self._comment, self._answers )
self._redirect( url )
else:
p=tracks.WPTrackAbstractPropToAcc(self,self._track,self._abstract)
return p.display(contribType=self._contribType,\
comment=self._comment)
class RHTrackAbstractPropToReject( RHTrackAbstractPropBase ):
def _process( self ):
url = urlHandlers.UHTrackAbstractModif.getURL( self._track, self._abstract )
if self._action == "CANCEL":
self._redirect( url )
elif self._action == "GO":
r = self._getUser()
self._abstract.proposeToReject( r, self._track, self._comment , self._answers)
self._redirect( url )
else:
p = tracks.WPTrackAbstractPropToRej( self, self._track, self._abstract )
return p.display()
class RHTrackAbstractPropForOtherTracks( RHTrackAbstractBase ):
def _checkParams( self, params ):
RHTrackAbstractBase._checkParams( self, params )
self._action, self._comment = "", ""
if params.has_key("OK"):
self._action = "GO"
self._comment = params.get("comment", "")
self._propTracks = []
for trackId in self._normaliseListParam( params.get("selTracks", []) ):
self._propTracks.append( self._conf.getTrackById(trackId) )
elif params.has_key("CANCEL"):
self._action = "CANCEL"
def _process( self ):
url = urlHandlers.UHTrackAbstractModif.getURL( self._track, self._abstract )
if self._action == "CANCEL":
self._redirect( url )
elif self._action == "GO":
if self._propTracks != []:
r = self._getUser()
self._abstract.proposeForOtherTracks( r, self._track, self._comment, self._propTracks )
self._redirect( url )
else:
p = tracks.WPAbstractPropForOtherTracks( self, self._track, self._abstract )
return p.display()
class RHModAbstractMarkAsDup(RHTrackAbstractBase):
def _checkParams(self, params):
RHTrackAbstractBase._checkParams(self, params)
self._action, self._comments, self._original = "", "", None
self._originalId = ""
if "OK" in params:
self._action = "MARK"
self._comments = params.get("comments", "")
self._originalId = params.get("id", "")
self._original = self._abstract.getOwner(
).getAbstractById(self._originalId)
def _process(self):
if self._action == "MARK":
if self._original is None or self._target == self._original:
raise MaKaCError(_("invalid original abstract id"))
self._abstract.markAsDuplicated(
self._getUser(), self._original, self._comments, self._track)
self._redirect(urlHandlers.UHTrackAbstractModif.getURL(
self._track, self._abstract))
return
p = tracks.WPModAbstractMarkAsDup(self, self._track, self._abstract)
return p.display(comments=self._comments, originalId=self._originalId)
class RHModAbstractUnMarkAsDup(RHTrackAbstractBase):
def _checkParams( self, params ):
RHTrackAbstractBase._checkParams( self, params )
self._action,self._comments="",""
if params.has_key("OK"):
self._action="UNMARK"
self._comments=params.get("comments","")
def _process( self ):
if self._action=="UNMARK":
self._abstract.unMarkAsDuplicated(self._getUser(),self._comments, self._track)
self._redirect(urlHandlers.UHTrackAbstractModif.getURL(self._track,self._abstract))
return
p = tracks.WPModAbstractUnMarkAsDup(self,self._track,self._abstract)
return p.display(comments=self._comments)
class RHAbstractToPDF(RHTrackAbstractBase):
def _process(self):
tz = self._conf.getTimezone()
filename = "%s - Abstract.pdf" % self._target.getTitle()
pdf = TrackManagerAbstractToPDF(self._abstract, self._track, tz=tz)
return send_file(filename, pdf.generate(), 'PDF')
class RHAbstractsActions:
"""
class to select the action to do with the selected abstracts
"""
def _checkParams( self, params ):
self._pdf = params.get("PDF.x", None)
self._mail = params.get("mail", None)
self._participant = params.get("PART", None)
self._tplPreview = params.get("tplPreview", None)
self._params = params
def _process(self):
if self._pdf:
return RHAbstractsToPDF().process(self._params)
elif self._mail:
return RHAbstractSendNotificationMail().process(self._params)
elif self._tplPreview:
return RHAbstractTPLPreview().process(self._params)
elif self._participant:
return RHAbstractsParticipantList().process(self._params)
else:
return "no action to do"
def process(self, params):
self._checkParams(params)
ret = self._process()
if not ret:
return "None"
return ret
class RHAbstractTPLPreview(RHTrackBase):
def _checkParams(self, params):
RHTrackBase._checkParams( self, params )
self._notifTplId = params.get("notifTpl","")
def _process(self):
tpl = self._conf.getAbstractMgr().getNotificationTplById(self._notifTplId)
self._redirect(urlHandlers.UHAbstractModNotifTplPreview.getURL(tpl))
class AbstractNotification:
def __init__(self, conf, abstract):
self._conf = conf
self._abstract = abstract
def getDict(self):
dict = {}
dict["conference_title"] = self._conf.getTitle()
dict["conference_URL"] = str(urlHandlers.UHConferenceDisplay.getURL(self._conf))
dict["abstract_title"] = self._abstract.getTitle()
dict["abstract_track"] = "No track attributed"
dict["contribution_type"] = "No type defined"
if self._abstract.getCurrentStatus().__class__ == review.AbstractStatusAccepted:
dict["abstract_track"] = self._abstract.getCurrentStatus().getTrack().getTitle()
dict["contribution_type"] = self._abstract.getContribType()#.getName()
dict["submitter_first_name"] = self._abstract.getSubmitter().getFirstName()
dict["submitter_familly_name"] = self._abstract.getSubmitter().getSurName()
dict["submitter_title"] = self._abstract.getSubmitter().getTitle()
dict["abstract_URL"] = str(urlHandlers.UHAbstractDisplay.getURL(self._abstract))
return dict
class RHAbstractSendNotificationMail(RHTrackModification):
def _checkParams( self, params ):
RHTrackModification._checkParams( self, params )
notifTplId = params.get("notifTpl", "")
self._notifTpl = self._conf.getAbstractMgr().getNotificationTplById(notifTplId)
        self._abstractIds = self._normaliseListParam( params.get("abstracts", []) )
self._abstracts = []
abMgr = self._conf.getAbstractMgr()
for id in self._abstractIds:
self._abstracts.append(abMgr.getAbstractById(id))
def _process( self ):
count = 0
for abstract in self._abstracts:
dict = AbstractNotification(self._conf, abstract).getDict()
s = self._notifTpl.getTplSubject()
b = self._notifTpl.getTplBody()
maildata = { "fromAddr": self._notifTpl.getFromAddr(), "toList": [abstract.getSubmitter().getEmail()], "subject": s%dict, "body": text }
GenericMailer.send(GenericNotification(maildata))
self._conf.newSentMail(abstract.getSubmitter(), mail.getSubject(), b%dict)
count += 1
#self._redirect(urlHandlers.UHConfAbstractManagment.getURL(self._conf))
p = conferences.WPAbstractSendNotificationMail(self, self._conf, count )
return p.display()
class RHAbstractsToPDF(RHTrackAbstractsBase):
def _checkParams( self, params ):
RHTrackAbstractsBase._checkParams( self, params )
self._abstractIds = self._normaliseListParam( params.get("abstracts", []) )
def _process(self):
tz = self._conf.getTimezone()
if not self._abstractIds:
return "No abstract to print"
pdf = TrackManagerAbstractsToPDF(self._conf, self._track, self._abstractIds,tz=tz)
return send_file('Abstracts.pdf', pdf.generate(), 'PDF')
class RHAbstractIntComments( RHTrackAbstractBase ):
def _process( self ):
p = tracks.WPModAbstractIntComments(self,self._track,self._abstract)
return p.display()
class RHAbstractIntCommentNew(RHAbstractIntComments):
def _checkParams(self,params):
RHAbstractIntComments._checkParams(self,params)
self._action=""
if params.has_key("OK"):
self._action="UPDATE"
self._content=params.get("content","")
elif params.has_key("CANCEL"):
self._action="CANCEL"
def _process( self ):
if self._action=="UPDATE":
c=review.Comment(self._getUser())
c.setContent(self._content)
self._abstract.addIntComment(c)
self._redirect(urlHandlers.UHTrackAbstractModIntComments.getURL(self._track,self._abstract))
return
elif self._action=="CANCEL":
self._redirect(urlHandlers.UHTrackAbstractModIntComments.getURL(self._track,self._abstract))
return
p = tracks.WPModAbstractIntCommentNew(self,self._track,self._abstract)
return p.display()
class RHAbstractIntCommentBase(RHTrackAbstractBase):
def _checkParams(self,params):
RHTrackAbstractBase._checkParams(self,params)
id=params.get("intCommentId","")
if id=="":
raise MaKaCError( _("the internal comment identifier hasn't been specified"))
self._comment=self._abstract.getIntCommentById(id)
class RHAbstractIntCommentRem(RHAbstractIntCommentBase):
def _process(self):
self._abstract.removeIntComment(self._comment)
self._redirect(urlHandlers.UHTrackAbstractModIntComments.getURL(self._track,self._abstract))
class RHAbstractIntCommentEdit(RHAbstractIntCommentBase):
def _checkParams(self,params):
RHAbstractIntCommentBase._checkParams(self,params)
self._action=""
if params.has_key("OK"):
self._action="UPDATE"
self._content=params.get("content","")
elif params.has_key("CANCEL"):
self._action="CANCEL"
def _process(self):
if self._action=="UPDATE":
self._comment.setContent(self._content)
self._redirect(urlHandlers.UHTrackAbstractModIntComments.getURL(self._track,self._abstract))
return
elif self._action=="CANCEL":
self._redirect(urlHandlers.UHTrackAbstractModIntComments.getURL(self._track,self._abstract))
return
p=tracks.WPModAbstractIntCommentEdit(self,self._track,self._comment)
return p.display()
class RHAbstractsParticipantList(RHTrackAbstractsBase):
def _checkParams( self, params ):
RHTrackAbstractsBase._checkParams( self, params )
self._abstractIds = self._normaliseListParam( params.get("abstracts", []) )
self._displayedGroups = params.get("displayedGroups", [])
if type(self._displayedGroups) != list:
self._displayedGroups = [self._displayedGroups]
self._clickedGroup = params.get("clickedGroup","")
def _setGroupsToDisplay(self):
if self._clickedGroup in self._displayedGroups:
self._displayedGroups.remove(self._clickedGroup)
else:
self._displayedGroups.append(self._clickedGroup)
def _process( self ):
if not self._abstractIds:
return "<table align=\"center\" width=\"100%%\"><tr><td>There are no abstracts</td></tr></table>"
submitters = OOBTree()
primaryAuthors = OOBTree()
coAuthors = OOBTree()
submitterEmails = set()
primaryAuthorEmails = set()
coAuthorEmails = set()
self._setGroupsToDisplay()
abMgr = self._conf.getAbstractMgr()
for abstId in self._abstractIds:
abst = abMgr.getAbstractById(abstId)
#Submitters
subm = abst.getSubmitter()
keySB = "%s-%s-%s"%(subm.getSurName().lower(), subm.getFirstName().lower(), subm.getEmail().lower())
submitters[keySB] = subm
submitterEmails.add(subm.getEmail())
#Primary authors
for pAut in abst.getPrimaryAuthorList():
keyPA = "%s-%s-%s"%(pAut.getSurName().lower(), pAut.getFirstName().lower(), pAut.getEmail().lower())
primaryAuthors[keyPA] = pAut
primaryAuthorEmails.add(pAut.getEmail())
#Co-authors
for coAut in abst.getCoAuthorList():
keyCA = "%s-%s-%s"%(coAut.getSurName().lower(), coAut.getFirstName().lower(), coAut.getEmail().lower())
coAuthors[keyCA] = coAut
coAuthorEmails.add(coAut.getEmail())
emailList = {"submitters":{},"primaryAuthors":{},"coAuthors":{}}
emailList["submitters"]["tree"] = submitters
emailList["primaryAuthors"]["tree"] = primaryAuthors
emailList["coAuthors"]["tree"] = coAuthors
emailList["submitters"]["emails"] = submitterEmails
emailList["primaryAuthors"]["emails"] = primaryAuthorEmails
emailList["coAuthors"]["emails"] = coAuthorEmails
p = conferences.WPConfParticipantList(self, self._target.getConference(), emailList, self._displayedGroups, self._abstractIds )
return p.display()
class ContribFilterCrit(filters.FilterCriteria):
_availableFields = { \
contribFilters.TypeFilterField.getId():contribFilters.TypeFilterField, \
contribFilters.StatusFilterField.getId():contribFilters.StatusFilterField, \
contribFilters.AuthorFilterField.getId():contribFilters.AuthorFilterField, \
contribFilters.SessionFilterField.getId():contribFilters.SessionFilterField }
class ContribSortingCrit(filters.SortingCriteria):
_availableFields={\
contribFilters.NumberSF.getId():contribFilters.NumberSF,
contribFilters.DateSF.getId():contribFilters.DateSF,
contribFilters.ContribTypeSF.getId():contribFilters.ContribTypeSF,
contribFilters.TrackSF.getId():contribFilters.TrackSF,
contribFilters.SpeakerSF.getId():contribFilters.SpeakerSF,
contribFilters.BoardNumberSF.getId():contribFilters.BoardNumberSF,
contribFilters.SessionSF.getId():contribFilters.SessionSF,
contribFilters.TitleSF.getId():contribFilters.TitleSF
}
class RHContribList(RHTrackAbstractsBase):
def _checkProtection(self):
RHTrackAbstractsBase._checkProtection(self, False)
def _checkParams( self, params ):
RHTrackAbstractsBase._checkParams(self,params)
self._conf=self._track.getConference()
filterUsed=params.has_key("OK")
#sorting
self._sortingCrit=ContribSortingCrit([params.get("sortBy","number").strip()])
self._order = params.get("order","down")
#filtering
filter = {"author":params.get("authSearch","")}
ltypes = []
if not filterUsed:
for type in self._conf.getContribTypeList():
ltypes.append(type.getId())
else:
for id in self._normaliseListParam(params.get("types",[])):
ltypes.append(id)
filter["type"]=ltypes
lsessions= []
if not filterUsed:
for session in self._conf.getSessionList():
lsessions.append( session.getId() )
filter["session"]=self._normaliseListParam(params.get("sessions",lsessions))
lstatus=[]
if not filterUsed:
for status in ContribStatusList().getList():
lstatus.append(ContribStatusList().getId(status))
filter["status"]=self._normaliseListParam(params.get("status",lstatus))
self._filterCrit=ContribFilterCrit(self._conf,filter)
typeShowNoValue,sessionShowNoValue=True,True
if filterUsed:
typeShowNoValue = params.has_key("typeShowNoValue")
sessionShowNoValue = params.has_key("sessionShowNoValue")
self._filterCrit.getField("type").setShowNoValue(typeShowNoValue)
self._filterCrit.getField("session").setShowNoValue(sessionShowNoValue)
def _process( self ):
p = tracks.WPModContribList(self,self._track)
return p.display( filterCrit= self._filterCrit, sortingCrit=self._sortingCrit, order=self._order )
class RHContribsActions:
"""
class to select the action to do with the selected contributions
"""
def process(self, params):
if 'PDF' in params:
return RHContribsToPDF().process(params)
elif 'AUTH' in params:
return RHContribsParticipantList().process(params)
return "no action to do"
class RHContribsToPDF(RHTrackAbstractsBase):
def _checkProtection(self):
RHTrackAbstractsBase._checkProtection(self, False)
def _checkParams( self, params ):
RHTrackAbstractsBase._checkParams( self, params )
self._contribIds = self._normaliseListParam( params.get("contributions", []) )
self._contribs = []
for id in self._contribIds:
self._contribs.append(self._conf.getContributionById(id))
def _process(self):
tz = self._conf.getTimezone()
if not self._contribs:
return "No contributions to print"
pdf = ContribsToPDF(self._conf, self._contribs, tz)
return send_file('Contributions.pdf', pdf.generate(), 'PDF')
class RHContribsParticipantList(RHTrackAbstractsBase):
def _checkProtection( self ):
if len( self._conf.getCoordinatedTracks( self._getUser() ) ) == 0:
RHTrackAbstractsBase._checkProtection( self )
def _checkParams( self, params ):
RHTrackAbstractsBase._checkParams( self, params )
self._contribIds = self._normaliseListParam( params.get("contributions", []) )
self._displayedGroups = self._normaliseListParam( params.get("displayedGroups", []) )
self._clickedGroup = params.get("clickedGroup","")
def _setGroupsToDisplay(self):
if self._clickedGroup in self._displayedGroups:
self._displayedGroups.remove(self._clickedGroup)
else:
self._displayedGroups.append(self._clickedGroup)
def _process( self ):
if not self._contribIds:
return "<table align=\"center\" width=\"100%%\"><tr><td>There are no contributions</td></tr></table>"
speakers = OOBTree()
primaryAuthors = OOBTree()
coAuthors = OOBTree()
speakerEmails = set()
primaryAuthorEmails = set()
coAuthorEmails = set()
self._setGroupsToDisplay()
for contribId in self._contribIds:
contrib = self._conf.getContributionById(contribId)
#Primary authors
for pAut in contrib.getPrimaryAuthorList():
if pAut.getFamilyName().lower().strip() == "" and pAut.getFirstName().lower().strip() == "" and pAut.getEmail().lower().strip() == "":
continue
keyPA = "%s-%s-%s"%(pAut.getFamilyName().lower(), pAut.getFirstName().lower(), pAut.getEmail().lower())
if contrib.isSpeaker(pAut):
speakers[keyPA] = pAut
speakerEmails.add(pAut.getEmail())
primaryAuthors[keyPA] = pAut
primaryAuthorEmails.add(pAut.getEmail())
#Co-authors
for coAut in contrib.getCoAuthorList():
if coAut.getFamilyName().lower().strip() == "" and coAut.getFirstName().lower().strip() == "" and coAut.getEmail().lower().strip() == "":
continue
keyCA = "%s-%s-%s"%(coAut.getFamilyName().lower(), coAut.getFirstName().lower(), coAut.getEmail().lower())
if contrib.isSpeaker(coAut):
speakers[keyCA] = coAut
speakerEmails.add(coAut.getEmail())
coAuthors[keyCA] = coAut
coAuthorEmails.add(coAut.getEmail())
emailList = {"speakers":{},"primaryAuthors":{},"coAuthors":{}}
emailList["speakers"]["tree"] = speakers
emailList["primaryAuthors"]["tree"] = primaryAuthors
emailList["coAuthors"]["tree"] = coAuthors
emailList["speakers"]["emails"] = speakerEmails
emailList["primaryAuthors"]["emails"] = primaryAuthorEmails
emailList["coAuthors"]["emails"] = coAuthorEmails
p = tracks.WPModParticipantList(self, self._target, emailList, self._displayedGroups, self._contribIds )
return p.display()
class RHContribQuickAccess(RHTrackAbstractsBase):
def _checkProtection(self):
RHTrackAbstractsBase._checkProtection(self, False)
def _checkParams(self,params):
RHTrackAbstractsBase._checkParams(self,params)
self._contrib=self._target.getConference().getContributionById(params.get("selContrib",""))
def _process(self):
url=urlHandlers.UHTrackModContribList.getURL(self._target)
if self._contrib is not None:
url=urlHandlers.UHContributionModification.getURL(self._contrib)
self._redirect(url)
|
XeCycle/indico
|
indico/MaKaC/webinterface/rh/trackModif.py
|
Python
|
gpl-3.0
| 39,643 | 0.016195 |
import logging
from functools import partial
from unittest import (
TestCase,
mock,
)
from pcs.common import file_type_codes
from pcs.common.reports import ReportItemSeverity as severity
from pcs.common.reports import codes as report_codes
from pcs.lib.env import LibraryEnvironment
from pcs_test.tools.assertions import assert_raise_library_error
from pcs_test.tools.custom_mock import MockLibraryReportProcessor
from pcs_test.tools.misc import create_patcher
patch_env = create_patcher("pcs.lib.env")
patch_env_object = partial(mock.patch.object, LibraryEnvironment)
class LibraryEnvironmentTest(TestCase):
def setUp(self):
self.mock_logger = mock.MagicMock(logging.Logger)
self.mock_reporter = MockLibraryReportProcessor()
def test_logger(self):
env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
self.assertEqual(self.mock_logger, env.logger)
def test_report_processor(self):
env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
self.assertEqual(self.mock_reporter, env.report_processor)
def test_user_set(self):
user = "testuser"
env = LibraryEnvironment(
self.mock_logger, self.mock_reporter, user_login=user
)
self.assertEqual(user, env.user_login)
def test_user_not_set(self):
env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
self.assertEqual(None, env.user_login)
def test_usergroups_set(self):
groups = ["some", "group"]
env = LibraryEnvironment(
self.mock_logger, self.mock_reporter, user_groups=groups
)
self.assertEqual(groups, env.user_groups)
def test_usergroups_not_set(self):
env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
self.assertEqual([], env.user_groups)
class GhostFileCodes(TestCase):
def setUp(self):
self.mock_logger = mock.MagicMock(logging.Logger)
self.mock_reporter = MockLibraryReportProcessor()
def _fixture_get_env(self, cib_data=None, corosync_conf_data=None):
return LibraryEnvironment(
self.mock_logger,
self.mock_reporter,
cib_data=cib_data,
corosync_conf_data=corosync_conf_data,
)
def test_nothing(self):
self.assertEqual(self._fixture_get_env().ghost_file_codes, [])
def test_corosync(self):
self.assertEqual(
self._fixture_get_env(corosync_conf_data="x").ghost_file_codes,
[file_type_codes.COROSYNC_CONF],
)
def test_cib(self):
self.assertEqual(
self._fixture_get_env(cib_data="x").ghost_file_codes,
[file_type_codes.CIB],
)
def test_all(self):
self.assertEqual(
self._fixture_get_env(
cib_data="x",
corosync_conf_data="x",
).ghost_file_codes,
sorted([file_type_codes.COROSYNC_CONF, file_type_codes.CIB]),
)
@patch_env("CommandRunner")
class CmdRunner(TestCase):
def setUp(self):
self.mock_logger = mock.MagicMock(logging.Logger)
self.mock_reporter = MockLibraryReportProcessor()
def test_no_options(self, mock_runner):
expected_runner = mock.MagicMock()
mock_runner.return_value = expected_runner
env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
runner = env.cmd_runner()
self.assertEqual(expected_runner, runner)
mock_runner.assert_called_once_with(
self.mock_logger,
self.mock_reporter,
{
"LC_ALL": "C",
},
)
def test_user(self, mock_runner):
expected_runner = mock.MagicMock()
mock_runner.return_value = expected_runner
user = "testuser"
env = LibraryEnvironment(
self.mock_logger, self.mock_reporter, user_login=user
)
runner = env.cmd_runner()
self.assertEqual(expected_runner, runner)
mock_runner.assert_called_once_with(
self.mock_logger,
self.mock_reporter,
{
"CIB_user": user,
"LC_ALL": "C",
},
)
@patch_env("create_tmp_cib")
def test_dump_cib_file(self, mock_tmpfile, mock_runner):
tmp_file_name = "a file"
expected_runner = mock.MagicMock()
mock_runner.return_value = expected_runner
mock_instance = mock.MagicMock()
mock_instance.name = tmp_file_name
mock_tmpfile.return_value = mock_instance
env = LibraryEnvironment(
self.mock_logger, self.mock_reporter, cib_data="<cib />"
)
runner = env.cmd_runner()
self.assertEqual(expected_runner, runner)
mock_runner.assert_called_once_with(
self.mock_logger,
self.mock_reporter,
{
"LC_ALL": "C",
"CIB_file": tmp_file_name,
},
)
mock_tmpfile.assert_called_once_with(self.mock_reporter, "<cib />")
@patch_env_object("cmd_runner", lambda self: "runner")
class EnsureValidWait(TestCase):
def setUp(self):
self.create_env = partial(
LibraryEnvironment,
mock.MagicMock(logging.Logger),
MockLibraryReportProcessor(),
)
@property
def env_live(self):
return self.create_env()
@property
def env_fake(self):
return self.create_env(cib_data="<cib/>")
def test_not_raises_if_waiting_false_no_matter_if_env_is_live(self):
self.env_live.ensure_wait_satisfiable(False)
self.env_fake.ensure_wait_satisfiable(False)
def test_raises_when_is_not_live(self):
env = self.env_fake
assert_raise_library_error(
lambda: env.ensure_wait_satisfiable(10),
(
severity.ERROR,
report_codes.WAIT_FOR_IDLE_NOT_LIVE_CLUSTER,
{},
None,
),
)
@patch_env("get_valid_timeout_seconds")
def test_do_checks(self, get_valid_timeout):
timeout = 10
env = self.env_live
get_valid_timeout.return_value = timeout
env.ensure_wait_satisfiable(timeout)
get_valid_timeout.assert_called_once_with(timeout)
|
tomjelinek/pcs
|
pcs_test/tier0/lib/test_env.py
|
Python
|
gpl-2.0
| 6,324 | 0 |
#!/usr/bin/python3
try:
import qr_tools as qrTools # Module for this project
except:
import gen_3dwallet.qr_tools as qrTools
try:
import TextGenerator as textGen # Module for this project
except:
import gen_3dwallet.TextGenerator as textGen
import bitcoin # sudo pip3 install bitcoin
import argparse
import time
import math
import sys
import os
import distutils.spawn
def parse_args():
parser = argparse.ArgumentParser(description='Generate an STL file of a 3D-printable bitcoin, litecoin, dogecoin, or other type of coin.', formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-ve', '--version', dest='versionByte', type=int, default=0, help='Version byte of the address (for other altcoins).\nBitcoin: 0 (Default)\n Litecoin: 48\n Dogecoin: 30')
parser.add_argument('-ct', '--coin-title', dest='coinTitle', type=str, default="Bitcoin", help='Title of the coin, used for design purposes \n(Default: Bitcoin)')
parser.add_argument('-ls', '--layout-style', dest='layoutStyle', type=int, default=1, help="Layout style of the wallet.\n1) Address on the Front, Private Key on the Back (Default)\n2) Private Key Only\n3) Address Only (don't forget to export the Private Keys after)")
    parser.add_argument('-wi', '--width', dest='walletWidth', type=float, default=54.0, help='The width of the wallet in mm. The length is calculated automatically. Default option is approximately standard credit card length and width. \n(Default: 54.0)')
parser.add_argument('-he', '--height', dest='walletHeight', type=float, default=8.0, help='The height of the wallet in mm. \n(Default: 8)')
    parser.add_argument('-bo', '--black-offset', dest='blackOffset', type=int, default=-30, help='The percentage of the height that the black part of the QR code, and the text, will be raised or lowered by.\nNegative number for lowered, positive for raised. Option must be greater than -90. \n(Default: -30)')
parser.add_argument('-ec', '--qr-error-correction', dest='errorCorrection', type=str, default="M", help='The percentage of the QR codes that can be destroyed before they are irrecoverable\nL) 7 percent\nM) 15 percent (Default)\nQ) 25 percent\nH) 30 percent')
    parser.add_argument('-dc', '--disable-round-corners', dest='roundCorners', action='store_false', help="Disable rounding of the corners (four short edges) of the wallet. \n(Default: corners are rounded)")
parser.add_argument('-co', '--copies', dest='copies', type=int, default=5, help='The number of wallets to generate. These will all be unique and randomly-generate wallets (not copies). \n(Default: 5)')
parser.add_argument('-sd', '--openscad-exe', dest='scadExe', type=str, default="openscad", help='The location and filename of the command line tools for OpenSCAD (leave as default if it is installed as a command [ie. Linux])\nIn most cases on Windows and Mac, the executable will be found automatically.')
parser.add_argument('-o', '--stl-folder', dest='outputSTLFolder', type=str, default="./WalletsOut/", help='The output folder to export the STL files into\n(Default: ./WalletsOut/)')
parser.add_argument('-oc', '--scad-folder', dest='outputSCADFolder', type=str, default='', help='The output folder to store the SCAD generation files in (optional, only used for debugging)\n(Default: disabled)')
parser.add_argument('-ea', '--export-address-csv', dest='exportAddressCSV', type=str, default='', help='The output CSV file to export the address list to (optional)\n(Default: disabled)')
parser.add_argument('-ep', '--export-privkey-csv', dest='exportPrivkeyCSV', type=str, default='', help='The output CSV file to export the private key list to (optional)\n(Default: disabled)')
parser.add_argument('-eap', '--export-address-privkey-csv', dest='exportAPCSV', type=str, default='', help='The output CSV file to export the address and private key list to, in the format of "address,privkey" (optional)\n(Default: disabled)')
parser.add_argument('-epa', '--export-privkey-address-csv', dest='exportPACSV', type=str, default='', help='The output CSV file to export the address and private key list to, in the format of "privkey,address" (optional)\n(Default: disabled)')
return parser.parse_args()
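# A minimal usage sketch (hypothetical, not part of the original CLI): assuming
# some console entry point or wrapper script calls main(), the flags defined
# above could be combined like this to print five Dogecoin wallets:
#
#   gen-3dwallet -ve 30 -ct Dogecoin -ls 1 -co 5 -o ./WalletsOut/
#
# The "gen-3dwallet" command name is an assumption used only for illustration.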
def main():
args = parse_args()
# Set DEBUG variable for testing purposes (changing styling)
# If true, prints the SCAD to the terminal and then breaks after first generation
DEBUG = False
# Generate the addresses
if args.copies < 1:
print("Please enter a valid number of copies (-co flag), and try again.")
sys.exit()
else: # Use an else statement here just in case we add the option to import a CSV file with the keys (generated somewhere else)
walletDataList = []
for i in range(args.copies):
thisData = {}
# Generate the addresses with keys
thisData["privateKey"] = bitcoin.main.random_key() # Secure: uses random library, time library and proprietary function
thisData["wif"] = bitcoin.encode_privkey(thisData["privateKey"], "wif", args.versionByte)
thisData["address"] = bitcoin.privkey_to_address(thisData["privateKey"], args.versionByte)
# Generate the QR codes
if args.errorCorrection.upper() not in ["L","M","Q","H"]:
print("Please select a valid QR Error Correction value (L, M, Q, or H).")
sys.exit()
thisData["wifQR"] = qrTools.getQRArray(thisData["wif"], args.errorCorrection.upper())
thisData["addressQR"] = qrTools.getQRArray(thisData["address"], args.errorCorrection.upper())
# Reverse them or else they appear backwards (unknown reason)
thisData["wifQR"] = list(reversed(thisData["wifQR"]))
thisData["addressQR"] = list(reversed(thisData["addressQR"]))
# Append ALL the wallet information, just in case we want to do something with it later
walletDataList.append(thisData)
# Validate other args and set some constants
walletWidth = args.walletWidth
walletHeight = args.walletHeight
if args.layoutStyle == 1 or args.layoutStyle == 2 or args.layoutStyle == 3:
walletLength = walletWidth*1.6 # Approximately the same ratio as a credit card
else:
print("Please choose a valid layout style option.")
sys.exit()
if args.blackOffset < -90.0:
print("Please ensure that --black-offset (-bo flag) is set correctly, and is greater than -90.")
sys.exit()
textDepth = (args.blackOffset/100) * walletHeight
# Check the openscad command
scadExe = args.scadExe
if args.scadExe == "openscad" and not distutils.spawn.find_executable("openscad"):
if os.path.isfile("/Applications/OpenSCAD.app/Contents/MacOS/OpenSCAD"):
print("Info: OpenSCAD found in Applications folder on Mac")
scadExe = "/Applications/OpenSCAD.app/Contents/MacOS/OpenSCAD"
elif os.path.isfile("%PROGRAMFILES%\OpenSCAD\openscad.exe"):
print("Info: OpenSCAD found in Program Files on Windows")
scadExe = "%PROGRAMFILES%\OpenSCAD\openscad.exe"
elif os.path.isfile("%PROGRAMFILES(x86)%\OpenSCAD\openscad.exe"):
print("Info: OpenSCAD found in Program Files (x86) on Windows")
scadExe = "%PROGRAMFILES(x86)%\OpenSCAD\openscad.exe"
if not distutils.spawn.find_executable(scadExe):
print("Please install OpenSCAD or specify the location of it with --openscad-exe.")
sys.exit()
# Set the master SCAD variable
masterSCAD = "// SCAD Code Generated By 3DGen.py - 3D Wallet Generator\n\n" # The beginning of the wallet are identical
scadOutputs = [] # Generated from loop for each wallet (different addresses)
# Include some modules at the beginning
masterSCAD += "// Import some modules\n"
masterSCAD += """
$fn=100;
module createMeniscus(h,radius)difference(){translate([radius/2+0.1,radius/2+0.1,0]){cube([radius+0.2,radius+0.1,h+0.2],center=true);}cylinder(h=h+0.2,r=radius,center=true);}
module roundCornersCube(x,y,z)translate([x/2,y/2,z/2]){difference(){r=((x+y)/2)*0.052;cube([x,y,z],center=true);translate([x/2-r,y/2-r]){rotate(0){createMeniscus(z,r);}}translate([-x/2+r,y/2-r]){rotate(90){createMeniscus(z,r);}}translate([-x/2+r,-y/2+r]){rotate(180){createMeniscus(z,r);}}translate([x/2-r,-y/2+r]){rotate(270){createMeniscus(z,r);}}}}
""" # The rounding corners modules for creating a rounded rectangle
masterSCAD += "\n"
# Draw the main prism
if args.roundCorners:
mainCube = "roundCornersCube(" + str(walletLength) + "," + str(walletWidth) + "," + str(walletHeight) + ");"
else:
mainCube = "cube([" + str(walletLength) + "," + str(walletWidth) + "," + str(walletHeight) + "]);"
mainCube += "\n\n"
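    # For reference (values follow from the defaults above, not from the source),
    # mainCube now holds a single SCAD statement such as
    #   roundCornersCube(86.4,54.0,8.0);
    # or, when --disable-round-corners is given,
    #   cube([86.4,54.0,8.0]);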
# Init a variable to keep all the additive/subtractive parts
finalParts = []
# Init variables to keep the CSV output data in
addressOut = []
privkeyOut = []
APOut = []
PAOut = []
# Set a counter for naming the files
filenameCounter = 1
# Break into the loop for each wallet
for data in walletDataList:
# 'data' = wif, address, wifQR, addressQR
# Generate the texts
addressLine1 = data["address"][:math.ceil(len(data["address"])/2.0)]
addressLine2 = data["address"][math.ceil(len(data["address"])/2.0):]
wifLine1 = data["wif"][:17]
wifLine2 = data["wif"][17:34]
wifLine3 = data["wif"][34:]
addressLine1Dots = textGen.getArray(addressLine1)
addressLine2Dots = textGen.getArray(addressLine2)
privkeyLine1Dots = textGen.getArray(wifLine1)
privkeyLine2Dots = textGen.getArray(wifLine2)
privkeyLine3Dots = textGen.getArray(wifLine3)
bigTitle = textGen.getArray("3D " + args.coinTitle + " Wallet")
addressTitle = textGen.getArray("Address")
privkeyTitle = textGen.getArray("Private Key")
# Create the big title union so that it can be sized and moved
bigTitleUnion = ""
for rowIndex in range(len(bigTitle)):
row = bigTitle[rowIndex]
for colIndex in range(len(row)):
if row[colIndex] == '1':
translateHeight = walletHeight if textDepth>0 else walletHeight+textDepth
bigTitleUnion += "translate([colIndex,rowIndex,translateHeight]){cube([1,1,textDepth]);}".replace('colIndex',str(colIndex)).replace('rowIndex',str(rowIndex)).replace('textDepth',str(abs(textDepth))).replace('translateHeight',str(translateHeight))
# Translate the title to where it goes
bigTitleFinal = "translate([(1/17)*length,(14/17)*width,0]){resize([(15/17)*length,0,0],auto=[true,true,false]){bigTitleUnion}}".replace('length',str(walletLength)).replace('width',str(walletWidth)).replace('bigTitleUnion',bigTitleUnion)
finalParts.append(bigTitleFinal+"\n\n")
if args.layoutStyle == 1:
# Need to copy it on to the backside as well - rotate then move it, and then create a union of the two titles (front and back)
bigTitle2 = "translate([length,0,height]){rotate(180,v=[0,1,0]){bigTitleFinal}}".replace('length',str(walletLength)).replace('height',str(walletHeight)).replace('bigTitleFinal',bigTitleFinal).replace('translateHeight',str(translateHeight))
finalParts.append(bigTitle2+"\n\n")
# Draw the word "Address" on the front, and draw on the actual address
if args.layoutStyle == 1 or args.layoutStyle == 3:
# Draw the address on the front
addressParts = []
# Create the address title union and size/move it
addressTitleUnion = "union(){"
for rowIndex in range(len(addressTitle)):
row = addressTitle[rowIndex]
for colIndex in range(len(row)):
if row[colIndex] == '1':
translateHeight = walletHeight if textDepth>0 else walletHeight+textDepth
addressTitleUnion += "translate([colIndex,rowIndex,translateHeight]){cube([1,1,textDepth]);}".replace('colIndex',str(colIndex)).replace('rowIndex',str(rowIndex)).replace('textDepth',str(abs(textDepth))).replace('translateHeight',str(translateHeight))
addressTitleUnion += "}"
addressTitleFinal = "translate([(10/17)*length,(6/11)*width,0]){resize([0,(4/55)*width,0],auto=[true,true,false]){addressTitleUnion}}\n\n".replace('length',str(walletLength)).replace('width',str(walletWidth)).replace('addressTitleUnion',addressTitleUnion)
addressParts.append(addressTitleFinal)
# Create the first line of the address
addressLine1Union = "union(){"
for rowIndex in range(len(addressLine1Dots)):
row = addressLine1Dots[rowIndex]
for colIndex in range(len(row)):
if row[colIndex] == '1':
translateHeight = walletHeight if textDepth>0 else walletHeight+textDepth
addressLine1Union += "translate([colIndex,rowIndex,translateHeight]){cube([1,1,textDepth]);}".replace('colIndex',str(colIndex)).replace('rowIndex',str(rowIndex)).replace('textDepth',str(abs(textDepth))).replace('translateHeight',str(translateHeight))
addressLine1Union += "}"
addressLine1Final = "translate([(8.2/17)*length,(5/11)*width,0]){resize([0,(3/55)*width,0],auto=[true,true,false]){addressLine1Union}}\n\n".replace('length',str(walletLength)).replace('width',str(walletWidth)).replace('addressLine1Union',addressLine1Union)
addressParts.append(addressLine1Final)
# Create the second line of the address
addressLine2Union = "union(){"
for rowIndex in range(len(addressLine2Dots)):
row = addressLine2Dots[rowIndex]
for colIndex in range(len(row)):
if row[colIndex] == '1':
translateHeight = walletHeight if textDepth>0 else walletHeight+textDepth
addressLine2Union += "translate([colIndex,rowIndex,translateHeight]){cube([1,1,textDepth]);}".replace('colIndex',str(colIndex)).replace('rowIndex',str(rowIndex)).replace('textDepth',str(abs(textDepth))).replace('translateHeight',str(translateHeight))
addressLine2Union += "}"
addressLine2Final = "translate([(8.2/17)*length,(4.1/11)*width,0]){resize([0,(3/55)*width,0],auto=[true,true,false]){addressLine2Union}}\n\n".replace('length',str(walletLength)).replace('width',str(walletWidth)).replace('addressLine2Union',addressLine2Union)
addressParts.append(addressLine2Final)
# Create the QR code
addressQRUnion = "union(){"
for rowIndex in range(len(data["addressQR"])):
row = data["addressQR"][rowIndex]
for colIndex in range(len(row)):
if row[colIndex] == 0:
translateHeight = walletHeight if textDepth>0 else walletHeight+textDepth
addressQRUnion += "translate([colIndex,rowIndex,translateHeight]){cube([1,1,textDepth]);}".replace('colIndex',str(colIndex)).replace('rowIndex',str(rowIndex)).replace('textDepth',str(abs(textDepth))).replace('translateHeight',str(translateHeight))
addressQRUnion += "}"
addressQRFinal = "translate([(0.6/17)*length,(0.6/11)*width,0]){resize([0,(8/12)*width,0],auto=[true,true,false]){addressQRUnion}}\n\n".replace('length',str(walletLength)).replace('width',str(walletWidth)).replace('addressQRUnion',addressQRUnion)
addressParts.append(addressQRFinal)
finalParts.extend(addressParts)
# Draw all the things having to do with the private key
if args.layoutStyle == 1 or args.layoutStyle == 2:
privkeyParts = []
# Create the privkey title union and size/move it
privkeyTitleUnion = "union(){"
for rowIndex in range(len(privkeyTitle)):
row = privkeyTitle[rowIndex]
for colIndex in range(len(row)):
if row[colIndex] == '1':
translateHeight = walletHeight if textDepth>0 else walletHeight+textDepth
privkeyTitleUnion += "translate([colIndex,rowIndex,translateHeight]){cube([1,1,textDepth]);}".replace('colIndex',str(colIndex)).replace('rowIndex',str(rowIndex)).replace('textDepth',str(abs(textDepth))).replace('translateHeight',str(translateHeight))
privkeyTitleUnion += "}"
privkeyTitleFinal = "translate([(8.7/17)*length,(7/11)*width,0]){resize([0,(4/55)*width,0],auto=[true,true,false]){privkeyTitleUnion}}\n\n".replace('length',str(walletLength)).replace('width',str(walletWidth)).replace('privkeyTitleUnion',privkeyTitleUnion)
privkeyParts.append(privkeyTitleFinal)
# Create the first line of the privkey
privkeyLine1Union = "union(){"
for rowIndex in range(len(privkeyLine1Dots)):
row = privkeyLine1Dots[rowIndex]
for colIndex in range(len(row)):
if row[colIndex] == '1':
translateHeight = walletHeight if textDepth>0 else walletHeight+textDepth
privkeyLine1Union += "translate([colIndex,rowIndex,translateHeight]){cube([1,1,textDepth]);}".replace('colIndex',str(colIndex)).replace('rowIndex',str(rowIndex)).replace('textDepth',str(abs(textDepth))).replace('translateHeight',str(translateHeight))
privkeyLine1Union += "}"
privkeyLine1Final = "translate([(8.2/17)*length,(6/11)*width,0]){resize([0,(3/55)*width,0],auto=[true,true,false]){privkeyLine1Union}}\n\n".replace('length',str(walletLength)).replace('width',str(walletWidth)).replace('privkeyLine1Union',privkeyLine1Union)
privkeyParts.append(privkeyLine1Final)
# Create the second line of the privkey
privkeyLine2Union = "union(){"
for rowIndex in range(len(privkeyLine2Dots)):
row = privkeyLine2Dots[rowIndex]
for colIndex in range(len(row)):
if row[colIndex] == '1':
translateHeight = walletHeight if textDepth>0 else walletHeight+textDepth
privkeyLine2Union += "translate([colIndex,rowIndex,translateHeight]){cube([1,1,textDepth]);}".replace('colIndex',str(colIndex)).replace('rowIndex',str(rowIndex)).replace('textDepth',str(abs(textDepth))).replace('translateHeight',str(translateHeight))
privkeyLine2Union += "}"
privkeyLine2Final = "translate([(8.2/17)*length,(5.1/11)*width,0]){resize([0,(3/55)*width,0],auto=[true,true,false]){privkeyLine2Union}}\n\n".replace('length',str(walletLength)).replace('width',str(walletWidth)).replace('privkeyLine2Union',privkeyLine2Union)
privkeyParts.append(privkeyLine2Final)
# Create the third line of the privkey
privkeyLine3Union = "union(){"
for rowIndex in range(len(privkeyLine3Dots)):
row = privkeyLine3Dots[rowIndex]
for colIndex in range(len(row)):
if row[colIndex] == '1':
translateHeight = walletHeight if textDepth>0 else walletHeight+textDepth
privkeyLine3Union += "translate([colIndex,rowIndex,translateHeight]){cube([1,1,textDepth]);}".replace('colIndex',str(colIndex)).replace('rowIndex',str(rowIndex)).replace('textDepth',str(abs(textDepth))).replace('translateHeight',str(translateHeight))
privkeyLine3Union += "}"
privkeyLine3Final = "translate([(8.2/17)*length,(4.2/11)*width,0]){resize([0,(3/55)*width,0],auto=[true,true,false]){privkeyLine3Union}}\n\n".replace('length',str(walletLength)).replace('width',str(walletWidth)).replace('privkeyLine3Union',privkeyLine3Union)
privkeyParts.append(privkeyLine3Final)
# Create the QR code
privkeyQRUnion = "union(){"
for rowIndex in range(len(data["wifQR"])):
row = data["wifQR"][rowIndex]
for colIndex in range(len(row)):
if row[colIndex] == 0:
translateHeight = walletHeight if textDepth>0 else walletHeight+textDepth
privkeyQRUnion += "translate([colIndex,rowIndex,translateHeight]){cube([1,1,textDepth]);}".replace('colIndex',str(colIndex)).replace('rowIndex',str(rowIndex)).replace('textDepth',str(abs(textDepth))).replace('translateHeight',str(translateHeight))
privkeyQRUnion += "}"
privkeyQRFinal = "translate([(0.6/17)*length,(0.6/11)*width,0]){resize([0,(8/12)*width,0],auto=[true,true,false]){privkeyQRUnion}}\n\n".replace('length',str(walletLength)).replace('width',str(walletWidth)).replace('privkeyQRUnion',privkeyQRUnion)
privkeyParts.append(privkeyQRFinal)
if args.layoutStyle == 2:
# Just add it all to the finalParts
finalParts.extend(privkeyParts)
elif args.layoutStyle == 1:
# Rotate it all and then add it to the finalParts
privkeyPartsNew = []
for part in privkeyParts:
privkeyPartsNew.append("translate([length,0,height]){rotate(180,v=[0,1,0]){part}}".replace('length',str(walletLength)).replace('height',str(walletHeight)).replace('part',part).replace('translateHeight',str(translateHeight)))
finalParts.extend(privkeyPartsNew)
# Put it all together
finalSCAD = masterSCAD
if textDepth < 0:
finalSCAD += "difference() {\n\n"
else:
finalSCAD += "union() {\n\n"
finalSCAD += mainCube
finalSCAD += "".join(finalParts)
finalSCAD += "}"
if DEBUG:
print(finalSCAD)
break
if args.outputSCADFolder:
try:
os.makedirs(args.outputSCADFolder)
except FileExistsError:
pass
scadOutFile = open(args.outputSCADFolder + '/wallet' + str(filenameCounter) + '.scad','w')
scadOutFile.write(finalSCAD)
scadOutFile.close()
# Log some info
print("Status: Done generating data for wallet #" + str(filenameCounter) + "...Starting generating STL file")
if args.outputSTLFolder:
try:
os.makedirs(args.outputSTLFolder)
except FileExistsError:
pass
scadOutFile = open('temp.scad','w')
scadOutFile.write(finalSCAD)
scadOutFile.close()
os.system(scadExe + " -o " + args.outputSTLFolder + "/wallet" + str(filenameCounter) + ".stl temp.scad")
try:
os.remove('temp.scad')
except:
pass
else:
print("Please provide a folder to output the STL files.")
# Update the CSV file variables
addressOut.append(data["address"])
privkeyOut.append(data["wif"])
APOut.append(data["address"] + "," + data["wif"])
PAOut.append(data["wif"] + "," + data["address"])
# Print some more stats
print("Status: Done generating STL file (" + str(round(filenameCounter/args.copies*100)) + "% done)")
filenameCounter += 1
# Export the CSV files
if args.exportAddressCSV:
csvFile = open(args.exportAddressCSV,'a')
csvFile.write(','.join(addressOut))
csvFile.close()
if args.exportPrivkeyCSV:
csvFile = open(args.exportPrivkeyCSV,'a')
csvFile.write(','.join(privkeyOut))
csvFile.close()
if args.exportAPCSV:
csvFile = open(args.exportAPCSV,'a')
        csvFile.write('\n'.join(APOut))
csvFile.close()
if args.exportPACSV:
csvFile = open(args.exportPACSV,'a')
        csvFile.write('\n'.join(PAOut))
csvFile.close()
|
btcspry/3d-wallet-generator
|
gen_3dwallet/base.py
|
Python
|
mit
| 24,069 | 0.008226 |
def test(options, buildout):
from subprocess import Popen, PIPE
import os
import sys
python = options['python']
if not os.path.exists(python):
raise IOError("There is no file at %s" % python)
if sys.platform == 'darwin':
output = Popen([python, "-c", "import platform; print (platform.mac_ver())"], stdout=PIPE).communicate()[0]
if not output.startswith("('10."):
raise IOError("Your python at %s doesn't return proper data for platform.mac_ver(), got: %s" % (python, output))
elif sys.platform == 'linux2' and (2, 4) <= sys.version_info < (2, 5):
output = Popen([python, "-c", "import socket; print (hasattr(socket, 'ssl'))"], stdout=PIPE).communicate()[0]
if not output.startswith("True"):
raise IOError("Your python at %s doesn't have ssl support, got: %s" % (python, output))
|
upiq/plonebuild
|
python/src/test-python.py
|
Python
|
mit
| 873 | 0.004582 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Python interface to the Chandra Data Archive (CDA) web services and an
interface to a local disk copy of the Observation Catalog (Ocat).
"""
from pathlib import Path
import re
import warnings
import time
import requests
import numpy as np
import tables
from astropy.table import Table, MaskedColumn
from astropy.coordinates import SkyCoord
from mica.common import MICA_ARCHIVE
__all__ = ['get_archive_file_list', 'get_proposal_abstract',
'get_ocat_web', 'get_ocat_local']
OCAT_TABLE_PATH = Path(MICA_ARCHIVE) / 'ocat_target_table.h5'
OCAT_TABLE_CACHE = {}
URL_CDA_SERVICES = "https://cda.harvard.edu/srservices"
CDA_SERVICES = {
'prop_abstract': 'propAbstract',
'ocat_summary': 'ocatList',
'ocat_details': 'ocatDetails',
'archive_file_list': 'archiveFileList'}
# Units copied from https://github.com/jzuhone/pycda/blob/
# 5a4261328eab989bab91bed17f426ad17d876988/pycda/obscat.py#L38
OCAT_UNITS = {
"app_exp": "ks",
"count_rate": "s**-1",
"est_cnt_rate": "s**-1",
"evfil_lo": "keV",
"evfil_ra": "keV",
"exp_time": "ks",
"f_time": "s",
"forder_cnt_rate": "s**-1",
"soe_roll": "degree",
"x_sim": "mm",
"y_off": "arcmin",
"z_off": "arcmin",
"z_sim": "mm",
}
RETURN_TYPE_DOCS = """If ``return_type='auto'`` the return type is determined by the rules:
- If ``obsid`` is provided
AND the obsid corresponds to an integer
AND the returned result has a single row
THEN the return type is ``dict``
      ELSE the return value is a ``Table``.
If ``return_type='table'`` then always return a ``Table``."""
CDA_PARAM_DOCS = """Additional function args for CDA search parameters::
instrument=ACIS,ACIS-I,ACIS-S,HRC,HRC-I,HRC-S
grating=NONE,LETG,HETG
type=ER,TOO,CAL,GO,GTO,DDT
cycle=00,01,02,03,04, ...
category=SOLAR SYSTEM,
NORMAL GALAXIES,
STARS AND WD,
WD BINARIES AND CV,
BH AND NS BINARIES,
NORMAL GALAXIES
CLUSTERS OF GALAXIES,
ACTIVE GALAXIES AND QUASARS,
GALACTIC DIFFUSE EMISSION AND SURVEYS,
EXTRAGALACTIC DIFFUSE EMISSION AND SURVEYS
jointObservation= HST,XMM,Spitzer,NOAO,NRAO,NuSTAR,Suzaku,Swift,RXTE
status= archived,observed,scheduled, unobserved,untriggered,canceled,deleted
expMode= ACIS TE,ACIS CC,HRC Timing
grid = 'is not null' or 'is null'
Input coordinate specifications::
inputCoordFrame=J2000 (other options: b1950, bxxxx, ec1950, ecxxxx, galactic)
inputCoordEquinox=2000 (4 digit year)
These parameters are single text entries::
target: matches any part of target name
piName: matches any part of PI name
observer: matches any part of observer name
propNum: proposal number
propTitle: matches any part of proposal title
These parameters form a cone search; if you use one you should use them all::
lon
lat
radius (arcmin, default=1.0)
These parameters form a box search; one lon & one lat are required.
Open-ended ranges are allowed. (Such as lonMin=15 with no lonMax.)
::
lonMin
lonMax
latMin
latMax
These parameters are range lists, where the range is indicated by a hyphen (-).
Multiple ranges can be entered separated by commas::
obsid (eg. obsid=100,200-300,600-1000,1800)
seqNum
expTime
appExpTime
countRate
These parameters are date range lists, where the range is
indicated by a hyphen (/). Multiple ranges can be entered separated
by commas. Valid dates are in one of the following formats:
YYYY-MM-DD, YYYY-MM-DD hh:mm, or YYYY-MM-DD hh:mm:ss
::
startDate
releaseDate
These specify how the data is displayed and ordered::
outputCoordFrame=J2000 (other options: b1950, bxxxx, ec1950, ecxxxx, galactic)
outputCoordEquinox=2000 (4 digit year)
outputCoordUnits=decimal (other option: sexagesimal)
sortColumn=ra (other options:
dec,seqNum,instrument,grating,
appExpTime,expTime,
target,piName,observer,status,
startDate,releaseDate,
obsid,propNum,category,type,cycle)
sortOrder=ascending (other option: descending)
maxResults=# (the number of results after which to stop displaying)
Special parameters that change the output table contents are available for
full output (``summary=False``):
- ``acisWindows='true'``: return ACIS windows details for a single obsid
- ``rollReqs='true'``: return roll requirements for a single obsid
- ``timeReqs='true'``: return time requirements for a single obsid
"""
COMMON_PARAM_DOCS = """:param target_name: str, optional
Target name, used in SkyCoord.from_name() to define ``ra`` and ``dec``
if ``resolve_name`` is True, otherwise matches a substring of the
table column ``target_name`` (ignoring spaces).
:param resolve_name: bool, optional
If True, use ``target_name`` to resolve ``ra`` and ``dec``.
:param ra: float, optional
Right Ascension in decimal degrees
:param dec: float, optional
Declination in decimal degrees
:param radius: float, optional
Search radius in arcmin (default=1.0)"""
def html_to_text(html):
from bs4 import BeautifulSoup
soup = BeautifulSoup(html, features='lxml')
text = soup.get_text()
text = re.sub(r'\n+', '\n', text)
return text
def clean_text(text):
out = text.encode('ascii', errors='ignore').decode('ascii')
out = out.replace('\n', ' ').replace('\r', '').strip()
return out
def get_archive_file_list(obsid, detector, level, dataset='flight', **params):
"""
Get list of archive files for given ``obsid``, ``detector``, ``level``, and ``dataset``.
Other parameters can be ``subdetector``, ``filetype``, ``filename``, and ``obi``.
Note: this may not be working for level 0 products.
Examples::
>>> get_archive_file_list(obsid=2365, detector='pcad',
... subdetector='aca', level=1, obi=2)
<Table length=27>
Filename Filesize Timestamp
str30 int64 str19
------------------------------ -------- -------------------
pcadf126690624N007_asol1.fits 7300800 2021-04-09 08:04:29
pcadf02365_002N001_asol1.fits 4728960 2021-04-09 08:04:30
... ... ...
pcadf126695890N007_adat61.fits 1293120 2021-04-09 08:04:28
pcadf126695890N007_adat71.fits 1293120 2021-04-09 08:04:28
>>> get_archive_file_list(obsid=400, detector='acis', level=2, filetype='evt2')
<Table length=1>
Filename Filesize Timestamp
str24 int64 str19
------------------------ -------- -------------------
acisf00400N007_evt2.fits 4619520 2011-07-08 13:52:57
:param obsid: int, str
Observation ID
:param detector: str
Detector name (e.g. 'pcad', 'acis')
:param level: int, float, str
Level name (e.g. 0, 0.5, 1, 1.5, 2, 3)
:param dataset: str
Dataset name (default='flight')
:param **params: dict
Additional parameters to filter query (subdetector, filetype, obi, filename)
:return: astropy Table
Table of archive files
"""
params['dataset'] = dataset
params['detector'] = detector
params['level'] = level
params['obsid'] = obsid
text = _get_cda_service_text('archive_file_list', **params)
dat = Table.read(text.splitlines(), format='ascii.basic', delimiter='\t', guess=False)
# Original Filesize has commas for the thousands like 11,233,456
filesize = [int(x.replace(',', '')) for x in dat['Filesize']]
dat['Filesize'] = filesize
return dat
def get_proposal_abstract(obsid=None, propnum=None, timeout=60):
"""Get a proposal abstract from the CDA services.
One of ``obsid`` or ``propnum`` must be provided.
:param obsid: int, str
Observation ID
:param propnum: str
Proposal number, including leading zeros e.g. '08900073'
:param timeout: int, float
Timeout in seconds for the request
:returns: dict
Dictionary of proposal abstract
"""
params = {}
if obsid is not None:
params['obsid'] = obsid
if propnum is not None:
params['propNum'] = propnum
if not params:
raise ValueError('must provide obsid or propnum')
html = _get_cda_service_text('prop_abstract', timeout=timeout, **params)
text = html_to_text(html)
# Return value is a text string with these section header lines. Use them
# to split the text into sections.
delims = ['Proposal Title',
'Proposal Number',
'Principal Investigator',
'Abstract',
'']
out = {}
for delim0, delim1 in zip(delims[:-1], delims[1:]):
name = '_'.join(word.lower() for word in delim0.split())
if match := re.search(rf'{delim0}:(.+){delim1}:', text, re.DOTALL):
out[name] = clean_text(match.group(1))
else:
warnings.warn(f'failed to find {delim0} in result')
return out
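# Illustrative call (a comment-only sketch, never executed here): fetching an
# abstract by obsid returns a dict keyed by the section headers parsed above.
#
#   >>> abstract = get_proposal_abstract(obsid=2365)   # doctest: +SKIP
#   >>> sorted(abstract)                               # doctest: +SKIP
#   ['abstract', 'principal_investigator', 'proposal_number', 'proposal_title']
#
# The obsid is arbitrary and the exact keys depend on what the CDA returns.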
def _update_params_from_kwargs(params, obsid,
target_name, resolve_name,
ra, dec, radius):
"""Update params dict for CDA Ocat queries from specified keyword args.
"""
if obsid is not None:
params['obsid'] = obsid
if ra is not None:
params['ra'] = ra
if dec is not None:
params['dec'] = dec
if target_name is not None:
if resolve_name:
coord = SkyCoord.from_name(target_name)
params['ra'] = coord.ra.deg
params['dec'] = coord.dec.deg
else:
# SR services API uses "target" to select substrings of target_name
params['target'] = target_name
# For any positional search include the radius
if 'ra' in params and 'dec' in params:
params['radius'] = radius
return params
def get_ocat_web(obsid=None, *, summary=False,
target_name=None, resolve_name=False,
ra=None, dec=None, radius=1.0,
return_type='auto',
timeout=60, **params):
"""
Get the Ocat target table data from Chandra Data Archive web services.
{RETURN_TYPE_DOCS}
{CDA_PARAM_DOCS}
:param obsid: int, str
Observation ID or string with ObsId range or list of ObsIds
:param summary: bool
Return summary data (26 columns) instead of full data (124 columns)
{COMMON_PARAM_DOCS}
:param timeout: int, float
Timeout in seconds for the request (default=60)
:param return_type: str
Return type (default='auto' => Table or dict)
:param **params: dict
Parameters passed to CDA web service
:return: astropy Table or dict of the observation details
"""
# These special params change the returned data and should always be a table
if set(['acisWindows', 'rollReqs', 'timeReqs']) & set(params):
return_type = 'table'
if return_type not in ('auto', 'table'):
raise ValueError(f"invalid return_type {return_type!r}, must be 'auto' or 'table'")
_update_params_from_kwargs(params, obsid,
target_name, resolve_name,
ra, dec, radius)
params['format'] = 'text'
# Force RA, Dec in sexagesimal because decimal returns only 3 decimal digits
# which is insufficient.
params['outputCoordUnits'] = 'sexagesimal'
service = 'ocat_summary' if summary else 'ocat_details'
text = _get_cda_service_text(service, timeout=timeout, **params)
dat = _get_table_or_dict_from_cda_rdb_text(text, return_type, params.get('obsid'))
if dat is None:
# Query returned no rows. If a single obsid was specified with return_type
# of 'auto' then we would have expected to return a dict, but instead
# raise a ValueError. Otherwise we return an empty table with the right
# column names.
if return_type == 'auto' and _is_int(params.get('obsid')):
raise ValueError(f"failed to find obsid {params['obsid']}")
else:
dat = get_ocat_web(summary=summary, return_type='table', obsid=8000)[0:0]
# Change RA, Dec to decimal if those columns exist
try:
ra, dec = dat['ra'], dat['dec']
except KeyError:
pass
else:
sc = SkyCoord(ra, dec, unit='hr,deg')
dat['ra'] = sc.ra.deg
dat['dec'] = sc.dec.deg
return dat
get_ocat_web.__doc__ = get_ocat_web.__doc__.format(
RETURN_TYPE_DOCS=RETURN_TYPE_DOCS,
CDA_PARAM_DOCS=CDA_PARAM_DOCS,
COMMON_PARAM_DOCS=COMMON_PARAM_DOCS)
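# Hedged example of the web query defined above (requires network access, shown
# only as a comment sketch; the target and radius are arbitrary):
#
#   >>> dat = get_ocat_web(target_name='3c273', resolve_name=True, radius=5)  # doctest: +SKIP
#
# A cone search like this returns an astropy Table, while passing a single
# integer obsid would return a plain dict per the return_type='auto' rules.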
def _get_cda_service_text(service, timeout=60, **params):
"""
Fetch all observation details from one of the CDA SRService pages
:param service: str
Name of the service ('prop_abstract', 'ocat_summary', 'ocat_details', 'archive_file_list')
:param timeout: int, float
Timeout in seconds for the request
:param **params: dict
Additional parameters to pass to the service
:return: str
Returned text from the service
"""
if service not in CDA_SERVICES:
raise ValueError(f'unknown service {service!r}, must be one of {list(CDA_SERVICES)}')
# Query the service and check for errors
url = f'{URL_CDA_SERVICES}/{CDA_SERVICES[service]}.do'
verbose = params.pop('verbose', False)
resp = requests.get(url, timeout=timeout, params=params)
if verbose:
print(f'GET {resp.url}')
if not resp.ok:
raise RuntimeError(f'got error {resp.status_code} for {resp.url}\n'
f'{html_to_text(resp.text)}')
return resp.text
def _get_table_or_dict_from_cda_rdb_text(text, return_type, obsid):
"""Get astropy Table or dict from the quasi-RDB text returned by the CDA services.
:param text: str
Text returned by the CDA services for a format='text' query
:param return_type: str
Return type (default='auto' => Table or dict)
:param obsid: int, str, None
Observation ID if provided
:return: astropy Table, dict, None
Table of the returned data, or dict if just one obsid selected, or None
if the query returned no data.
"""
lines = text.splitlines()
# Convert the type line to standard RDB
# First find the line that begins the column descriptions
for i, line in enumerate(lines):
if not line.startswith('#'):
header_start = i
break
else:
return None
# The line with the lengths and types is next (header_start + 1)
ctypes = lines[header_start + 1].split("\t")
# Munge length descriptions back to standard RDB (from "20" to "20S" etc)
# while leaving the numeric types alone ("20N" stays "20N").
for j, ctype in enumerate(ctypes):
if not ctype.endswith("N"):
ctypes[j] = ctype + "S"
lines[header_start + 1] = "\t".join(ctypes)
dat = Table.read(lines, format='ascii.rdb', guess=False)
# Fix the column names
# Transform summary names to corresponding detail names.
trans = {'obs_id': 'obsid',
'grating': 'grat', 'instrument': 'instr', 'appr_exp': 'app_exp',
'exposure': 'exp_time', 'data_mode': 'datamode', 'exp_mode': 'mode',
'avg_cnt_rate': 'count_rate', 'public_release_date': 'public_avail',
'proposal_num': 'pr_num', 'science_category': 'category',
'alternate_group': 'alt_group', 'appr._triggers': 'alt_trig'}
names = (name.lower() for name in dat.colnames)
names = (name.replace(' (ks)', '') for name in names)
names = (name.replace(' ', '_') for name in names)
names = [trans.get(name, name) for name in names]
dat.rename_columns(dat.colnames, names)
# Apply units to the columns
for name, col in dat.columns.items():
if name in OCAT_UNITS:
col.info.unit = OCAT_UNITS[name]
# Possibly get just the first row as a dict
dat = _get_table_or_dict(return_type, obsid, dat)
return dat
def _is_int(val):
"""Check if a value looks like an integet."""
try:
return int(val) == float(val)
except (ValueError, TypeError):
return False
def _get_table_or_dict(return_type, obsid, dat):
# If obsid is a single integer and there was just one row then return the
# row as a dict.
if return_type not in ('auto', 'table'):
raise ValueError(f"invalid return_type {return_type!r}, must be 'auto' or 'table'")
if return_type == 'auto' and _is_int(obsid):
if len(dat) == 1:
dat = dict(dat[0])
else:
raise ValueError(f"failed to find obsid {obsid}")
return dat
def main_update_ocat_local():
"""
Command line interface to write a local Ocat HDF5 file.
This overwrites the file from scratch each time.
"""
import argparse
from ska_helpers.retry import retry_call
parser = argparse.ArgumentParser(
description="Update target table")
parser.add_argument("--datafile",
default='ocat_target_table.h5')
opt = parser.parse_args()
retry_call(update_ocat_local, [opt.datafile], {"timeout": 120},
tries=3, delay=1)
def update_ocat_local(datafile, **params):
"""Write HDF5 ``datafile`` with the Ocat "details" data.
:param **params: dict
Parameters to filter ``get_ocat_details_web`` query
"""
dat = get_ocat_web(**params)
# Encode unicode strings to bytes manually. Fixed in numpy 1.20.
# Eventually we will want just dat.convert_bytestring_to_unicode().
for name, col in dat.columns.items():
if col.info.dtype.kind == 'U':
dat[name] = np.char.encode(col, 'utf-8')
dat.write(datafile, path='data', serialize_meta=True, overwrite=True,
format='hdf5', compression=True)
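# Sketch of how the local HDF5 copy could be produced by hand (the file name and
# filter below are illustrative; the cron entry point main_update_ocat_local()
# writes the full table with no filters):
#
#   >>> update_ocat_local('ocat_target_table.h5', instrument='ACIS')  # doctest: +SKIP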
def get_ocat_local(obsid=None, *,
target_name=None, resolve_name=False,
ra=None, dec=None, radius=1.0,
return_type='auto',
datafile=None, where=None, **params):
"""
Get Ocat target table from a local HDF5 data file.
The local data file is assumed to be an HDF5 file that contains a copy of
the Ocat details, typically updated by a cron job running on HEAD and
(if necessary) synced to the local host.
{RETURN_TYPE_DOCS}
:param obsid: int, optional
Observation ID
{COMMON_PARAM_DOCS}
:param datafile: str, optional
HDF5 Ocat target table data file.
Defaults to MICA_ARCHIVE/ocat_target_table.h5
:param where: str
Filter string to pass to tables read_where() to limit returned results.
See https://www.pytables.org/usersguide/condition_syntax.html
:param **params: dict
Additional filter criteria as ``<colname> == <value>`` key/value pairs.
:returns: astropy Table or dict of Ocat details
"""
where_parts = [] # build up bits of the where clause
if where is not None:
where_parts.append(where)
if datafile is None:
datafile = OCAT_TABLE_PATH
if obsid is not None:
where_parts.append(f"obsid=={obsid}")
if target_name is not None and resolve_name:
coord = SkyCoord.from_name(target_name)
ra = coord.ra.deg
dec = coord.dec.deg
if ra is not None and dec is not None:
d2r = np.pi / 180.0 # Degrees to radians
# Use great-circle distance to find targets within radius. This is
# accurate enough for this application.
where = (f'arccos(sin({ra * d2r})*sin(ra*{d2r}) + '
f'cos({ra * d2r})*cos(ra*{d2r})*cos({dec*d2r}-dec*{d2r}))'
f'< {radius / 60 * d2r}')
where_parts.append(where)
for col_name, value in params.items():
where_parts.append(f'{col_name}=={value!r}')
if where_parts:
dat = _table_read_where(datafile, where_parts)
else:
dat = _table_read_cached(datafile)
# Decode bytes to strings manually. Fixed in numpy 1.20.
# Eventually we will want just dat.convert_bytestring_to_unicode().
for name, col in dat.columns.items():
zero_length = len(dat) == 0
if col.info.dtype.kind == 'S':
dat[name] = col.astype('U') if zero_length else np.char.decode(col, 'utf-8')
# Match target_name as a substring of the table target_name column.
if len(dat) > 0 and target_name is not None and not resolve_name:
target_name = target_name.lower().replace(' ', '')
# Numpy bug: np.char.replace(dat['target_name'], ' ', '') returns float
# array for a zero-length input, so we need the len(dat) > 0 above.
target_names = np.char.lower(np.char.replace(dat['target_name'], ' ', ''))
ok = np.char.find(target_names, target_name) != -1
dat = dat[ok]
# Apply units to the columns
for name, col in dat.columns.items():
if name in OCAT_UNITS:
col.info.unit = OCAT_UNITS[name]
# Possibly get just the first row as a dict
dat = _get_table_or_dict(return_type, obsid, dat)
return dat
get_ocat_local.__doc__ = get_ocat_local.__doc__.format(
RETURN_TYPE_DOCS=RETURN_TYPE_DOCS,
CDA_PARAM_DOCS=CDA_PARAM_DOCS,
COMMON_PARAM_DOCS=COMMON_PARAM_DOCS)
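# Local-file counterpart of the web example above (a sketch; assumes the HDF5
# file exists at the default MICA_ARCHIVE location):
#
#   >>> dat = get_ocat_local(obsid=8000)                                 # doctest: +SKIP
#   >>> dat = get_ocat_local(target_name='m31', where='exp_time > 50')   # doctest: +SKIP
#
# The where string goes straight to tables read_where(), so the column name and
# the 'exp_time > 50' condition are assumed examples, not required values.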
def _table_read_cached(datafile):
"""Read the Ocat target table from a cached local data file.
The cache expires after one hour.
"""
now_time = time.time()
if datafile in OCAT_TABLE_CACHE:
last_time = OCAT_TABLE_CACHE[datafile]['last_time']
if now_time - last_time < 3600:
return OCAT_TABLE_CACHE[datafile]['data']
dat = Table.read(datafile)
OCAT_TABLE_CACHE[datafile] = {'last_time': now_time, 'data': dat}
return dat
def _table_read_where(datafile, where_parts):
"""Read HDF5 ``datafile`` using read_where() and ``where_parts``.
"""
where = '&'.join(f'({where})' for where in where_parts)
with tables.open_file(datafile) as h5:
# PyTables is unhappy with all the column names that cannot be an
# object attribute, so squelch that warning.
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=tables.NaturalNameWarning)
dat = h5.root.data.read_where(where)
dat = Table(dat)
# Manually make MaskedColumn's as needed since we are not using Table.read()
    # which handles this. This assumes a correspondence between <name>.mask
# and <name>, but this is always true for the Ocat target table.
masked_names = [name for name in dat.colnames if name.endswith('.mask')]
for masked_name in masked_names:
name = masked_name[:-5]
dat[name] = MaskedColumn(dat[name], mask=dat[masked_name], dtype=dat[name].dtype)
dat.remove_column(masked_name)
return dat
|
sot/mica
|
mica/archive/cda/services.py
|
Python
|
bsd-3-clause
| 23,345 | 0.001157 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_mynock.iff"
result.attribute_template_id = 9
result.stfName("monster_name","mynock")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/mobile/shared_mynock.py
|
Python
|
mit
| 426 | 0.049296 |
#!/usr/bin/env python
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import itk
itk.auto_progress(2)
if len(sys.argv) < 3:
print("Usage: " + sys.argv[0] + " <InputFileName> <OutputFileName> [Extension]")
sys.exit(1)
inputFileName = sys.argv[1]
outputFileName = sys.argv[2]
if len(sys.argv) > 3:
extension = sys.argv[3]
else:
extension = ".png"
fileNameFormat = outputFileName + "-%d" + extension
Dimension = 3
PixelType = itk.UC
InputImageType = itk.Image[PixelType, Dimension]
ReaderType = itk.ImageFileReader[InputImageType]
reader = ReaderType.New()
reader.SetFileName(inputFileName)
OutputPixelType = itk.UC
RescaleImageType = itk.Image[OutputPixelType, Dimension]
RescaleFilterType = itk.RescaleIntensityImageFilter[InputImageType, RescaleImageType]
rescale = RescaleFilterType.New()
rescale.SetInput(reader.GetOutput())
rescale.SetOutputMinimum(0)
rescale.SetOutputMaximum(255)
rescale.UpdateLargestPossibleRegion()
region = reader.GetOutput().GetLargestPossibleRegion()
size = region.GetSize()
fnames = itk.NumericSeriesFileNames.New()
fnames.SetStartIndex(0)
fnames.SetEndIndex(size[2] - 1)
fnames.SetIncrementIndex(1)
fnames.SetSeriesFormat(fileNameFormat)
OutputImageType = itk.Image[OutputPixelType, 2]
WriterType = itk.ImageSeriesWriter[RescaleImageType, OutputImageType]
writer = WriterType.New()
writer.SetInput(rescale.GetOutput())
writer.SetFileNames(fnames.GetFileNames())
writer.Update()
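# Hypothetical invocation (the file names are placeholders, not shipped data):
#
#   python Code.py inputVolume.mha outputSlice .png
#
# which would write outputSlice-0.png, outputSlice-1.png, ... following the
# fileNameFormat defined above.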
|
InsightSoftwareConsortium/ITKExamples
|
src/IO/ImageBase/GenerateSlicesFromVolume/Code.py
|
Python
|
apache-2.0
| 1,976 | 0.001012 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-03-21 13:53
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('erudit', '0086_auto_20180321_0717'),
]
operations = [
migrations.RemoveField(
model_name='articleabstract',
name='article',
),
migrations.DeleteModel(
name='ArticleAbstract',
),
]
|
erudit/zenon
|
eruditorg/erudit/migrations/0087_auto_20180321_0853.py
|
Python
|
gpl-3.0
| 481 | 0 |
{
'name': 'View Editor',
'category': 'Hidden',
'description': """
OpenERP Web to edit views.
==========================
""",
'version': '2.0',
'depends':['web'],
'data' : [
'views/web_view_editor.xml',
],
'qweb': ['static/src/xml/view_editor.xml'],
'auto_install': True,
}
|
mycodeday/crm-platform
|
web_view_editor/__openerp__.py
|
Python
|
gpl-3.0
| 326 | 0.006135 |
"""Defines the SMEFT class that provides the main API to smeftrunner."""
from . import rge
from . import io
from . import definitions
from . import beta
from . import smpar
import pylha
from collections import OrderedDict
from math import sqrt
import numpy as np
import ckmutil.phases, ckmutil.diag
class SMEFT(object):
"""Parameter point in the Standard Model Effective Field Theory."""
def __init__(self):
"""Initialize the SMEFT instance."""
self.C_in = None
self.scale_in = None
self.scale_high = None
def set_initial(self, C_in, scale_in, scale_high):
r"""Set the initial values for parameters and Wilson coefficients at
the scale `scale_in`, setting the new physics scale $\Lambda$ to
`scale_high`."""
self.C_in = C_in
self.scale_in = scale_in
self.scale_high = scale_high
def load_initial(self, streams):
"""Load the initial values for parameters and Wilson coefficients from
one or several files.
        `streams` should be a tuple of file-like objects or strings."""
d = {}
for stream in streams:
s = io.load(stream)
if 'BLOCK' not in s:
raise ValueError("No BLOCK found")
d.update(s['BLOCK'])
d = {'BLOCK': d}
C = io.wc_lha2dict(d)
sm = io.sm_lha2dict(d)
C.update(sm)
C = definitions.symmetrize(C)
self.C_in = C
def set_initial_wcxf(self, wc, scale_high=None, get_smpar=False):
"""Load the initial values for Wilson coefficients from a
wcxf.WC instance.
Parameters:
- `scale_high`: since Wilson coefficients are dimensionless in
smeftrunner but not in WCxf, the high scale in GeV has to be provided.
If this parameter is None (default), either a previously defined
value will be used, or the scale attribute of the WC instance will
be used.
        - `get_smpar`: boolean, optional, defaults to False. If True, an attempt
is made to determine the SM parameters from the requirement of
reproducing the correct SM masses and mixings at the electroweak
scale. As approximations are involved, the result might or might not
be reliable, depending on the size of the Wilson coefficients
affecting the SM masses and mixings. If False, Standard Model
parameters have to be provided separately and are assumed to be in
the weak basis used for the Warsaw basis as defined in WCxf,
i.e. in the basis where the down-type and charged lepton mass
matrices are diagonal.
"""
import wcxf
if wc.eft != 'SMEFT':
raise ValueError("Wilson coefficients use wrong EFT.")
if wc.basis != 'Warsaw':
raise ValueError("Wilson coefficients use wrong basis.")
if scale_high is not None:
self.scale_high = scale_high
elif self.scale_high is None:
self.scale_high = wc.scale
C = wcxf.translators.smeft.wcxf2arrays(wc.dict)
keys_dim5 = ['llphiphi']
keys_dim6 = list(set(definitions.WC_keys_0f + definitions.WC_keys_2f + definitions.WC_keys_4f) - set(keys_dim5))
self.scale_in = wc.scale
for k in keys_dim5:
if k in C:
C[k] = C[k]*self.scale_high
for k in keys_dim6:
if k in C:
C[k] = C[k]*self.scale_high**2
C = definitions.symmetrize(C)
# fill in zeros for missing WCs
for k, s in definitions.C_keys_shape.items():
if k not in C and k not in definitions.SM_keys:
if s == 1:
C[k] = 0
else:
C[k] = np.zeros(s)
if self.C_in is None:
self.C_in = C
else:
self.C_in.update(C)
if get_smpar:
self.C_in.update(self._get_sm_scale_in())
def load_wcxf(self, stream, get_smpar=True):
"""Load the initial values for Wilson coefficients from
a file-like object or a string in WCxf format.
Note that Standard Model parameters have to be provided separately
and are assumed to be in the weak basis used for the Warsaw basis as
defined in WCxf, i.e. in the basis where the down-type and charged
lepton mass matrices are diagonal."""
import wcxf
wc = wcxf.WC.load(stream)
self.set_initial_wcxf(wc, get_smpar=get_smpar)
def dump(self, C_out, scale_out=None, stream=None, fmt='lha', skip_redundant=True):
"""Return a string representation of the parameters and Wilson
coefficients `C_out` in DSixTools output format. If `stream` is
specified, export it to a file. `fmt` defaults to `lha` (the SLHA-like
DSixTools format), but can also be `json` or `yaml` (see the
pylha documentation)."""
C = OrderedDict()
if scale_out is not None:
C['SCALES'] = {'values': [[1, self.scale_high], [2, scale_out]]}
else:
C['SCALES'] = {'values': [[1, self.scale_high]]}
sm = io.sm_dict2lha(C_out)['BLOCK']
C.update(sm)
wc = io.wc_dict2lha(C_out, skip_redundant=skip_redundant)['BLOCK']
C.update(wc)
return pylha.dump({'BLOCK': C}, fmt=fmt, stream=stream)
def get_wcxf(self, C_out, scale_out):
"""Return the Wilson coefficients `C_out` as a wcxf.WC instance.
Note that the Wilson coefficients are rotated into the Warsaw basis
as defined in WCxf, i.e. to the basis where the down-type and charged
lepton mass matrices are diagonal."""
import wcxf
C = self.rotate_defaultbasis(C_out)
d = wcxf.translators.smeft.arrays2wcxf(C)
basis = wcxf.Basis['SMEFT', 'Warsaw']
d = {k: v for k, v in d.items() if k in basis.all_wcs and v != 0}
keys_dim5 = ['llphiphi']
keys_dim6 = list(set(definitions.WC_keys_0f + definitions.WC_keys_2f
+ definitions.WC_keys_4f) - set(keys_dim5))
for k in d:
if k.split('_')[0] in keys_dim5:
d[k] = d[k] / self.scale_high
for k in d:
if k.split('_')[0] in keys_dim6:
d[k] = d[k] / self.scale_high**2
d = wcxf.WC.dict2values(d)
wc = wcxf.WC('SMEFT', 'Warsaw', scale_out, d)
return wc
def dump_wcxf(self, C_out, scale_out, fmt='yaml', stream=None, **kwargs):
"""Return a string representation of the Wilson coefficients `C_out`
in WCxf format. If `stream` is specified, export it to a file.
`fmt` defaults to `yaml`, but can also be `json`.
Note that the Wilson coefficients are rotated into the Warsaw basis
as defined in WCxf, i.e. to the basis where the down-type and charged
lepton mass matrices are diagonal."""
wc = self.get_wcxf(C_out, scale_out)
return wc.dump(fmt=fmt, stream=stream, **kwargs)
def rgevolve(self, scale_out, **kwargs):
"""Solve the SMEFT RGEs from the initial scale to `scale_out`.
Returns a dictionary with parameters and Wilson coefficients at
`scale_out`. Additional keyword arguments will be passed to
the ODE solver `scipy.integrate.odeint`."""
self._check_initial()
return rge.smeft_evolve(C_in=self.C_in,
scale_high=self.scale_high,
scale_in=self.scale_in,
scale_out=scale_out,
**kwargs)
def rgevolve_leadinglog(self, scale_out):
"""Compute the leading logarithmix approximation to the solution
of the SMEFT RGEs from the initial scale to `scale_out`.
Returns a dictionary with parameters and Wilson coefficients.
Much faster but less precise that `rgevolve`.
"""
self._check_initial()
return rge.smeft_evolve_leadinglog(C_in=self.C_in,
scale_high=self.scale_high,
scale_in=self.scale_in,
scale_out=scale_out)
def _check_initial(self):
"""Check if initial values and scale as well as the new physics scale
have been set."""
if self.C_in is None:
raise Exception("You have to specify the initial conditions first.")
if self.scale_in is None:
raise Exception("You have to specify the initial scale first.")
if self.scale_high is None:
raise Exception("You have to specify the high scale first.")
def rotate_defaultbasis(self, C):
"""Rotate all parameters to the basis where the running down-type quark
and charged lepton mass matrices are diagonal and where the running
up-type quark mass matrix has the form V.S, with V unitary and S real
diagonal, and where the CKM and PMNS matrices have the standard
phase convention."""
v = sqrt(2*C['m2'].real/C['Lambda'].real)
Mep = v/sqrt(2) * (C['Ge'] - C['ephi'] * v**2/self.scale_high**2/2)
Mup = v/sqrt(2) * (C['Gu'] - C['uphi'] * v**2/self.scale_high**2/2)
Mdp = v/sqrt(2) * (C['Gd'] - C['dphi'] * v**2/self.scale_high**2/2)
Mnup = -v**2 * C['llphiphi']
UeL, Me, UeR = ckmutil.diag.msvd(Mep)
UuL, Mu, UuR = ckmutil.diag.msvd(Mup)
UdL, Md, UdR = ckmutil.diag.msvd(Mdp)
Unu, Mnu = ckmutil.diag.mtakfac(Mnup)
UuL, UdL, UuR, UdR = ckmutil.phases.rephase_standard(UuL, UdL, UuR, UdR)
Unu, UeL, UeR = ckmutil.phases.rephase_pmns_standard(Unu, UeL, UeR)
return definitions.flavor_rotation(C, Uq=UdL, Uu=UuR, Ud=UdR, Ul=UeL, Ue=UeR)
def _run_sm_scale_in(self, C_out, scale_sm=91.1876):
"""Get the SM parameters at the EW scale, using an estimate `C_out`
of the Wilson coefficients at that scale, and run them to the
input scale."""
# initialize an empty SMEFT instance
smeft_sm = SMEFT()
C_in_sm = beta.C_array2dict(np.zeros(9999))
# set the SM parameters to the values obtained from smpar.smeftpar
C_SM = smpar.smeftpar(scale_sm, self.scale_high, C_out, basis='Warsaw')
C_SM = {k: v for k, v in C_SM.items() if k in definitions.SM_keys}
# set the Wilson coefficients at the EW scale to C_out
C_in_sm.update(C_out)
C_in_sm.update(C_SM)
smeft_sm.set_initial(C_in_sm, scale_sm, scale_high=self.scale_high)
# run up (with 1% relative precision, ignore running of Wilson coefficients)
C_SM_high = smeft_sm.rgevolve(self.scale_in, newphys=False, rtol=0.01, atol=1)
return {k: v for k, v in C_SM_high.items() if k in definitions.SM_keys}
def _get_sm_scale_in(self, scale_sm=91.1876):
"""Get an estimate of the SM parameters at the input scale by running
them from the EW scale using constant values for the Wilson coefficients
(corresponding to their leading log approximated values at the EW
scale).
Note that this is not guaranteed to work and will fail if some of the
Wilson coefficients (the ones affecting the extraction of SM parameters)
are large."""
        # initialize a copy of ourselves
_smeft = SMEFT()
_smeft.set_initial(self.C_in, self.scale_in, self.scale_high)
# Step 1: run the SM up, using the WCs at scale_input as (constant) estimate
_smeft.C_in.update(self._run_sm_scale_in(self.C_in, scale_sm=scale_sm))
# Step 2: run the WCs down in LL approximation
C_out = _smeft.rgevolve_leadinglog(scale_sm)
# Step 3: run the SM up again, this time using the WCs at scale_sm as (constant) estimate
return self._run_sm_scale_in(C_out, scale_sm=scale_sm)
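if __name__ == '__main__':
    # Minimal usage sketch, not part of the library API: it only exercises the intended
    # workflow (set initial conditions, run the RGEs, dump the result). All Wilson
    # coefficients are left at zero and the scales below are arbitrary placeholders,
    # so the output is illustrative rather than physically meaningful.
    smeft = SMEFT()
    C_in = beta.C_array2dict(np.zeros(9999))
    C_SM = smpar.smeftpar(91.1876, 1000, C_in, basis='Warsaw')
    C_in.update({k: v for k, v in C_SM.items() if k in definitions.SM_keys})
    smeft.set_initial(C_in, scale_in=91.1876, scale_high=1000)
    C_out = smeft.rgevolve_leadinglog(scale_out=1000)
    print(smeft.dump(C_out, scale_out=1000))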
|
DsixTools/python-smeftrunner
|
smeftrunner/classes.py
|
Python
|
mit
| 11,842 | 0.001773 |
# This file is part of MANTIS OS, Operating System
# See http://mantis.cs.colorado.edu/
#
# Copyright (C) 2003-2005 University of Colorado, Boulder
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the mos license (see file LICENSE)
import wx, thread
import net_model
class node_view:
def __init__(self, model, color = 'BLUE'):
self.node_radius = 10 # Radius of a node
self.node_color = 'GREEN' # TODO not currently used
self.node_outline = 'BLACK' # TODO not currently used
# Setting this flag prevents drawing this node and links while dragging
self.dragging = False
self.model = model
# Now setup the node's bitmap so we can just blit to the screen
# rather than having to re-draw every time.
#self.bmp = wx.EmptyBitmap(2 * self.node_radius + 4, 2 * self.node_radius + 4)
self.bmp = wx.EmptyBitmap(2 * self.node_radius, 3 * self.node_radius)
self.Update()
def HitTest(self, point):
rect = self.GetRect()
return rect.InsideXY(point.x, point.y)
def GetRect(self):
x, y = self.model.GetPosition()
return wx.Rect(x-self.node_radius, y-self.node_radius,
self.bmp.GetWidth(), self.bmp.GetHeight())
def Erase(self, dc):
if self.dragging:
return
dc.SetBrush(wx.Brush("WHITE"))
dc.SetPen(wx.Pen("WHITE"))
x, y = self.model.GetPosition()
#dc.DrawRectangle(x-self.node_radius, y-self.node_radius,
# self.node_radius * 2 + 4, self.node_radius * 2 + 4)
dc.DrawRectangle(x-self.node_radius, y-self.node_radius,
2 * self.node_radius, 3 * self.node_radius)
def Draw(self, dc, op = wx.COPY):
if self.dragging:
return True
if self.bmp.Ok():
memDC = wx.MemoryDC()
memDC.SelectObject(self.bmp)
x, y = self.model.GetPosition()
dc.Blit(x-self.node_radius, y-self.node_radius,
self.bmp.GetWidth(), self.bmp.GetHeight(),
memDC, 0, 0, op, True)
return True
else:
return False
def Update(self):
#self.led = state
# create a DC for drawing in to the bitmap memory
bdc = wx.MemoryDC();
bdc.SelectObject(self.bmp);
# First clear the background
#bdc.SetBrush(wx.Brush("WHITE"))
#bdc.SetPen(wx.Pen("WHITE"))
#bdc.DrawRectangle(0, 0, self.node_radius * 2 + 4, self.node_radius * 2 + 4)
# Now draw our default node
#bdc.SetBrush(wx.Brush(self.node_color))
#if self.model.GetLedState() == 1:
# bdc.SetPen(wx.Pen(self.node_outline, 4))
#else:
# bdc.SetPen(wx.Pen("RED", 4))
#bdc.DrawEllipse(0, 0, self.node_radius * 2, self.node_radius * 2)
bdc.SetBrush(wx.Brush("DARKGREEN"))
bdc.SetPen(wx.Pen("DARKGREEN"))
bdc.DrawRectangle(0, 0, 2 * self.node_radius, 3 * self.node_radius)
# Now draw the led line
if self.model.led & 1:
bdc.SetBrush(wx.Brush("YELLOW"))
bdc.SetPen(wx.Pen("YELLOW"))
bdc.DrawRectangle(0, 16, self.node_radius*3/2, 8)
if self.model.led & 2: # green
bdc.SetBrush(wx.Brush("GREEN"))
bdc.SetPen(wx.Pen("GREEN"))
bdc.DrawRectangle(0, 8, self.node_radius*3/2, 8)
if self.model.led & 4: # red
bdc.SetBrush(wx.Brush("RED"))
bdc.SetPen(wx.Pen("RED"))
bdc.DrawRectangle(0, 0, self.node_radius*3/2, 8)
# must disconnect the bitmap from the dc so we can use it later
bdc.SelectObject(wx.NullBitmap);
# Create a mask so that we only blit the colored part
#if "__WXGTK__" not in wx.PlatformInfo:
#mask = wx.Mask(self.bmp, wx.WHITE)
mask = wx.Mask(self.bmp)
mask.colour = wx.WHITE
self.bmp.SetMask(mask)
def __str__(self):
return 'node_view:'+str(self.model.id)
class link_view:
def __init__(self, src, dst):
self.src = src
self.dst = dst
self.flashcount = 0
def Erase(self, dc):
if self.src.dragging or self.dst.dragging:
return
pen = wx.Pen("WHITE")
pen.SetWidth(4)
dc.SetPen(pen)
dc.DrawLine(self.src.model.pos[0], self.src.model.pos[1], self.dst.model.pos[0], self.dst.model.pos[1])
def Draw(self, dc, op = wx.COPY):
if self.src.dragging or self.dst.dragging:
return
if self.flashcount:
pen = wx.Pen("GOLD")
else:
pen = wx.Pen("BLUE")
pen.SetWidth(4)
dc.SetPen(pen)
dc.DrawLine(self.src.model.pos[0], self.src.model.pos[1], self.dst.model.pos[0], self.dst.model.pos[1])
class event_queue:
"Queue for storing net events and their callbacks. See net_view.DispatchEvent()."
def __init__(self):
self.lock = thread.allocate_lock()
self.list = []
def put(self, obj):
"Add an object to the queue atomically."
self.lock.acquire()
self.list.append(obj)
self.lock.release()
def get(self):
"Return the entire queue as a list and clear the queue atomically."
self.lock.acquire()
list = self.list
self.list = []
self.lock.release()
return list
class net_view(wx.ScrolledWindow):
"This component does the drawing of the network model."
def __init__(self, parent, id, model):
wx.ScrolledWindow.__init__(self, parent, id, style=wx.NO_FULL_REPAINT_ON_RESIZE)
self.model = model
self.node_dict = {}
self.link_dict = {}
self.node_size = 25
self.dragNode = None
self.dragImage = None
self.queue = event_queue()
self.SetBackgroundColour("WHITE")
self.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))
# Mouse buttons and motion
wx.EVT_LEFT_DOWN(self, self.OnLeftDown)
wx.EVT_LEFT_UP(self, self.OnLeftUp)
wx.EVT_MOTION(self, self.OnMotion)
wx.EVT_PAINT(self, self.OnPaint)
wx.EVT_IDLE(self, self.OnIdle)
self.SetMode("Select")
# Register network events callback DispatchEvent.
# See net_view.DispatchEvent() for details.
model.Bind(net_model.ADD_NODE, self.DispatchEvent, self.add_node)
model.Bind(net_model.REMOVE_NODE, self.DispatchEvent, self.del_node)
model.Bind(net_model.ADD_LINK, self.DispatchEvent, self.add_radio_link)
model.Bind(net_model.REMOVE_LINK, self.DispatchEvent, self.del_radio_link)
model.Bind(net_model.NET_CHANGED, self.DispatchEvent, self.new_network)
model.Bind(net_model.FORWARD_PACKET, self.DispatchEvent, self.forward_radio_packet)
def DispatchEvent(self, callback, *args):
""""Queue a net event to be handled on the GUI thread.
Many wxPython functions do not work when invoked from a thread other
than the main GUI thread. This is a problem for network events, because
they occur during the listen thread that was spawned by simClient.py.
The solution is to register a meta-callback, this method, with the
network model. When DispatchEvent is invoked by the network model,
it puts the original GUI callback, along with the arguments,
on self.queue and then calls wx.WakeUpIdle(). This causes OnIdle to be
invoked on the main GUI thread, which in turn invokes every callback
that is on the queue, and these callbacks can invoke wxPython functions
without fear of being on the wrong thread. This greatly simplifies the
implementation of the callbacks (trust me)."""
self.queue.put((callback, args))
# Cause an idle event to occur, which will invoke our idle handler.
wx.WakeUpIdle()
def FindNode(self, point):
"Return the node that contains the point."
for n in self.node_dict.itervalues():
if n.HitTest(point):
return n
return None
def OnLeftDown(self, evt):
node = self.FindNode(evt.GetPosition())
if node:
self.dragNode = node
self.dragStartPos = evt.GetPosition()
def OnLeftUp(self, evt):
if not self.dragImage or not self.dragNode:
self.dragImage = None
self.dragNode = None
return
# Hide the image, end dragging, and nuke out the drag image.
self.dragImage.Hide()
self.dragImage.EndDrag()
self.dragImage = None
dc = wx.ClientDC(self)
# reposition and draw the shape
self.dragNode.model.pos = (
self.dragNode.model.pos[0] + evt.GetPosition()[0] - self.dragStartPos[0],
self.dragNode.model.pos[1] + evt.GetPosition()[1] - self.dragStartPos[1]
)
self.dragNode.dragging = False
self.dragNode.Draw(dc)
# Update the network model.
self.model.MoveNode(self.dragNode.model.id, self.dragNode.model.pos[0], self.dragNode.model.pos[1])
self.dragNode = None
def OnRightDown(self, event):
pass
def OnRightUp(self, event):
pass
def OnMotion(self, evt):
# Ignore mouse movement if we're not dragging.
if not self.dragNode or not evt.Dragging() or not evt.LeftIsDown():
return
# if we have a node, but haven't started dragging yet
if self.dragNode and not self.dragImage:
# only start the drag after having moved a couple pixels
tolerance = 2
pt = evt.GetPosition()
dx = abs(pt.x - self.dragStartPos.x)
dy = abs(pt.y - self.dragStartPos.y)
if dx <= tolerance and dy <= tolerance:
return
# Create a DragImage to draw this node while it is moving
# (The drag image will update even as the bitmap is updating. Magical!)
self.dragImage = wx.DragImage(self.dragNode.bmp,
wx.StockCursor(wx.CURSOR_HAND))
hotspot = self.dragStartPos - self.dragNode.model.pos + [self.dragNode.node_radius, self.dragNode.node_radius]
self.dragImage.BeginDrag(hotspot, self, False)
self.dragImage.Move(pt)
# erase the node since it will be drawn by the DragImage now
dc = wx.ClientDC(self)
for link in self.dragNode.model.incoming.itervalues():
if link not in self.link_dict: continue
l = self.link_dict[link]
l.Erase(dc)
l.src.Draw(dc)
for link in self.dragNode.model.outgoing.itervalues():
if link not in self.link_dict: continue
l = self.link_dict[link]
l.Erase(dc)
l.dst.Draw(dc)
self.dragNode.Erase(dc)
self.dragNode.dragging = True
self.dragImage.Show()
# if we have node and image then move it
elif self.dragNode and self.dragImage:
self.dragImage.Move(evt.GetPosition())
def OnSize(self, event):
pass
def OnIdle(self, event):
"""Handle queued network events. See net_view.DispatchEvent()."""
for callback, args in self.queue.get():
callback(*args)
def OnPaint(self, event):
""" Window expose events come here to refresh. """
dc = wx.PaintDC(self)
self.Draw(dc)
def Draw(self, dc):
dc.BeginDrawing() # for Windows compatibility
# Since we are a scrolling window we need to prepare the DC
self.PrepareDC(dc)
dc.SetBackground(wx.Brush(self.GetBackgroundColour()))
dc.Clear()
for link in self.link_dict.itervalues():
link.Draw(dc)
for node in self.node_dict.itervalues():
node.Draw(dc)
dc.EndDrawing()
def SetMode(self, mode):
self.mode = mode
if self.mode == "Select":
self.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))
else:
self.SetCursor(wx.StockCursor(wx.STANDARD_CURSOR))
# TODO do something about this color parm
def add_node(self, nodemodel, color = 'BLUE'):
n = node_view(nodemodel, color)
self.node_dict[nodemodel] = n
nodemodel.Bind(net_model.LED_CHANGED, self.DispatchEvent, self.node_state_changed)
n.Update()
dc = wx.ClientDC(self)
n.Draw(dc)
def del_node(self, node):
if self.node_dict.has_key(node):
dc = wx.ClientDC(self)
self.node_dict[node].Erase(dc)
del self.node_dict[node]
def node_state_changed(self, node):
if self.node_dict.has_key(node):
n = self.node_dict[node]
n.Update()
dc = wx.ClientDC(self)
n.Draw(dc)
def add_radio_link(self, link):
if self.node_dict.has_key(link.src) and self.node_dict.has_key(link.dst):
src = self.node_dict[link.src]
dst = self.node_dict[link.dst]
l = link_view(src, dst)
self.link_dict[link] = l
dc = wx.ClientDC(self)
l.Draw(dc)
l.src.Draw(dc)
l.dst.Draw(dc)
def del_radio_link(self, link):
if self.link_dict.has_key(link):
l = self.link_dict[link]
dc = wx.ClientDC(self)
l.Erase(dc)
l.src.Draw(dc)
l.dst.Draw(dc)
del self.link_dict[link]
def new_network(self, model):
self.node_dict.clear()
self.link_dict.clear()
self.dragNode = None
self.dragImage = None
dummy = self.queue.get() # empties the list
for nodemodel in model.IterNodes():
n = node_view(nodemodel, 'BLUE')
self.node_dict[nodemodel] = n
nodemodel.Bind(net_model.LED_CHANGED, self.DispatchEvent, self.node_state_changed)
n.Update()
for link in model.IterLinks():
l = link_view(self.node_dict[link.src], self.node_dict[link.dst])
self.link_dict[link] = l
dc = wx.ClientDC(self)
self.Draw(dc)
def forward_radio_packet(self, link):
if link in self.link_dict:
l = self.link_dict[link]
l.flashcount += 1
# Return the link to its original color after a delay.
wx.FutureCall(500, self.flash_link_off, l, link)
dc = wx.ClientDC(self)
l.Draw(dc)
l.src.Draw(dc)
l.dst.Draw(dc)
def flash_link_off(self, link, linkmodel):
# make sure this link hasn't been deleted
if linkmodel in self.link_dict:
link.flashcount -= 1
dc = wx.ClientDC(self)
link.Draw(dc)
link.src.Draw(dc)
link.dst.Draw(dc)
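if __name__ == '__main__':
    # Stand-alone sketch of the DispatchEvent pattern described above; the demo frame and
    # its worker thread are hypothetical and not part of the MANTIS viewer. A background
    # thread only queues the callback and calls wx.WakeUpIdle(); OnIdle then runs the
    # callback safely on the main GUI thread.
    class _demo_frame(wx.Frame):
        def __init__(self):
            wx.Frame.__init__(self, None, -1, "dispatch demo")
            self.queue = event_queue()
            wx.EVT_IDLE(self, self.OnIdle)
            thread.start_new_thread(self.worker, ())
        def worker(self):
            self.queue.put((self.SetTitle, ("updated from worker thread",)))
            wx.WakeUpIdle()
        def OnIdle(self, event):
            for callback, args in self.queue.get():
                callback(*args)
    app = wx.PySimpleApp()
    _demo_frame().Show()
    app.MainLoop()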
|
turon/mantis
|
src/tools/tenodera/net_view.py
|
Python
|
bsd-3-clause
| 15,234 | 0.006302 |
from typing import Any
from typing import List
from typing import Optional
from typing import Union
import bpy
import compas_blender
from compas.artists import PrimitiveArtist
from compas.geometry import Line
from compas.colors import Color
from compas_blender.artists import BlenderArtist
class LineArtist(BlenderArtist, PrimitiveArtist):
"""Artist for drawing lines in Blender.
Parameters
----------
line : :class:`~compas.geometry.Line`
A COMPAS line.
collection : str | :blender:`bpy.types.Collection`
The Blender scene collection the object(s) created by this artist belong to.
**kwargs : dict, optional
Additional keyword arguments.
For more info,
see :class:`~compas_blender.artists.BlenderArtist` and :class:`~compas.artists.PrimitiveArtist`.
Examples
--------
Use the Blender artist explicitly.
.. code-block:: python
from compas.geometry import Line
from compas_blender.artists import LineArtist
line = Line([0, 0, 0], [1, 1, 1])
artist = LineArtist(line)
artist.draw()
Or, use the artist through the plugin mechanism.
.. code-block:: python
from compas.geometry import Line
from compas.artists import Artist
line = Line([0, 0, 0], [1, 1, 1])
artist = Artist(line)
artist.draw()
"""
def __init__(self,
line: Line,
collection: Optional[Union[str, bpy.types.Collection]] = None,
**kwargs: Any
):
super().__init__(primitive=line, collection=collection or line.name, **kwargs)
def draw(self, color: Optional[Color] = None, show_points: bool = False) -> List[bpy.types.Object]:
"""Draw the line.
Parameters
----------
color : tuple[int, int, int] | tuple[float, float, float] | :class:`~compas.colors.Color`, optional
            The RGB color of the line.
The default color is :attr:`compas.artists.PrimitiveArtist.color`.
show_points : bool, optional
If True, show the start and end point in addition to the line.
Returns
-------
list[:blender:`bpy.types.Object`]
"""
color = Color.coerce(color) or self.color
start = self.primitive.start
end = self.primitive.end
objects = []
if show_points:
points = [
{'pos': start, 'name': f"{self.primitive.name}.start", 'color': color, 'radius': 0.01},
{'pos': end, 'name': f"{self.primitive.name}.end", 'color': color, 'radius': 0.01},
]
objects += compas_blender.draw_points(points, collection=self.collection)
lines = [
{'start': start, 'end': end, 'color': color, 'name': f"{self.primitive.name}"},
]
objects += compas_blender.draw_lines(lines, collection=self.collection)
return objects
|
compas-dev/compas
|
src/compas_blender/artists/lineartist.py
|
Python
|
mit
| 2,962 | 0.003038 |
# Read the grid size and the grid of digits, one row per input line.
n = int(input())
grid = [[int(c) for c in input()] for i in range(0, n)]
# A cavity is an interior cell strictly greater than all four of its neighbours.
cavities = []
for i in range(0, n):
    if i > 0 and i < n - 1:
        for j in range(0, n):
            if j > 0 and j < n - 1:
                v = grid[i][j]
                if grid[i - 1][j] < v and grid[i + 1][j] < v and grid[i][j - 1] < v and grid[i][j + 1] < v:
                    cavities.append((i, j))
# Mark every cavity with 'X' and print the grid back out.
for i, j in cavities:
    grid[i][j] = 'X'
print('\n'.join(''.join(str(i) for i in row) for row in grid))
|
amol9/hackerearth
|
hackerrank/practice/cavity_map/solution.py
|
Python
|
mit
| 500 | 0.006 |
import tkinter
tk = tkinter.Tk()
tk.title("Bounce")
tk.resizable(0, 0)
# Keep the window on the top
tk.wm_attributes("-topmost", 1)
canvas = tkinter.Canvas(tk, width=500, height=400)
# Remove border. Apparently no effect on Linux, but good on Mac
canvas.configure(bd=0)
# Make the 0 horizontal and vertical line apparent
canvas.configure(highlightthickness=0)
canvas.pack()
ball = canvas.create_oval(10, 10, 25, 25, fill='red')
def handle_timer_event():
canvas.move(ball, 10, 0)
tk.after(100, handle_timer_event)
handle_timer_event()
tk.mainloop()
|
amosnier/python_for_kids
|
course_code/13_039_animated_ball.py
|
Python
|
gpl-3.0
| 562 | 0.003559 |
import asyncio
import discord
from discord.ext import commands
if not discord.opus.is_loaded():
# the 'opus' library here is opus.dll on windows
# or libopus.so on linux in the current directory
# you should replace this with the location the
# opus library is located in and with the proper filename.
# note that on windows this DLL is automatically provided for you
discord.opus.load_opus('opus')
class VoiceEntry:
def __init__(self, message, player):
self.requester = message.author
self.channel = message.channel
self.player = player
def __str__(self):
fmt = '*{0.title}* uploaded by {0.uploader} and requested by {1.display_name}'
duration = self.player.duration
if duration:
fmt = fmt + ' [length: {0[0]}m {0[1]}s]'.format(divmod(duration, 60))
return fmt.format(self.player, self.requester)
class VoiceState:
def __init__(self, bot):
self.current = None
self.voice = None
self.bot = bot
self.play_next_song = asyncio.Event()
self.songs = asyncio.Queue()
self.skip_votes = set() # a set of user_ids that voted
self.audio_player = self.bot.loop.create_task(self.audio_player_task())
def is_playing(self):
if self.voice is None or self.current is None:
return False
player = self.current.player
return not player.is_done()
@property
def player(self):
return self.current.player
def skip(self):
self.skip_votes.clear()
if self.is_playing():
self.player.stop()
def toggle_next(self):
self.bot.loop.call_soon_threadsafe(self.play_next_song.set)
async def audio_player_task(self):
while True:
self.play_next_song.clear()
self.current = await self.songs.get()
await self.bot.send_message(self.current.channel, 'Now playing ' + str(self.current))
self.current.player.start()
await self.play_next_song.wait()
class Music:
"""Voice related commands.
Works in multiple servers at once.
"""
def __init__(self, bot):
self.bot = bot
self.voice_states = {}
def get_voice_state(self, server):
state = self.voice_states.get(server.id)
if state is None:
state = VoiceState(self.bot)
self.voice_states[server.id] = state
return state
async def create_voice_client(self, channel):
voice = await self.bot.join_voice_channel(channel)
state = self.get_voice_state(channel.server)
state.voice = voice
def __unload(self):
for state in self.voice_states.values():
try:
state.audio_player.cancel()
if state.voice:
self.bot.loop.create_task(state.voice.disconnect())
except:
pass
@commands.command(pass_context=True, no_pm=True)
async def join(self, ctx, *, channel : discord.Channel):
"""Joins a voice channel."""
try:
await self.create_voice_client(channel)
except discord.ClientException:
await self.bot.say('Already in a voice channel...')
except discord.InvalidArgument:
await self.bot.say('This is not a voice channel...')
else:
await self.bot.say('Ready to play audio in ' + channel.name)
@commands.command(pass_context=True, no_pm=True)
async def summon(self, ctx):
"""Summons the bot to join your voice channel."""
summoned_channel = ctx.message.author.voice_channel
if summoned_channel is None:
await self.bot.say('You are not in a voice channel.')
return False
state = self.get_voice_state(ctx.message.server)
if state.voice is None:
state.voice = await self.bot.join_voice_channel(summoned_channel)
else:
await state.voice.move_to(summoned_channel)
return True
@commands.command(pass_context=True, no_pm=True)
async def play(self, ctx, *, song : str):
"""Plays a song.
If there is a song currently in the queue, then it is
queued until the next song is done playing.
This command automatically searches as well from YouTube.
The list of supported sites can be found here:
https://rg3.github.io/youtube-dl/supportedsites.html
"""
state = self.get_voice_state(ctx.message.server)
opts = {
'default_search': 'auto',
'quiet': True,
}
if state.voice is None:
success = await ctx.invoke(self.summon)
if not success:
return
try:
player = await state.voice.create_ytdl_player(song, ytdl_options=opts, after=state.toggle_next)
except Exception as e:
fmt = 'An error occurred while processing this request: ```py\n{}: {}\n```'
await self.bot.send_message(ctx.message.channel, fmt.format(type(e).__name__, e))
else:
player.volume = 0.6
entry = VoiceEntry(ctx.message, player)
await self.bot.say('Enqueued ' + str(entry))
await state.songs.put(entry)
@commands.command(pass_context=True, no_pm=True)
async def volume(self, ctx, value : int):
"""Sets the volume of the currently playing song."""
state = self.get_voice_state(ctx.message.server)
if state.is_playing():
player = state.player
player.volume = value / 100
await self.bot.say('Set the volume to {:.0%}'.format(player.volume))
@commands.command(pass_context=True, no_pm=True)
async def pause(self, ctx):
"""Pauses the currently played song."""
state = self.get_voice_state(ctx.message.server)
if state.is_playing():
player = state.player
player.pause()
@commands.command(pass_context=True, no_pm=True)
async def resume(self, ctx):
"""Resumes the currently played song."""
state = self.get_voice_state(ctx.message.server)
if state.is_playing():
player = state.player
player.resume()
@commands.command(pass_context=True, no_pm=True)
async def stop(self, ctx):
"""Stops playing audio and leaves the voice channel.
This also clears the queue.
"""
server = ctx.message.server
state = self.get_voice_state(server)
if state.is_playing():
player = state.player
player.stop()
try:
state.audio_player.cancel()
del self.voice_states[server.id]
await state.voice.disconnect()
except:
pass
@commands.command(pass_context=True, no_pm=True)
async def skip(self, ctx):
"""Vote to skip a song. The song requester can automatically skip.
3 skip votes are needed for the song to be skipped.
"""
state = self.get_voice_state(ctx.message.server)
if not state.is_playing():
await self.bot.say('Not playing any music right now...')
return
voter = ctx.message.author
if voter == state.current.requester:
await self.bot.say('Requester requested skipping song...')
state.skip()
elif voter.id not in state.skip_votes:
state.skip_votes.add(voter.id)
total_votes = len(state.skip_votes)
if total_votes >= 3:
await self.bot.say('Skip vote passed, skipping song...')
state.skip()
else:
await self.bot.say('Skip vote added, currently at [{}/3]'.format(total_votes))
else:
await self.bot.say('You have already voted to skip this song.')
@commands.command(pass_context=True, no_pm=True)
async def playing(self, ctx):
"""Shows info about the currently played song."""
state = self.get_voice_state(ctx.message.server)
if state.current is None:
await self.bot.say('Not playing anything.')
else:
skip_count = len(state.skip_votes)
await self.bot.say('Now playing {} [skips: {}/3]'.format(state.current, skip_count))
bot = commands.Bot(command_prefix=commands.when_mentioned_or('$'), description='A playlist example for discord.py')
bot.add_cog(Music(bot))
@bot.event
async def on_ready():
print('Logged in as:\n{0} (ID: {0.id})'.format(bot.user))
bot.run('token')
|
Boy-314/winner-winner-bidget-sbinner
|
examples/playlist.py
|
Python
|
mit
| 8,569 | 0.002451 |
import csv
import os
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse_lazy
from django.utils.decorators import method_decorator
from django.db import models
from converter.exceptions import UploadException
from .models import SystemSource, Reference, ReferenceKeyValue
from django.db import transaction
class TimeStampedModel(models.Model):
"""
    An abstract base class model that provides self-updating
    ``created`` and ``modified`` fields.
"""
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class LoginRequiredMixin:
@method_decorator(login_required(login_url=reverse_lazy("auth:login")))
def dispatch(self, request, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs)
def ss_handle_uploaded_file(f):
filename = f.name
# filepath = os.path.join('/home/niko/' + filename)
filepath = os.path.join('C:/Users/nmorozov/Desktop/1/' + filename)
with open(filepath, 'wb+') as destination:
for chunk in f.chunks():
destination.write(chunk)
with open(filepath, newline='') as csvfile:
iterator = csv.reader(csvfile, delimiter=',', quotechar='|')
with transaction.atomic():
for obj in iterator:
if safe_get(obj, 0) == "system":
ss = SystemSource(code=safe_get(obj, 1),
fullname=safe_get(obj, 2))
ss.save()
elif safe_get(obj, 0) == "reference":
reference = Reference(code=safe_get(obj, 1),
fullname=safe_get(obj, 2),
table_name=safe_get(obj, 3),
table_charset=safe_get(obj, 4),
jdbc_source=safe_get(obj, 5),
replication_sql=safe_get(obj, 6),
master_id=ss)
reference.save()
elif safe_get(obj, 0) == "content":
content = ReferenceKeyValue(key=safe_get(obj, 1),
value=safe_get(obj, 2),
reference_id=reference)
content.save()
else:
raise UploadException("Parse error")
# raise ValidationError('Invalid value', code='invalid')
os.remove(filepath)
def safe_get(_list, _index, _default=""):
try:
return _list[_index]
except IndexError:
return _default
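# Example of the row layout ss_handle_uploaded_file expects, inferred from the branches
# above; the concrete values are hypothetical. Each "reference" row attaches to the most
# recent "system" row, and each "content" row attaches to the most recent "reference" row.
EXAMPLE_UPLOAD_ROWS = [
    ["system", "SRC1", "Source system one"],
    ["reference", "REF1", "Reference one", "ref_table", "utf8", "jdbc_source", "SELECT 1"],
    ["content", "key1", "value1"],
    ["content", "key2", "value2"],
]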
|
valentine20xx/portal
|
converter/utils.py
|
Python
|
gpl-3.0
| 2,852 | 0.001052 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ImportTaxonomies
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-datacatalog
# [START datacatalog_generated_datacatalog_v1_PolicyTagManagerSerialization_ImportTaxonomies_sync]
from google.cloud import datacatalog_v1
def sample_import_taxonomies():
# Create a client
client = datacatalog_v1.PolicyTagManagerSerializationClient()
# Initialize request argument(s)
inline_source = datacatalog_v1.InlineSource()
inline_source.taxonomies.display_name = "display_name_value"
request = datacatalog_v1.ImportTaxonomiesRequest(
inline_source=inline_source,
parent="parent_value",
)
# Make the request
response = client.import_taxonomies(request=request)
# Handle the response
print(response)
# [END datacatalog_generated_datacatalog_v1_PolicyTagManagerSerialization_ImportTaxonomies_sync]
|
googleapis/python-datacatalog
|
samples/generated_samples/datacatalog_generated_datacatalog_v1_policy_tag_manager_serialization_import_taxonomies_sync.py
|
Python
|
apache-2.0
| 1,711 | 0.001753 |
import multiprocessing
import Library.interfaz
import Library.config
import Library.Encriptacion
import handler
import server
try:
config = Library.config.read()
except:
import sys
print("FAILED TO OPEN CONFIG FILE, EXITING")
sys.exit()
man = multiprocessing.Manager()
adios = man.Value(bool, False)
interfaz = Library.interfaz.Interfaz(lang=config["lang"])
hand = handler.Handler(interfaz, adios)
hand.pantalla("INIT", prompt=False)
input("")
key_bits = int(config["key_length"])
hand.pantalla("GENERATING_KEY", args=(key_bits,), prompt=False)
server = server.Server(adios, hand, Library.Encriptacion.genera(key_bits), ip=config["host"], port=int(config["port"]))
g = multiprocessing.Process(target=server.listen)
p = multiprocessing.Process(target=server.server_handler)
p2 = multiprocessing.Process(target=hand.listen, args=(server, ))
p.start()
g.start()
hand.listen(server)
adios.value = True
p.join()
g.join()
server.handler.exit()
|
elan17/irc-terminal
|
server/main.py
|
Python
|
gpl-3.0
| 927 | 0.002157 |
# -*- coding: utf-8 -*-
#
# Viper documentation build configuration file, created by
# sphinx-quickstart on Mon May 5 18:24:15 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Viper'
copyright = u'2014, Claudio Guarnieri'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.1'
# The full version, including alpha/beta/rc tags.
release = '1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Viperdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Viper.tex', u'Viper Documentation',
u'Claudio Guarnieri', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'viper', u'Viper Documentation',
[u'Claudio Guarnieri'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Viper', u'Viper Documentation',
u'Claudio Guarnieri', 'Viper', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
LMSlay/wiper
|
docs/source/conf.py
|
Python
|
bsd-3-clause
| 7,714 | 0.007519 |
# coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online
OpenAPI spec version: 0.4.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class GetOpportunitiesTasksTaskIdOk(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, description=None, name=None, notification=None, task_id=None):
"""
GetOpportunitiesTasksTaskIdOk - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'description': 'str',
'name': 'str',
'notification': 'str',
'task_id': 'int'
}
self.attribute_map = {
'description': 'description',
'name': 'name',
'notification': 'notification',
'task_id': 'task_id'
}
self._description = description
self._name = name
self._notification = notification
self._task_id = task_id
@property
def description(self):
"""
Gets the description of this GetOpportunitiesTasksTaskIdOk.
description string
:return: The description of this GetOpportunitiesTasksTaskIdOk.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this GetOpportunitiesTasksTaskIdOk.
description string
:param description: The description of this GetOpportunitiesTasksTaskIdOk.
:type: str
"""
if description is None:
raise ValueError("Invalid value for `description`, must not be `None`")
self._description = description
@property
def name(self):
"""
Gets the name of this GetOpportunitiesTasksTaskIdOk.
name string
:return: The name of this GetOpportunitiesTasksTaskIdOk.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this GetOpportunitiesTasksTaskIdOk.
name string
:param name: The name of this GetOpportunitiesTasksTaskIdOk.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
@property
def notification(self):
"""
Gets the notification of this GetOpportunitiesTasksTaskIdOk.
notification string
:return: The notification of this GetOpportunitiesTasksTaskIdOk.
:rtype: str
"""
return self._notification
@notification.setter
def notification(self, notification):
"""
Sets the notification of this GetOpportunitiesTasksTaskIdOk.
notification string
:param notification: The notification of this GetOpportunitiesTasksTaskIdOk.
:type: str
"""
if notification is None:
raise ValueError("Invalid value for `notification`, must not be `None`")
self._notification = notification
@property
def task_id(self):
"""
Gets the task_id of this GetOpportunitiesTasksTaskIdOk.
task_id integer
:return: The task_id of this GetOpportunitiesTasksTaskIdOk.
:rtype: int
"""
return self._task_id
@task_id.setter
def task_id(self, task_id):
"""
Sets the task_id of this GetOpportunitiesTasksTaskIdOk.
task_id integer
:param task_id: The task_id of this GetOpportunitiesTasksTaskIdOk.
:type: int
"""
if task_id is None:
raise ValueError("Invalid value for `task_id`, must not be `None`")
self._task_id = task_id
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, GetOpportunitiesTasksTaskIdOk):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
minlexx/pyevemon
|
esi_client/models/get_opportunities_tasks_task_id_ok.py
|
Python
|
gpl-3.0
| 5,602 | 0.001071 |
# -*- coding: utf-8 -*-
import requests
import json
misperrors = {'error': 'Error'}
mispattributes = {'input': ['md5', 'sha1', 'sha256', 'domain', 'url', 'email-src', 'ip-dst|port', 'ip-src|port'], 'output': ['text']}
moduleinfo = {'version': '0.1', 'author': 'Corsin Camichel', 'description': 'Module to search for an IOC on ThreatFox by abuse.ch.', 'module-type': ['hover', 'expansion']}
moduleconfig = []
API_URL = "https://threatfox-api.abuse.ch/api/v1/"
# copied from
# https://github.com/marjatech/threatfox2misp/blob/main/threatfox2misp.py
def confidence_level_to_tag(level: int) -> str:
confidence_tagging = {
0: 'misp:confidence-level="unconfident"',
10: 'misp:confidence-level="rarely-confident"',
37: 'misp:confidence-level="fairly-confident"',
63: 'misp:confidence-level="usually-confident"',
90: 'misp:confidence-level="completely-confident"',
}
confidence_tag = ""
for tag_minvalue, tag in confidence_tagging.items():
if level >= tag_minvalue:
confidence_tag = tag
return confidence_tag
def handler(q=False):
if q is False:
return False
request = json.loads(q)
ret_val = ""
for input_type in mispattributes['input']:
if input_type in request:
to_query = request[input_type]
break
else:
misperrors['error'] = "Unsupported attributes type:"
return misperrors
data = {"query": "search_ioc", "search_term": f"{to_query}"}
response = requests.post(API_URL, data=json.dumps(data))
if response.status_code == 200:
result = json.loads(response.text)
if(result["query_status"] == "ok"):
confidence_tag = confidence_level_to_tag(result["data"][0]["confidence_level"])
ret_val = {'results': [{'types': mispattributes['output'], 'values': [result["data"][0]["threat_type_desc"]], 'tags': [result["data"][0]["malware"], result["data"][0]["malware_printable"], confidence_tag]}]}
return ret_val
def introspection():
return mispattributes
def version():
moduleinfo['config'] = moduleconfig
return moduleinfo
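if __name__ == '__main__':
    # Quick manual test sketch: MISP hands the module a JSON string, so the handler can be
    # exercised directly. The hash below is a hypothetical md5, not a known indicator.
    print(handler(json.dumps({"md5": "098f6bcd4621d373cade4e832627b4f6"})))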
|
VirusTotal/misp-modules
|
misp_modules/modules/expansion/threatfox.py
|
Python
|
agpl-3.0
| 2,146 | 0.001864 |
# -*- coding: utf-8 -*-
from py3Des.pyDes import triple_des, ECB, PAD_PKCS5
class TripleDES:
__triple_des = None
@staticmethod
def init():
TripleDES.__triple_des = triple_des('1234567812345678',
mode=ECB,
IV = '\0\0\0\0\0\0\0\0',
pad=None,
padmode = PAD_PKCS5)
@staticmethod
def encrypt(data):
return TripleDES.__triple_des.encrypt(data)
@staticmethod
def decrypt(data):
return TripleDES.__triple_des.decrypt(data)
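if __name__ == '__main__':
    # Minimal usage sketch: init() must be called once to build the cipher (the key is the
    # hard-coded string above) before encrypt()/decrypt() are used. Depending on the pyDes
    # fork, the plaintext may need to be bytes rather than str.
    TripleDES.init()
    token = TripleDES.encrypt(b'codeMarble')
    print(TripleDES.decrypt(token))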
|
codeMarble/codeMarble_Web
|
codeMarble_Web/codeMarble_py3des.py
|
Python
|
gpl-3.0
| 649 | 0.007704 |