| repo_name (string, 5–92 chars) | path (string, 4–232 chars) | copies (19 classes) | size (string, 4–7 chars) | content (string, 721–1.04M chars) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 15–997) | alpha_frac (float64, 0.25–0.97) | autogenerated (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|
| ejspina/Gene_expression_tools | Python/fasta_transcript2gene.py | 1 | 1865 |
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Eli
#
# Created: 18/12/2013
# Copyright: (c) Eli 2013
# Licence: <your licence>
#-------------------------------------------------------------------------------
def main():
pass
if __name__ == '__main__':
main()
from Bio import SeqIO
transcripts = list(SeqIO.parse(open('testtranscripts.fasta', 'rU'), "fasta"))
genes = open("testgenes.fasta", "w")
def get_longest(isoforms):
# tempisos = open("tempisos.fasta", "w")
# SeqIO.write(isoforms, tempisos, "fasta")
# isoforms = SeqIO.parse(open("tempisos.fasta", "rU"), "fasta")
lengths = []
for record in isoforms:
        print "isoform:", record.id
lengths.append(len(record.seq))
print lengths
most_bases = max(lengths)
print most_bases
for record in isoforms:
if len(record.seq) == most_bases:
            SeqIO.write(record, genes, "fasta")
genes.close()
i, j = 0, 0
isoforms = []
for record in transcripts:
print record.id, len(record.seq), i, j
if transcripts[i] not in isoforms: #Ensure first isoform for any gene is added to list
isoforms.append(transcripts[i])
i+=1
if i < len(transcripts) and transcripts[i] != isoforms[j] and transcripts[i].id.split(".")[0:3] == isoforms[j].id.split(".")[0:3]: #If current transcript is isoform of most recently added to list, add current transcript to list
isoforms.append(transcripts[i])
i+=1
j+=1
else: #If current transcript is not isoform of most recently added to list.
get_longest(isoforms)
isoforms = []
j = 0
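# --- Hedged usage sketch (not part of the original script) ---
# A minimal, self-contained check of the longest-isoform selection above,
# using in-memory Biopython records instead of 'testtranscripts.fasta'.
# The record IDs ("geneA.1.t1", ...) are made-up examples; real IDs only
# need to share their first three dot-separated fields per gene.
if False:  # flip to True to try the sketch in isolation
    from Bio.Seq import Seq
    from Bio.SeqRecord import SeqRecord

    demo_isoforms = [
        SeqRecord(Seq("ATGAAA"), id="geneA.1.t1", description=""),
        SeqRecord(Seq("ATGAAATTTGGG"), id="geneA.1.t2", description=""),
    ]
    genes = open("testgenes.fasta", "a")  # rebind the shared output handle
    get_longest(demo_isoforms)            # writes only the 12 bp isoform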
| gpl-2.0 | 1,572,774,385,450,864,000 | 30.155172 | 235 | 0.510456 | false |
| rajashreer7/autotest-client-tests | linux-tools/libusb/libusb.py | 3 | 1655 |
#!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error, software_manager
sm = software_manager.SoftwareManager()
class libusb(test.test):
"""
Autotest module for testing basic functionality
of libusb
    @author Xu Zheng, zhengxu@cn.ibm.com
"""
version = 1
nfail = 0
path = ''
def initialize(self, test_path=''):
"""
Sets the overall failure counter for the test.
"""
self.nfail = 0
for package in ['gcc', 'libusb-devel']:
if not sm.check_installed(package):
logging.debug("%s missing - trying to install", package)
sm.install(package)
ret_val = subprocess.Popen(['make', 'all'], cwd="%s/libusb" %(test_path))
ret_val.communicate()
if ret_val.returncode != 0:
self.nfail += 1
        logging.info('\n Test initialized successfully')
def run_once(self, test_path=''):
"""
Trigger test run
"""
try:
os.environ["LTPBIN"] = "%s/shared" %(test_path)
ret_val = subprocess.Popen(['./libusb.sh'], cwd="%s/libusb" %(test_path))
ret_val.communicate()
if ret_val.returncode != 0:
self.nfail += 1
except error.CmdError, e:
self.nfail += 1
logging.error("Test Failed: %s", e)
def postprocess(self):
if self.nfail != 0:
logging.info('\n nfails is non-zero')
raise error.TestError('\nTest failed')
else:
logging.info('\n Test completed successfully ')
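# --- Hedged usage sketch (assumption, not part of the upstream test) ---
# autotest client tests are normally driven from a control file, where a
# 'job' object is in scope; the test_path value below is a placeholder and
# must point at the checkout directory that contains libusb/ and libusb.sh.
#
#   job.run_test('libusb', test_path='/path/to/autotest-client-tests/linux-tools')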
| gpl-2.0 | 1,791,443,352,213,579,300 | 28.035088 | 85 | 0.558308 | false |
| OpenDaisy/daisy-api | daisy/api/v1/config_sets.py | 1 | 17500 |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/config_sets endpoint for Daisy v1 API
"""
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPConflict
from webob.exc import HTTPForbidden
from webob.exc import HTTPNotFound
from webob import Response
from daisy.api import policy
import daisy.api.v1
from daisy.api.v1 import controller
from daisy.api.v1 import filters
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
from daisy import i18n
from daisy import notifier
import daisy.registry.client.v1.api as registry
from daisy.api.configset import manager
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
CONF = cfg.CONF
CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format')
CONF.import_opt('container_formats', 'daisy.common.config',
group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')
class Controller(controller.BaseController):
"""
WSGI controller for config_sets resource in Daisy v1 API
The config_sets resource API is a RESTful web service for config_set data. The API
is as follows::
GET /config_sets -- Returns a set of brief metadata about config_sets
GET /config_sets/detail -- Returns a set of detailed metadata about
config_sets
HEAD /config_sets/<ID> -- Return metadata about an config_set with id <ID>
GET /config_sets/<ID> -- Return config_set data for config_set with id <ID>
POST /config_sets -- Store config_set data and return metadata about the
newly-stored config_set
PUT /config_sets/<ID> -- Update config_set metadata and/or upload config_set
data for a previously-reserved config_set
DELETE /config_sets/<ID> -- Delete the config_set with id <ID>
"""
def __init__(self):
self.notifier = notifier.Notifier()
registry.configure_registry_client()
self.policy = policy.Enforcer()
if property_utils.is_property_protection_enabled():
self.prop_enforcer = property_utils.PropertyRules(self.policy)
else:
self.prop_enforcer = None
def _enforce(self, req, action, target=None):
"""Authorize an action against our policies"""
if target is None:
target = {}
try:
self.policy.enforce(req.context, action, target)
except exception.Forbidden:
raise HTTPForbidden()
def _get_filters(self, req):
"""
Return a dictionary of query param filters from the request
:param req: the Request object coming from the wsgi layer
:retval a dict of key/value filters
"""
query_filters = {}
for param in req.params:
if param in SUPPORTED_FILTERS:
query_filters[param] = req.params.get(param)
if not filters.validate(param, query_filters[param]):
raise HTTPBadRequest(_('Bad value passed to filter '
'%(filter)s got %(val)s')
% {'filter': param,
'val': query_filters[param]})
return query_filters
def _get_query_params(self, req):
"""
Extracts necessary query params from request.
:param req: the WSGI Request object
:retval dict of parameters that can be used by registry client
"""
params = {'filters': self._get_filters(req)}
for PARAM in SUPPORTED_PARAMS:
if PARAM in req.params:
params[PARAM] = req.params.get(PARAM)
return params
def _raise_404_if_cluster_deleted(self, req, cluster_id):
cluster = self.get_cluster_meta_or_404(req, cluster_id)
if cluster['deleted']:
msg = _("cluster with identifier %s has been deleted.") % cluster_id
raise HTTPNotFound(msg)
@utils.mutating
def add_config_set(self, req, config_set_meta):
"""
Adds a new config_set to Daisy.
:param req: The WSGI/Webob Request object
:param image_meta: Mapping of metadata about config_set
:raises HTTPBadRequest if x-config_set-name is missing
"""
self._enforce(req, 'add_config_set')
#config_set_id=config_set_meta["id"]
config_set_name = config_set_meta["name"]
config_set_description = config_set_meta["description"]
#print config_set_id
print config_set_name
print config_set_description
config_set_meta = registry.add_config_set_metadata(req.context, config_set_meta)
return {'config_set_meta': config_set_meta}
@utils.mutating
def delete_config_set(self, req, id):
"""
Deletes a config_set from Daisy.
:param req: The WSGI/Webob Request object
:param image_meta: Mapping of metadata about config_set
:raises HTTPBadRequest if x-config_set-name is missing
"""
self._enforce(req, 'delete_config_set')
try:
registry.delete_config_set_metadata(req.context, id)
except exception.NotFound as e:
msg = (_("Failed to find config_set to delete: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to delete config_set: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except exception.InUseByStore as e:
msg = (_("config_set %(id)s could not be deleted because it is in use: "
"%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
LOG.warn(msg)
raise HTTPConflict(explanation=msg,
request=req,
content_type="text/plain")
else:
#self.notifier.info('config_set.delete', config_set)
return Response(body='', status=200)
@utils.mutating
def get_config_set(self, req, id):
"""
Returns metadata about an config_set in the HTTP headers of the
response object
:param req: The WSGI/Webob Request object
:param id: The opaque config_set identifier
:raises HTTPNotFound if config_set metadata is not available to user
"""
self._enforce(req, 'get_config_set')
config_set_meta = self.get_config_set_meta_or_404(req, id)
return {'config_set_meta': config_set_meta}
def detail(self, req):
"""
Returns detailed information for all available config_sets
:param req: The WSGI/Webob Request object
:retval The response body is a mapping of the following form::
{'config_sets': [
{'id': <ID>,
'name': <NAME>,
'description': <DESCRIPTION>,
'created_at': <TIMESTAMP>,
'updated_at': <TIMESTAMP>,
'deleted_at': <TIMESTAMP>|<NONE>,}, ...
]}
"""
self._enforce(req, 'get_config_sets')
params = self._get_query_params(req)
try:
config_sets = registry.get_config_sets_detail(req.context, **params)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return dict(config_sets=config_sets)
@utils.mutating
def update_config_set(self, req, id, config_set_meta):
"""
Updates an existing config_set with the registry.
:param request: The WSGI/Webob Request object
:param id: The opaque image identifier
:retval Returns the updated image information as a mapping
"""
self._enforce(req, 'modify_image')
orig_config_set_meta = self.get_config_set_meta_or_404(req, id)
# Do not allow any updates on a deleted image.
# Fix for LP Bug #1060930
if orig_config_set_meta['deleted']:
msg = _("Forbidden to update deleted config_set.")
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
try:
config_set_meta = registry.update_config_set_metadata(req.context,
id,
config_set_meta)
except exception.Invalid as e:
msg = (_("Failed to update config_set metadata. Got error: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
except exception.NotFound as e:
msg = (_("Failed to find config_set to update: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to update config_set: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except (exception.Conflict, exception.Duplicate) as e:
LOG.warn(utils.exception_to_str(e))
raise HTTPConflict(body=_('config_set operation conflicts'),
request=req,
content_type='text/plain')
else:
self.notifier.info('config_set.update', config_set_meta)
return {'config_set_meta': config_set_meta}
def _raise_404_if_role_exist(self,req,config_set_meta):
role_id_list=[]
try:
roles = registry.get_roles_detail(req.context)
for role in roles:
for role_name in eval(config_set_meta['role']):
if role['cluster_id'] == config_set_meta['cluster'] and role['name'] == role_name:
role_id_list.append(role['id'])
break
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return role_id_list
@utils.mutating
def cluster_config_set_update(self, req, config_set_meta):
if config_set_meta.has_key('cluster'):
orig_cluster = str(config_set_meta['cluster'])
self._raise_404_if_cluster_deleted(req, orig_cluster)
try:
if config_set_meta.get('role',None):
role_id_list=self._raise_404_if_role_exist(req,config_set_meta)
if len(role_id_list) == len(eval(config_set_meta['role'])):
for role_id in role_id_list:
backend=manager.configBackend('clushshell', req, role_id)
backend.push_config()
else:
                        msg = "the role does not exist"
LOG.error(msg)
raise HTTPNotFound(msg)
else:
roles = registry.get_roles_detail(req.context)
for role in roles:
if role['cluster_id'] == config_set_meta['cluster']:
backend=manager.configBackend('clushshell', req, role['id'])
backend.push_config()
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
config_status={"status":"config successful"}
return {'config_set':config_status}
else:
            msg = "the cluster does not exist"
LOG.error(msg)
raise HTTPNotFound(msg)
@utils.mutating
def cluster_config_set_progress(self, req, config_set_meta):
role_list = []
if config_set_meta.has_key('cluster'):
orig_cluster = str(config_set_meta['cluster'])
self._raise_404_if_cluster_deleted(req, orig_cluster)
try:
if config_set_meta.get('role',None):
role_id_list=self._raise_404_if_role_exist(req,config_set_meta)
if len(role_id_list) == len(eval(config_set_meta['role'])):
for role_id in role_id_list:
role_info = {}
role_meta=registry.get_role_metadata(req.context, role_id)
role_info['role-name']=role_meta['name']
role_info['config_set_update_progress']=role_meta['config_set_update_progress']
role_list.append(role_info)
else:
                        msg = "the role does not exist"
LOG.error(msg)
raise HTTPNotFound(msg)
else:
roles = registry.get_roles_detail(req.context)
for role in roles:
if role['cluster_id'] == config_set_meta['cluster']:
role_info = {}
role_info['role-name']=role['name']
role_info['config_set_update_progress']=role['config_set_update_progress']
role_list.append(role_info)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return role_list
else:
            msg = "the cluster does not exist"
LOG.error(msg)
raise HTTPNotFound(msg)
class Config_setDeserializer(wsgi.JSONRequestDeserializer):
"""Handles deserialization of specific controller method requests."""
def _deserialize(self, request):
result = {}
result["config_set_meta"] = utils.get_config_set_meta(request)
return result
def add_config_set(self, request):
return self._deserialize(request)
def update_config_set(self, request):
return self._deserialize(request)
def cluster_config_set_update(self, request):
return self._deserialize(request)
def cluster_config_set_progress(self, request):
return self._deserialize(request)
class Config_setSerializer(wsgi.JSONResponseSerializer):
"""Handles serialization of specific controller method responses."""
def __init__(self):
self.notifier = notifier.Notifier()
def add_config_set(self, response, result):
config_set_meta = result['config_set_meta']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(config_set=config_set_meta))
return response
def delete_config_set(self, response, result):
config_set_meta = result['config_set_meta']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(config_set=config_set_meta))
return response
def get_config_set(self, response, result):
config_set_meta = result['config_set_meta']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(config_set=config_set_meta))
return response
def cluster_config_set_update(self, response, result):
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(result)
return response
def cluster_config_set_progress(self, response, result):
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(config_set=result))
return response
def create_resource():
"""config_sets resource factory method"""
deserializer = Config_setDeserializer()
serializer = Config_setSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)
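# --- Hedged usage sketch (not part of the Daisy source) ---
# Rough illustration of driving the /config_sets endpoint described in the
# Controller docstring with python-requests (external dependency, assumed
# available). Host, port and the exact wire format are assumptions; the body
# only mirrors the fields read by add_config_set() above.
if __name__ == '__main__':
    import json
    import requests

    base = 'http://127.0.0.1:19292/v1'  # assumed endpoint, adjust as needed
    body = {'config_set': {'name': 'demo', 'description': 'example set'}}
    resp = requests.post('%s/config_sets' % base, data=json.dumps(body),
                         headers={'Content-Type': 'application/json'})
    print resp.status_code, resp.text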
| apache-2.0 | -2,054,898,294,051,990,000 | 39.322581 | 107 | 0.571771 | false |
| fmonjalet/miasm | miasm2/ir/ir.py | 1 | 11474 |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#
# Copyright (C) 2013 Fabrice Desclaux
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import miasm2.expression.expression as m2_expr
from miasm2.expression.expression_helper import get_missing_interval
from miasm2.core import asmbloc
from miasm2.expression.simplifications import expr_simp
from miasm2.core.asmbloc import asm_symbol_pool
class irbloc(object):
def __init__(self, label, irs, lines = []):
assert(isinstance(label, asmbloc.asm_label))
self.label = label
self.irs = irs
self.lines = lines
self.except_automod = True
self._dst = None
self._dst_linenb = None
def _get_dst(self):
"""Find the IRDst affectation and update dst, dst_linenb accordingly"""
if self._dst is not None:
return self._dst
dst = None
for linenb, ir in enumerate(self.irs):
for i in ir:
if isinstance(i.dst, m2_expr.ExprId) and i.dst.name == "IRDst":
if dst is not None:
raise ValueError('Multiple destinations!')
dst = i.src
dst_linenb = linenb
self._dst = dst
        self._dst_linenb = dst_linenb
return dst
def _set_dst(self, value):
"""Find and replace the IRDst affectation's source by @value"""
if self._dst_linenb is None:
self._get_dst()
ir = self.irs[self._dst_linenb]
for i, expr in enumerate(ir):
if isinstance(expr.dst, m2_expr.ExprId) and expr.dst.name == "IRDst":
ir[i] = m2_expr.ExprAff(expr.dst, value)
self._dst = value
dst = property(_get_dst, _set_dst)
@property
def dst_linenb(self):
"""Line number of the IRDst setting statement in the current irs"""
return self._dst_linenb
def get_rw(self, regs_ids):
"""
Computes the variables read and written by each instructions
Initialize attributes needed for in/out and reach computation.
@regs_ids : ids of registers used in IR
"""
self.r = []
self.w = []
self.cur_reach = [{reg: set() for reg in regs_ids}
for _ in xrange(len(self.irs))]
self.prev_reach = [{reg: set() for reg in regs_ids}
for _ in xrange(len(self.irs))]
self.cur_kill = [{reg: set() for reg in regs_ids}
for _ in xrange(len(self.irs))]
self.prev_kill = [{reg: set() for reg in regs_ids}
for _ in xrange(len(self.irs))]
self.defout = [{reg: set() for reg in regs_ids}
for _ in xrange(len(self.irs))]
for k, ir in enumerate(self.irs):
r, w = set(), set()
for i in ir:
r.update(x for x in i.get_r(True)
if isinstance(x, m2_expr.ExprId))
w.update(x for x in i.get_w()
if isinstance(x, m2_expr.ExprId))
if isinstance(i.dst, m2_expr.ExprMem):
r.update(x for x in i.dst.arg.get_r(True)
if isinstance(x, m2_expr.ExprId))
self.defout[k].update((x, {(self.label, k, i)})
for x in i.get_w()
if isinstance(x, m2_expr.ExprId))
self.r.append(r)
self.w.append(w)
def __str__(self):
o = []
o.append('%s' % self.label)
for expr in self.irs:
for e in expr:
o.append('\t%s' % e)
o.append("")
return "\n".join(o)
class ir(object):
def __init__(self, arch, attrib, symbol_pool=None):
if symbol_pool is None:
symbol_pool = asm_symbol_pool()
self.symbol_pool = symbol_pool
self.blocs = {}
self.pc = arch.getpc(attrib)
self.sp = arch.getsp(attrib)
self.arch = arch
self.attrib = attrib
def instr2ir(self, l):
ir_bloc_cur, ir_blocs_extra = self.get_ir(l)
return ir_bloc_cur, ir_blocs_extra
def get_label(self, ad):
"""Transforms an ExprId/ExprInt/label/int into a label
@ad: an ExprId/ExprInt/label/int"""
if (isinstance(ad, m2_expr.ExprId) and
isinstance(ad.name, asmbloc.asm_label)):
ad = ad.name
if isinstance(ad, m2_expr.ExprInt):
ad = int(ad.arg)
if type(ad) in [int, long]:
ad = self.symbol_pool.getby_offset_create(ad)
elif isinstance(ad, asmbloc.asm_label):
ad = self.symbol_pool.getby_name_create(ad.name)
return ad
def get_bloc(self, ad):
"""Returns the irbloc associated to an ExprId/ExprInt/label/int
@ad: an ExprId/ExprInt/label/int"""
label = self.get_label(ad)
return self.blocs.get(label, None)
def add_instr(self, l, ad=0, gen_pc_updt = False):
b = asmbloc.asm_bloc(l)
b.lines = [l]
self.add_bloc(b, gen_pc_updt)
def merge_multi_affect(self, affect_list):
"""
If multiple affection to a same ExprId are present in @affect_list,
merge them (in place).
For instance, XCGH AH, AL semantic is
[
RAX = {RAX[0:8],0,8, RAX[0:8],8,16, RAX[16:64],16,64}
RAX = {RAX[8:16],0,8, RAX[8:64],8,64}
]
This function will update @affect_list to replace previous ExprAff by
[
RAX = {RAX[8:16],0,8, RAX[0:8],8,16, RAX[16:64],16,64}
]
"""
# Extract side effect
effect = {}
for expr in affect_list:
effect[expr.dst] = effect.get(expr.dst, []) + [expr]
# Find candidates
for dst, expr_list in effect.items():
if len(expr_list) <= 1:
continue
# Only treat ExprCompose list
if any(map(lambda e: not(isinstance(e.src, m2_expr.ExprCompose)),
expr_list)):
continue
# Find collision
e_colision = reduce(lambda x, y: x.union(y),
(e.get_modified_slice() for e in expr_list),
set())
# Sort interval collision
known_intervals = sorted([(x[1], x[2]) for x in e_colision])
# Fill with missing data
missing_i = get_missing_interval(known_intervals, 0, dst.size)
remaining = ((m2_expr.ExprSlice(dst, *interval),
interval[0],
interval[1])
for interval in missing_i)
# Build the merging expression
slices = sorted(e_colision.union(remaining), key=lambda x: x[1])
final_dst = m2_expr.ExprCompose(slices)
# Remove unused expression
for expr in expr_list:
affect_list.remove(expr)
# Add the merged one
affect_list.append(m2_expr.ExprAff(dst, final_dst))
def getby_offset(self, offset):
out = set()
for irb in self.blocs.values():
for l in irb.lines:
if l.offset <= offset < l.offset + l.l:
out.add(irb)
return out
def gen_pc_update(self, c, l):
c.irs.append([m2_expr.ExprAff(self.pc, m2_expr.ExprInt_from(self.pc,
l.offset))])
c.lines.append(l)
def add_bloc(self, bloc, gen_pc_updt = False):
c = None
ir_blocs_all = []
for l in bloc.lines:
if c is None:
label = self.get_instr_label(l)
c = irbloc(label, [], [])
ir_blocs_all.append(c)
ir_bloc_cur, ir_blocs_extra = self.instr2ir(l)
if gen_pc_updt is not False:
self.gen_pc_update(c, l)
c.irs.append(ir_bloc_cur)
c.lines.append(l)
if ir_blocs_extra:
for b in ir_blocs_extra:
b.lines = [l] * len(b.irs)
ir_blocs_all += ir_blocs_extra
c = None
self.post_add_bloc(bloc, ir_blocs_all)
return ir_blocs_all
def expr_fix_regs_for_mode(self, e, *args, **kwargs):
return e
def expraff_fix_regs_for_mode(self, e, *args, **kwargs):
return e
def irbloc_fix_regs_for_mode(self, irbloc, *args, **kwargs):
return
def is_pc_written(self, b):
all_pc = self.arch.pc.values()
for irs in b.irs:
for ir in irs:
if ir.dst in all_pc:
return ir
return None
def set_empty_dst_to_next(self, bloc, ir_blocs):
for b in ir_blocs:
if b.dst is not None:
continue
dst = m2_expr.ExprId(self.get_next_label(bloc.lines[-1]),
self.pc.size)
b.irs.append([m2_expr.ExprAff(self.IRDst, dst)])
b.lines.append(b.lines[-1])
def gen_edges(self, bloc, ir_blocs):
pass
def post_add_bloc(self, bloc, ir_blocs):
self.set_empty_dst_to_next(bloc, ir_blocs)
self.gen_edges(bloc, ir_blocs)
for irb in ir_blocs:
self.irbloc_fix_regs_for_mode(irb, self.attrib)
# Detect multi-affectation
for affect_list in irb.irs:
self.merge_multi_affect(affect_list)
self.blocs[irb.label] = irb
def get_instr_label(self, instr):
"""Returns the label associated to an instruction
@instr: current instruction"""
return self.symbol_pool.getby_offset_create(instr.offset)
def gen_label(self):
# TODO: fix hardcoded offset
l = self.symbol_pool.gen_label()
return l
def get_next_label(self, instr):
l = self.symbol_pool.getby_offset_create(instr.offset + instr.l)
return l
def simplify_blocs(self):
for b in self.blocs.values():
for ir in b.irs:
for i, r in enumerate(ir):
ir[i] = m2_expr.ExprAff(expr_simp(r.dst), expr_simp(r.src))
def replace_expr_in_ir(self, bloc, rep):
for irs in bloc.irs:
for i, l in enumerate(irs):
irs[i] = l.replace_expr(rep)
def get_rw(self, regs_ids = []):
"""
Calls get_rw(irb) for each bloc
@regs_ids : ids of registers used in IR
"""
for b in self.blocs.values():
b.get_rw(regs_ids)
def ExprIsLabel(self, l):
return isinstance(l, m2_expr.ExprId) and isinstance(l.name,
asmbloc.asm_label)
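# --- Hedged usage sketch (not part of miasm2) ---
# Tiny illustration of the irbloc.dst property defined above: a block's
# destination is the source of its IRDst assignment. The label name, the
# 32-bit size and 'loc_next' are arbitrary example values.
if __name__ == '__main__':
    pool = asm_symbol_pool()
    lbl = pool.getby_name_create('loc_example')
    ir_dst = m2_expr.ExprId('IRDst', 32)
    nxt = m2_expr.ExprId('loc_next', 32)
    blk = irbloc(lbl, [[m2_expr.ExprAff(ir_dst, nxt)]])
    print blk.dst  # -> loc_next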
| gpl-2.0 | 2,692,302,315,748,242,000 | 32.846608 | 81 | 0.531811 | false |
| OliverWalter/amdtk | amdtk/models/normal_gamma.py | 1 | 2962 |
""" Normal-Gamma density."""
import numpy as np
from scipy.special import gammaln, psi
class NormalGamma(object):
"""Normal-Gamma density.
Attributes
----------
mu : numpy.ndarray
Mean of the Gaussian density.
kappa : float
Factor of the precision matrix.
alpha : float
Shape parameter of the Gamma density.
beta : numpy.ndarray
Rate parameters of the Gamma density.
Methods
-------
expLogPrecision()
Expected value of the logarithm of the precision.
expPrecision()
Expected value of the precision.
KL(self, pdf)
KL divergence between the current and the given densities.
newPosterior(self, stats)
Create a new Normal-Gamma density.
"""
def __init__(self, mu, kappa, alpha, beta):
self.mu = mu
self.kappa = kappa
self.alpha = alpha
self.beta = beta
def expLogPrecision(self):
'''Expected value of the logarithm of the precision.
Returns
-------
E_log_prec : numpy.ndarray
Log precision.
'''
return psi(self.alpha) - np.log(self.beta)
def expPrecision(self):
"""Expected value of the precision.
Returns
-------
E_prec : numpy.ndarray
Precision.
"""
return self.alpha/self.beta
def KL(self, q):
"""KL divergence between the current and the given densities.
Returns
-------
KL : float
KL divergence.
"""
p = self
exp_lambda = p.expPrecision()
exp_log_lambda = p.expLogPrecision()
return (.5 * (np.log(p.kappa) - np.log(q.kappa))
- .5 * (1 - q.kappa * (1./p.kappa + exp_lambda * (p.mu - q.mu)**2))
- (gammaln(p.alpha) - gammaln(q.alpha))
+ (p.alpha * np.log(p.beta) - q.alpha * np.log(q.beta))
+ exp_log_lambda * (p.alpha - q.alpha)
- exp_lambda * (p.beta - q.beta)).sum()
def newPosterior(self, stats):
"""Create a new Normal-Gamma density.
Create a new Normal-Gamma density given the parameters of the
current model and the statistics provided.
Parameters
----------
stats : :class:MultivariateGaussianDiagCovStats
Accumulated sufficient statistics for the update.
Returns
-------
post : :class:NormalGamma
New Dirichlet density.
"""
# stats[0]: counts
# stats[1]: sum(x)
# stats[2]: sum(x**2)
kappa_n = self.kappa + stats[0]
mu_n = (self.kappa * self.mu + stats[1]) / kappa_n
alpha_n = self.alpha + .5 * stats[0]
v = (self.kappa * self.mu + stats[1])**2
v /= (self.kappa + stats[0])
beta_n = self.beta + 0.5*(-v + stats[2] + self.kappa * self.mu**2)
return NormalGamma(mu_n, kappa_n, alpha_n, beta_n)
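# --- Hedged usage sketch (not part of the AMDTK source) ---
# Minimal numeric walk-through of the posterior update above, with made-up
# data. stats follows the layout documented in newPosterior():
# [count, sum(x), sum(x**2)] accumulated over the observations.
if __name__ == '__main__':
    x = np.array([0.5, 1.5, 2.0, 1.0])
    prior = NormalGamma(mu=np.zeros(1), kappa=1.0, alpha=1.0, beta=np.ones(1))
    stats = [len(x), x.sum(), (x ** 2).sum()]
    post = prior.newPosterior(stats)
    print 'posterior mean:', post.mu
    print 'expected precision:', post.expPrecision()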
| bsd-2-clause | 3,954,424,811,886,270,000 | 26.425926 | 83 | 0.540176 | false |
| priyaganti/rockstor-core | src/rockstor/storageadmin/urls/commands.py | 1 | 1347 |
"""
Copyright (c) 2012-2017 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.conf.urls import patterns, url
from storageadmin.views import CommandView
valid_commands = ('uptime|bootstrap|utcnow|update-check|update|'
'current-version|shutdown|reboot|kernel|current-user|auto-update-status' # noqa E501
'|enable-auto-update|disable-auto-update|refresh-pool-state'
'|refresh-share-state|refresh-snapshot-state')
urlpatterns = patterns(
'',
url(r'(?P<command>%s)$' % valid_commands, CommandView.as_view(),
name='user-view'),
url(r'(?P<command>shutdown|suspend)/(?P<rtcepoch>\d+)$',
CommandView.as_view(), name='user-view'),
)
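# --- Hedged usage note (assumption, not part of the RockStor source) ---
# These patterns are included under the storageadmin API prefix defined
# elsewhere in the project (not shown in this file), so the commands above
# map to endpoints such as:
#
#   <api-prefix>/commands/uptime
#   <api-prefix>/commands/bootstrap
#   <api-prefix>/commands/shutdown/1500000000   # shutdown|suspend with an rtcepoch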
| gpl-3.0 | -4,425,011,777,252,937,000 | 39.818182 | 103 | 0.717892 | false |
| jpwilliams/appenlight-client-python | appenlight_client/timing/__init__.py | 1 | 7630 |
from __future__ import absolute_import
import logging
import time
import threading
from appenlight_client.utils import import_from_module
from datetime import datetime, timedelta
from functools import wraps
from operator import itemgetter
default_timer = time.time
class AppenlightLocalStorage(object):
def __init__(self):
self.cls_storage = {}
def contains(self, parent, child):
return (child['start'] >= parent['start'] and
child['end'] <= parent['end'])
def get_stack(self, slow_calls=None):
if not slow_calls:
data = sorted(self.slow_calls, key=itemgetter('start'))
else:
data = slow_calls
stack = []
for node in data:
while stack and not self.contains(stack[-1], node):
stack.pop()
node['parents'] = [n['type'] for n in stack]
stack.append(node)
return data
def get_thread_storage(self, thread=None):
if thread is None:
thread = threading.currentThread()
if thread not in self.cls_storage:
self.cls_storage[thread] = {'last_updated': datetime.utcnow(),
'logs': []}
self.clear()
return self.cls_storage[thread]
@property
def logs(self):
return self.get_thread_storage()['logs']
@logs.setter
def logs(self, value):
self.get_thread_storage()['logs'] = value
self.get_thread_storage()['last_updated'] = datetime.utcnow()
@property
def view_name(self):
return self.get_thread_storage()['view_name']
@view_name.setter
def view_name(self, value):
self.get_thread_storage()['view_name'] = value
self.get_thread_storage()['last_updated'] = datetime.utcnow()
@property
def slow_calls(self):
return self.get_thread_storage()['slow_calls']
@slow_calls.setter
def slow_calls(self, value):
self.get_thread_storage()['slow_calls'] = value
self.get_thread_storage()['last_updated'] = datetime.utcnow()
@property
def thread_stats(self):
return self.get_thread_storage()['thread_stats']
@thread_stats.setter
def thread_stats(self, value):
self.get_thread_storage()['thread_stats'] = value
self.get_thread_storage()['last_updated'] = datetime.utcnow()
def clear(self):
self.thread_stats = {'main': 0, 'sql': 0, 'nosql': 0, 'remote': 0,
'tmpl': 0, 'unknown': 0, 'sql_calls': 0,
'nosql_calls': 0, 'remote_calls': 0,
'tmpl_calls': 0, 'custom': 0, 'custom_calls': 0}
self.slow_calls = []
self.view_name = ''
def get_thread_stats(self):
""" resets thread stats at same time """
stats = self.thread_stats.copy()
slow_calls = []
for row in self.get_stack():
duration = row['end'] - row['start']
if row['ignore_in'].intersection(row['parents']):
# this means that it is used internally in other lib
continue
if row.get('count'):
stats['%s_calls' % row['type']] += 1
# if this call was being made inside template - substract duration
# from template timing
is_nested_template = 'tmpl' in row['parents']
is_nested_custom = 'custom' in row['parents']
if not is_nested_template and not is_nested_custom:
stats[row['type']] += duration
if duration >= row['min_duration']:
slow_calls.append(row)
# round stats to 5 digits
for k, v in stats.iteritems():
stats[k] = round(v, 5)
return stats, slow_calls
def cycle_old_data(self, older_than=60):
        boundary = datetime.utcnow() - timedelta(minutes=older_than)
        for k, v in self.cls_storage.items():
            if v['last_updated'] < boundary:
                del self.cls_storage[k]
TIMING_REGISTERED = False
local_timing = threading.local()
appenlight_storage = AppenlightLocalStorage()
log = logging.getLogger(__name__)
def get_local_storage(local_timing=None):
return appenlight_storage
def _e_trace(info_gatherer, min_duration, e_callable, *args, **kw):
""" Used to wrap dbapi2 driver methods """
start = default_timer()
result = e_callable(*args, **kw)
end = default_timer()
info = {'start': start,
'end': end,
'min_duration': min_duration}
info.update(info_gatherer(*args, **kw))
appenlight_storage = get_local_storage()
if len(appenlight_storage.slow_calls) < 1000:
appenlight_storage.slow_calls.append(info)
return result
def time_trace(gatherer=None, min_duration=0.1, is_template=False, name=None):
if gatherer is None:
if not name:
name = 'Unnamed callable'
def gatherer(*args, **kwargs):
return {'type': 'custom',
'subtype': 'user_defined',
'statement': name,
'parameters': '',
'count': True,
'ignore_in': set()}
def decorator(appenlight_callable):
@wraps(appenlight_callable)
def wrapper(*args, **kwargs):
start = default_timer()
result = appenlight_callable(*args, **kwargs)
end = default_timer()
info = {'start': start,
'end': end,
'min_duration': min_duration}
info.update(gatherer(*args, **kwargs))
appenlight_storage = get_local_storage()
if len(appenlight_storage.slow_calls) < 500:
appenlight_storage.slow_calls.append(info)
return result
# will prevent this wrapper being decorated again
wrapper._e_attached_tracer = True
if is_template:
wrapper._e_is_template = True
return wrapper
return decorator
def register_timing(config):
timing_modules = ['timing_urllib', 'timing_urllib2', 'timing_urllib3',
'timing_requests', 'timing_httplib', 'timing_pysolr',
'timing_chameleon', 'timing_mako', 'timing_jinja2',
'timing_pymongo', 'timing_redispy', 'timing_memcache',
'timing_django_templates']
for mod in timing_modules:
min_time = config['timing'].get(mod.replace("timing_", '').lower())
if min_time is not False:
log.debug('%s slow time:%s' % (mod, min_time or 'default'))
e_callable = import_from_module(
'appenlight_client.timing.%s:add_timing' % mod)
if e_callable:
if min_time:
e_callable(min_time)
else:
e_callable()
else:
log.debug('not tracking slow time:%s' % mod)
db_modules = ['pg8000', 'psycopg2', 'MySQLdb', 'sqlite3', 'oursql',
'pyodbc', 'pypyodbc',
'cx_Oracle', 'kinterbasdb', 'postgresql', 'pymysql', 'pymssql']
import appenlight_client.timing.timing_dbapi2 as dbapi2
for mod in db_modules:
min_time = config['timing'].get('dbapi2_%s' % mod.lower())
log.debug('%s dbapi query time:%s' % (mod, min_time or 'default'))
if min_time is not False:
if mod == 'sqlite3' and not min_time:
continue
elif min_time:
dbapi2.add_timing(mod, min_time)
else:
dbapi2.add_timing(mod)
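# --- Hedged usage sketch (not part of the upstream client) ---
# Shows the time_trace decorator defined above recording a call into the
# thread-local storage; the function name and threshold are arbitrary.
if __name__ == '__main__':
    @time_trace(name='demo sleep', min_duration=0.01)
    def _demo():
        time.sleep(0.05)

    _demo()
    storage = get_local_storage()
    for call in storage.slow_calls:
        print call['statement'], round(call['end'] - call['start'], 3)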
| bsd-3-clause | 8,553,453,150,697,994,000 | 33.524887 | 82 | 0.554391 | false |
| marchon/flask-virtualenv | setup.py | 1 | 1029 |
#!/usr/bin/env python
"""
https://github.com/imlucas/flask-virtualenv
"""
from setuptools import setup
setup(
name='Flask-Virtualenv',
version='0.2.0',
url='http://imlucas.com',
license='BSD',
author='Lucas Hrabovsky',
author_email='hrabovsky.lucas@gmail.com',
description='Manage a virtualenv for your flask app.',
long_description=__doc__,
packages=['flaskext'],
namespace_packages=['flaskext'],
zip_safe=False,
platforms='any',
install_requires=[
'Flask',
'virtualenv>=1.5.1'
],
test_suite='nose.collector',
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'License :: OSI Approved :: {{ license }} License',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| bsd-3-clause | -7,406,923,368,084,122,000 | 27.583333 | 70 | 0.608358 | false |
| artursmet/climbing-contest | contest/views.py | 1 | 2281 |
#coding: utf-8
from __future__ import unicode_literals
import csv
from django.template.response import TemplateResponse
from django.shortcuts import redirect, get_object_or_404
from django.contrib import messages
from django.http import HttpResponse
from django.contrib.auth.decorators import user_passes_test
from contest.models import Contest
from contest.forms import ContestantForm
def subscribe(request, contest_pk):
contest = get_object_or_404(Contest, pk=contest_pk)
if not contest.is_active():
messages.error(request, 'Zapisy się zakończyły')
return redirect('homepage')
if request.method == 'POST':
form = ContestantForm(contest=contest, data=request.POST)
if form.is_valid():
subscribtion = form.save(commit=False)
# toDO e-mail
subscribtion.save()
messages.success(request, 'Dziękujemy za zgłoszenie')
return redirect('all_groups', contest_pk=contest.pk)
else:
form = ContestantForm(contest=contest)
return TemplateResponse(request, 'contest/form.html',
{'form': form, 'contest': contest})
def all_groups(request, contest_pk):
contest = Contest.objects.get(pk=contest_pk)
return TemplateResponse(request, 'contest/all_groups.html',
{'contest': contest})
@user_passes_test(lambda u: u.is_staff)
def create_csv_list(request, contest_pk):
contest = get_object_or_404(Contest, pk=contest_pk)
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = "attachment; filename='zawodnicy.csv'"
writer = csv.writer(response)
writer.writerow([
'Imię'.encode('utf-8'),
'Nazwisko',
'Płeć',
'Wiek',
'Klub/Sponsor',
'Koszulka',
'Grupa']
)
for group in contest.group_set.all():
for person in group.contestant_set.all():
writer.writerow([
person.first_name.encode('utf-8'),
person.surname.encode('utf-8'),
person.get_gender_display().encode('utf-8'),
person.age,
person.sponsor.encode('utf-8'),
person.group.name.encode('utf-8'),
person.shirt_size
])
return response
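# --- Hedged usage sketch (assumption, not part of this app) ---
# Example Django 1.x URLconf wiring for the views above. The 'all_groups'
# name is required by the redirect() call in subscribe(); the regexes and
# the CSV route name are made up for illustration.
#
# from django.conf.urls import url
# from contest import views
#
# urlpatterns = [
#     url(r'^contest/(?P<contest_pk>\d+)/subscribe/$', views.subscribe,
#         name='subscribe'),
#     url(r'^contest/(?P<contest_pk>\d+)/groups/$', views.all_groups,
#         name='all_groups'),
#     url(r'^contest/(?P<contest_pk>\d+)/contestants\.csv$',
#         views.create_csv_list, name='contestants_csv'),
# ]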
| apache-2.0 | -1,803,734,999,262,948,000 | 31.942029 | 76 | 0.636164 | false |
| tedmeeds/tcga_encoder | tcga_encoder/utils/helpers.py | 1 | 3776 |
import tensorflow
import tcga_encoder
import sys, os, yaml
import numpy as np
import scipy as sp
import pylab as pp
import pandas as pd
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.model_selection import KFold
from collections import *
import itertools
import pdb
def xval_folds( n, K, randomize = False, seed = None ):
if randomize is True:
print("XVAL RANDOMLY PERMUTING")
if seed is not None:
print( "XVAL SETTING SEED = %d"%(seed) )
np.random.seed(seed)
x = np.random.permutation(n)
else:
print( "XVAL JUST IN ARANGE ORDER")
x = np.arange(n,dtype=int)
kf = KFold( K )
train = []
test = []
for train_ids, test_ids in kf.split( x ):
#train_ids = np.setdiff1d( x, test_ids )
train.append( x[train_ids] )
test.append( x[test_ids] )
#pdb.set_trace()
return train, test
def chunks(l, n):
#Yield successive n-sized chunks from l.
for i in xrange(0, len(l), n):
yield l[i:i + n]
def load_gmt( filename ):
with open(filename, 'r') as f:
pathway2genes = OrderedDict()
gene2pathways = OrderedDict()
for line in f.readlines():
splits = line.split("\t")
splits[-1] = splits[-1].rstrip("\n")
#pdb.set_trace()
if splits[0][:9] == "HALLMARK_":
pathway = splits[0][9:]
link = splits[1]
genes = splits[2:]
pathway2genes[ pathway ] = genes
for g in genes:
if gene2pathways.has_key( g ):
gene2pathways[g].append( pathway )
else:
gene2pathways[g] = [pathway]
return pathway2genes, gene2pathways
def load_yaml( filename ):
with open(filename, 'r') as f:
data = yaml.load(f)
return data
def check_and_mkdir( path_name, verbose = False ):
ok = False
if os.path.exists( path_name ) == True:
ok = True
else:
if verbose:
print "Making directory: ", path_name
os.makedirs( path_name )
ok = True
return ok
def ReadH5( fullpath ):
df = pd.read_hdf( fullpath )
return df
def OpenHdfStore(location, which_one, mode ):
store_name = "%s.h5"%(which_one)
check_and_mkdir( location )
full_name = os.path.join( location, store_name )
# I think we can just open in 'a' mode for both
if os.path.exists(full_name) is False:
print "OpenHdfStore: %s does NOT EXIST, opening in %s mode"%(full_name, mode)
return pd.HDFStore( full_name, mode )
else:
print "OpenHdfStore: %s does EXISTS, opening in %s mode"%(full_name, mode)
return pd.HDFStore( full_name, mode )
def CloseHdfStore(store):
return store.close()
# a generator for batch ids
class batch_ids_maker:
def __init__(self, batchsize, n, randomize = True):
self.batchsize = min( batchsize, n )
#assert n >= batchsize, "Right now must have batchsize < n"
self.randomize = randomize
#self.batchsize = batchsize
self.n = n
self.indices = self.new_indices()
self.start_idx = 0
def __iter__(self):
return self
def new_indices(self):
if self.randomize:
return np.random.permutation(self.n).astype(int)
else:
return np.arange(self.n,dtype=int)
def next(self, weights = None ):
if weights is not None:
return self.weighted_next( weights )
if self.start_idx+self.batchsize >= len(self.indices):
keep_ids = self.indices[self.start_idx:]
self.indices = np.hstack( (keep_ids, self.new_indices() ))
self.start_idx = 0
ids = self.indices[self.start_idx:self.start_idx+self.batchsize]
self.start_idx += self.batchsize
return ids
def weighted_next( self, weights ):
I = np.argsort( -weights )
ids = self.indices[ I[:self.batchsize] ]
return ids
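# --- Hedged usage sketch (not part of the original module) ---
# Quick demonstration of the two helpers above on toy sizes; the numbers are
# arbitrary. Each fold's train/test indices partition range(n), and the batch
# generator keeps yielding fixed-size id batches, reshuffling when exhausted.
if __name__ == "__main__":
    train_folds, test_folds = xval_folds(10, K=5, randomize=True, seed=0)
    print "first fold train:", train_folds[0]
    print "first fold test :", test_folds[0]

    batches = batch_ids_maker(batchsize=4, n=10, randomize=False)
    for _ in range(3):
        print "batch:", batches.next()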
| mit | 4,324,703,122,337,669,600 | 26.172662 | 81 | 0.619174 | false |
| sonoble/fboss | fboss/agent/tools/fboss_route.py | 1 | 6527 |
#!/usr/bin/env python
# Copyright (C) 2004-present Facebook. All Rights Reserved
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
"""Add, change, or delete a route on FBOSS controller
"""
import contextlib
import ipaddr
import pdb
from argparse import ArgumentParser, ArgumentError
from contextlib import contextmanager
from thrift.transport import TSocket
from thrift.protocol import TBinaryProtocol
from fboss.ctrl import FbossCtrl
from fboss.ctrl.ttypes import IpPrefix
from fboss.ctrl.ttypes import UnicastRoute
from facebook.network.Address.ttypes import BinaryAddress
DEFAULT_CLIENTID = 1
def parse_prefix(args):
network = ipaddr.IPNetwork(args.prefix)
return IpPrefix(ip=BinaryAddress(addr=network.ip.packed),
prefixLength=network.prefixlen)
def parse_nexthops(args):
return [BinaryAddress(addr=ipaddr.IPAddress(nh).packed)
for nh in args.nexthop]
def flush_routes(args):
with get_client(args) as client:
client.syncFib(args.client, [])
def add_route(args):
prefix = parse_prefix(args)
nexthops = parse_nexthops(args)
with get_client(args) as client:
client.addUnicastRoutes(
args.client, [UnicastRoute(dest=prefix, nextHopAddrs=nexthops)])
def del_route(args):
prefix = parse_prefix(args)
with get_client(args) as client:
client.deleteUnicastRoutes(args.client, [prefix])
def list_intf(args):
details = args.details
with get_client(args) as client:
#for intf in client.getInterfaceList():
for idx, intf in client.getAllInterfaces().iteritems():
print ("L3 Interface %d: %s" % (idx, str(intf)))
def list_routes(args):
details = args.details
with get_client(args) as client:
for route in client.getRouteTable():
print ("Route %s" % route)
def list_optics(args):
details = args.details
with get_client(args) as client:
for key,val in client.getTransceiverInfo(range(1,64)).iteritems():
print ("Optic %d: %s" % (key, str(val)))
def list_ports(args):
details = args.details
with get_client(args) as client:
#for intf in client.getInterfaceList():
for idx, intf in client.getPortStatus(range(1,64)).iteritems():
stats = client.getPortStats(idx) if details else ""
print ("Port %d: %s: %s" % (idx, str(intf), stats))
def list_arps(args):
details = args.details
with get_client(args) as client:
#for intf in client.getInterfaceList():
for arp in client.getArpTable():
print ("Arp: %s" % (str(arp)))
def list_vlans(args):
details = args.details
with get_client(args) as client:
#for intf in client.getInterfaceList():
vlans = dict()
for idx, intf in client.getAllInterfaces().iteritems():
vlans[intf.vlanId] = True
for vlan in vlans:
print("===== Vlan %d ==== " % vlan)
for address in client.getVlanAddresses(vlan):
print(address)
@contextlib.contextmanager
def get_client(args, timeout=5.0):
sock = TSocket.TSocket(args.host, args.port)
sock.setTimeout(timeout * 1000) # thrift timeout is in ms
protocol = TBinaryProtocol.TBinaryProtocol(sock)
transport = protocol.trans
transport.open()
client = FbossCtrl.Client(protocol)
yield client
transport.close()
if __name__ == '__main__':
ap = ArgumentParser()
ap.add_argument('--port', '-p', type=int, default=5909,
help='the controller thrift port')
ap.add_argument('--client', '-c', type=int, default=DEFAULT_CLIENTID,
help='the client ID used to manipulate the routes')
ap.add_argument('--host',
help='the controller hostname', default='localhost')
subparsers = ap.add_subparsers()
flush_parser = subparsers.add_parser(
'flush', help='flush all existing non-interface routes')
flush_parser.set_defaults(func=flush_routes)
add_parser = subparsers.add_parser(
'add', help='add a new route or change an existing route')
add_parser.set_defaults(func=add_route)
add_parser.add_argument(
'prefix', help='the route prefix, i.e. "1.1.1.0/24" or "2001::0/64"')
add_parser.add_argument(
'nexthop', nargs='+',
help='the nexthops of the route, i.e "10.1.1.1" or "2002::1"')
del_parser = subparsers.add_parser(
'delete', help='delete an existing route')
del_parser.set_defaults(func=del_route)
del_parser.add_argument(
'prefix', help='The route prefix, i.e. "1.1.1.0/24" or "2001::0/64"')
list_parser = subparsers.add_parser(
'list_intf', help='list switch interfaces')
list_parser.set_defaults(func=list_intf)
list_parser.add_argument(
'--details', action='store_true',
help='List all information about the interface', default=False)
list_route_parser = subparsers.add_parser(
'list_routes', help='list switch routes')
list_route_parser.set_defaults(func=list_routes)
list_route_parser.add_argument(
'--details', action='store_true',
help='List all information about the routes', default=False)
list_optic_parser = subparsers.add_parser(
'list_optics', help='list switch optics')
list_optic_parser.set_defaults(func=list_optics)
list_optic_parser.add_argument(
'--details', action='store_true',
help='List all information about the optics', default=False)
list_port_parser = subparsers.add_parser(
'list_ports', help='list switch ports')
list_port_parser.set_defaults(func=list_ports)
list_port_parser.add_argument(
'--details', action='store_true',
help='List all information about the ports', default=False)
list_vlan_parser = subparsers.add_parser(
'list_vlans', help='list switch vlans')
list_vlan_parser.set_defaults(func=list_vlans)
list_vlan_parser.add_argument(
'--details', action='store_true',
help='List all information about the vlans', default=False)
list_arp_parser = subparsers.add_parser(
'list_arps', help='list switch arps')
list_arp_parser.set_defaults(func=list_arps)
list_arp_parser.add_argument(
'--details', action='store_true',
help='List all information about the arps', default=False)
args = ap.parse_args()
args.func(args)
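# --- Hedged usage examples (deployment details are assumptions) ---
# The flags and subcommands below are the ones defined by the argparse setup
# above; the host name and addresses are placeholders.
#
#   ./fboss_route.py --host fboss-switch1 add 10.1.1.0/24 10.1.2.1
#   ./fboss_route.py --host fboss-switch1 delete 10.1.1.0/24
#   ./fboss_route.py --host fboss-switch1 --port 5909 list_routes --details
#   ./fboss_route.py flush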
| bsd-3-clause | -2,916,868,972,458,586,000 | 33.718085 | 77 | 0.655278 | false |
| custom22/google-python-exercises | basic/string1.py | 1 | 3771 |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic string exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in string2.py.
# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
# +++your code here+++
if count >= 10:
donuts = 'Number of donuts: many'
else:
donuts = 'Number of donuts: %d' % count
return donuts
# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
# +++your code here+++
if len(s) < 2:
both_ends = ''
else:
both_ends = s[0:2] + s[len(s)-2:]
return both_ends
# C. fix_start
# Given a string s, return a string
# where all occurences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
# Hint: s.replace(stra, strb) returns a version of string s
# where all instances of stra have been replaced by strb.
def fix_start(s):
# +++your code here+++
first_s = s[0:1]
s = s.replace(s[0:1], '*')
s = first_s + s[1:]
return s
# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
# 'mix', pod' -> 'pox mid'
# 'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
# +++your code here+++
a_first = a[0:2]
b_first = b[0:2]
a = b_first + a[2:]
b = a_first + b[2:]
return a + ' ' + b
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
print 'donuts'
# Each line calls donuts, compares its result to the expected for that call.
test(donuts(4), 'Number of donuts: 4')
test(donuts(9), 'Number of donuts: 9')
test(donuts(10), 'Number of donuts: many')
test(donuts(99), 'Number of donuts: many')
print
print 'both_ends'
test(both_ends('spring'), 'spng')
test(both_ends('Hello'), 'Helo')
test(both_ends('a'), '')
test(both_ends('xyz'), 'xyyz')
print
print 'fix_start'
test(fix_start('babble'), 'ba**le')
test(fix_start('aardvark'), 'a*rdv*rk')
test(fix_start('google'), 'goo*le')
test(fix_start('donut'), 'donut')
print
print 'mix_up'
test(mix_up('mix', 'pod'), 'pox mid')
test(mix_up('dog', 'dinner'), 'dig donner')
test(mix_up('gnash', 'sport'), 'spash gnort')
test(mix_up('pezzy', 'firm'), 'fizzy perm')
# Standard boilerplate to call the main() function.
if __name__ == '__main__':
main()
| apache-2.0 | -5,765,066,975,706,078,000 | 28.692913 | 78 | 0.654733 | false |
| drpaneas/linuxed.gr | plugins/simple_footnotes.py | 1 | 4382 |
from pelican import signals
import re
import html5lib
RAW_FOOTNOTE_CONTAINERS = ["code"]
def getText(node, recursive = False):
"""Get all the text associated with this node.
With recursive == True, all text from child nodes is retrieved."""
L = ['']
for n in node.childNodes:
if n.nodeType in (node.TEXT_NODE, node.CDATA_SECTION_NODE):
L.append(n.data)
else:
if not recursive:
return None
L.append(getText(n) )
return ''.join(L)
def parse_for_footnotes(article_generator):
for article in article_generator.articles:
if "[ref]" in article._content and "[/ref]" in article._content:
article._content += "<hr><h4>Footnotes</h4>"
content = article._content.replace("[ref]", "<x-simple-footnote>").replace("[/ref]", "</x-simple-footnote>")
parser = html5lib.HTMLParser(tree=html5lib.getTreeBuilder("dom"))
dom = parser.parse(content)
endnotes = []
count = 0
for footnote in dom.getElementsByTagName("x-simple-footnote"):
pn = footnote
leavealone = False
while pn:
if pn.nodeName in RAW_FOOTNOTE_CONTAINERS:
leavealone = True
break
pn = pn.parentNode
if leavealone:
continue
count += 1
fnid = "sf-%s-%s" % (article.slug, count)
fnbackid = "%s-back" % (fnid,)
endnotes.append((footnote, fnid, fnbackid))
number = dom.createElement("sup")
number.setAttribute("id", fnbackid)
numbera = dom.createElement("a")
numbera.setAttribute("href", "#%s" % fnid)
numbera.setAttribute("class", "simple-footnote")
numbera.appendChild(dom.createTextNode(str(count)))
txt = getText(footnote, recursive=True).replace("\n", " ")
numbera.setAttribute("title", txt)
number.appendChild(numbera)
footnote.parentNode.insertBefore(number, footnote)
if endnotes:
ol = dom.createElement("ol")
ol.setAttribute("class", "simple-footnotes")
for e, fnid, fnbackid in endnotes:
li = dom.createElement("li")
li.setAttribute("id", fnid)
while e.firstChild:
li.appendChild(e.firstChild)
backlink = dom.createElement("a")
backlink.setAttribute("href", "#%s" % fnbackid)
backlink.setAttribute("class", "simple-footnote-back")
backlink.appendChild(dom.createTextNode(u'\u21a9'))
li.appendChild(dom.createTextNode(" "))
li.appendChild(backlink)
ol.appendChild(li)
e.parentNode.removeChild(e)
dom.getElementsByTagName("body")[0].appendChild(ol)
s = html5lib.serializer.htmlserializer.HTMLSerializer(omit_optional_tags=False, quote_attr_values=True)
output_generator = s.serialize(html5lib.treewalkers.getTreeWalker("dom")(dom.getElementsByTagName("body")[0]))
article._content = "".join(list(output_generator)).replace(
"<x-simple-footnote>", "[ref]").replace("</x-simple-footnote>", "[/ref]").replace(
"<body>", "").replace("</body>", "")
if False:
count = 0
endnotes = []
for f in footnotes:
count += 1
fnstr = '<a class="simple-footnote" name="%s-%s-back" href="#%s-%s"><sup>%s</a>' % (
article.slug, count, article.slug, count, count)
endstr = '<li id="%s-%s">%s <a href="#%s-%s-back">↑</a></li>' % (
article.slug, count, f[len("[ref]"):-len("[/ref]")], article.slug, count)
content = content.replace(f, fnstr)
endnotes.append(endstr)
content += '<h4>Footnotes</h4><ol class="simple-footnotes">%s</ul>' % ("\n".join(endnotes),)
article._content = content
def register():
signals.article_generator_finalized.connect(parse_for_footnotes)
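# --- Hedged usage sketch (illustration, not from the plugin docs) ---
# With this plugin enabled, article source like:
#
#   Pelican is a static site generator.[ref]Written in Python.[/ref]
#
# is rewritten so the [ref]...[/ref] body becomes a numbered footnote: a
# <sup> link replaces the marker in place, and the footnote text is moved
# into an <ol class="simple-footnotes"> list appended after the article,
# as implemented in parse_for_footnotes() above.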
| mit | -472,941,587,506,048,000 | 47.164835 | 126 | 0.527385 | false |
| piratos/ctfbulletin | ctfbulletin/settings.py | 1 | 2533 |
"""
Django settings for ctfbulletin project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP
TEMPLATE_CONTEXT_PROCESSORS = TCP + (
'django.core.context_processors.request',
'challenges.views.get_challenger',
)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'b6&ks5aoq^b$b*uwz_ryc!^f9kgu&#%(po%p)%ef3_)n!u7ppt'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
LOGIN_URL = '/challenges/login/'
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'suit',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'talk',
'blog',
'challenges',
'ctf',
'django_countries',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'ctfbulletin.urls'
WSGI_APPLICATION = 'ctfbulletin.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {'ENGINE': 'django.db.backends.mysql',
'NAME': 'ctfbulletin',
'USER': 'root',
'PASSWORD': '',
'HOST': '127.0.0.1',
'PORT': '',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# DIRS
STATIC_URL = '/static/'
STATIC_PATH = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = (
STATIC_PATH,
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates/'),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
|
mit
| 1,901,026,166,474,514,200 | 23.365385 | 74 | 0.684564 | false |
Mageswaran1989/aja
|
src/test/python/examples/naive_bayes/naive_bayes.py
|
1
|
4954
|
from numpy import *
def loadDataSet():
postingList=[['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
['stop', 'posting', 'stupid', 'worthless', 'garbage'],
['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
classVec = [0,1,0,1,0,1] #1 is abusive, 0 not
return postingList,classVec
def createVocabList(dataSet):
vocabSet = set([]) #create empty set
for document in dataSet:
vocabSet = vocabSet | set(document) #union of the two sets
return list(vocabSet)
def setOfWords2Vec(vocabList, inputSet):
returnVec = [0]*len(vocabList)
for word in inputSet:
if word in vocabList:
returnVec[vocabList.index(word)] = 1
else: print "the word: %s is not in my Vocabulary!" % word
return returnVec
def trainNB0(trainMatrix,trainCategory):
numTrainDocs = len(trainMatrix)
numWords = len(trainMatrix[0])
pAbusive = sum(trainCategory)/float(numTrainDocs)
    p0Num = ones(numWords); p1Num = ones(numWords)      #start counts at 1 and denominators at 2.0
    p0Denom = 2.0; p1Denom = 2.0                        #(Laplace smoothing, so unseen words never give zero probability)
for i in range(numTrainDocs):
if trainCategory[i] == 1:
p1Num += trainMatrix[i]
p1Denom += sum(trainMatrix[i])
else:
p0Num += trainMatrix[i]
p0Denom += sum(trainMatrix[i])
    p1Vect = log(p1Num/p1Denom)          #work in log space: classifyNB sums these instead of
    p0Vect = log(p0Num/p0Denom)          #multiplying many small probabilities (avoids underflow)
return p0Vect,p1Vect,pAbusive
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
p1 = sum(vec2Classify * p1Vec) + log(pClass1) #element-wise mult
p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)
if p1 > p0:
return 1
else:
return 0
def testingNB():
listOPosts,listClasses = loadDataSet()
myVocabList = createVocabList(listOPosts)
trainMat=[]
for postinDoc in listOPosts:
trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
p0V,p1V,pAb = trainNB0(array(trainMat),array(listClasses))
testEntry = ['love', 'my', 'dalmation']
thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
print testEntry,'classified as: ',classifyNB(thisDoc,p0V,p1V,pAb)
testEntry = ['stupid', 'garbage']
thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
print testEntry,'classified as: ',classifyNB(thisDoc,p0V,p1V,pAb)
def bagOfWords2VecMN(vocabList, inputSet):
returnVec = [0]*len(vocabList)
for word in inputSet:
if word in vocabList:
returnVec[vocabList.index(word)] += 1
return returnVec
def textParse(bigString): #input is big string, #output is word list
import re
listOfTokens = re.split(r'\W*', bigString)
return [tok.lower() for tok in listOfTokens if len(tok) > 2]
def spamTest():
docList=[]; classList = []; fullText =[]
for i in range(1,26):
wordList = textParse(open('../../../data/email/spam/%d.txt' % i).read())
docList.append(wordList)
fullText.extend(wordList)
classList.append(1)
wordList = textParse(open('../../../data/email/ham/%d.txt' % i).read())
docList.append(wordList)
fullText.extend(wordList)
classList.append(0)
vocabList = createVocabList(docList)#create vocabulary
trainingSet = range(50); testSet=[] #create test set
for i in range(10):
randIndex = int(random.uniform(0,len(trainingSet)))
testSet.append(trainingSet[randIndex])
del(trainingSet[randIndex])
trainMat=[]; trainClasses = []
for docIndex in trainingSet:#train the classifier (get probs) trainNB0
trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
trainClasses.append(classList[docIndex])
p0V,p1V,pSpam = trainNB0(array(trainMat),array(trainClasses))
errorCount = 0
for docIndex in testSet: #classify the remaining items
wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
if classifyNB(array(wordVector),p0V,p1V,pSpam) != classList[docIndex]:
errorCount += 1
print "classification error",docList[docIndex]
print 'the error rate is: ',float(errorCount)/len(testSet)
#return vocabList,fullText
'''
import naive_bayes
listOPosts,listClasses = naive_bayes.loadDataSet()
myVocabList = naive_bayes.createVocabList(listOPosts)
myVocabList
naive_bayes.setOfWords2Vec(myVocabList, listOPosts[0])
naive_bayes.setOfWords2Vec(myVocabList, listOPosts[3])
trainMat=[]
for postinDoc in listOPosts:
trainMat.append(naive_bayes.setOfWords2Vec(myVocabList, postinDoc))
p0V,p1V,pAb=naive_bayes.trainNB0(trainMat,listClasses)
naive_bayes.testingNB()
'''
|
apache-2.0
| 3,253,726,386,395,607,000 | 38.007874 | 83 | 0.637061 | false |
jonzobrist/Percona-Server-5.1
|
kewpie/drizzle_tests/randgen_slavePlugin/multiThread3_test.py
|
1
|
2002
|
#! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2011 Patrick Crews
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import unittest
import subprocess
import os
from lib.util.randgen_methods import execute_randgen
server_requirements = [['--innodb.replication-log'],['--plugin-add=slave --slave.config-file=$MASTER_SERVER_SLAVE_CONFIG']]
servers = []
server_manager = None
test_executor = None
class multiThread3Test(unittest.TestCase):
#def setUp(self):
# """ If we need to do anything pre-test, we do it here.
# Any code here is executed before any test method we
# may execute
#
# """
# return
def test_multiThread3(self):
test_cmd = "./gentest.pl --gendata=conf/drizzle/translog_drizzle.zz --grammar=conf/drizzle/translog_concurrent3.yy --Reporter=DrizzleSlavePlugin --queries=75 --threads=3"
retcode, output = execute_randgen(test_cmd, test_executor, servers)
self.assertTrue(retcode==0, output)
def tearDown(self):
server_manager.reset_servers(test_executor.name)
def run_test(output_file):
suite = unittest.TestLoader().loadTestsFromTestCase(multiThread3Test)
return unittest.TextTestRunner(stream=output_file, verbosity=2).run(suite)
|
bsd-3-clause
| 5,068,747,423,588,182,000 | 34.122807 | 179 | 0.718282 | false |
aerospike/aerospike-client-python
|
test/new_tests/test_data.py
|
1
|
4069
|
import pytest
try:
import cPickle as pickle
except:
import pickle
class SomeClass(object):
pass
pos_data = [
(('test', 'demo', 1), {'age': 1, 'name': 'name1'}),
(('test', 'demo', 2), {'age': 2, 'name': 'Mr John', 'bmi': 3.55}),
(('test', 'demo', 'boolean_key'), {'is_present': True}),
(('test', 'demo', 'string'), {'place': "New York", 'name': 'John'}),
(('test', 'demo', u"bb"), {'a': [u'aa', 2, u'aa', 4, u'cc', 3, 2, 1]}),
(('test', u'demo', 1), {'age': 1, 'name': 'name1'}),
(('test', 'demo', 1), {"is_present": None}),
(('test', 'unknown_set', 1), {
'a': {'k': [bytearray("askluy3oijs", "utf-8")]}}),
# Bytearray
(("test", "demo", bytearray(
"asd;as[d'as;d", "utf-8")), {"name": "John"}),
(('test', 'demo', 'bytes_key'), {'bytes': bytearray('John', 'utf-8')}),
# List Data
(('test', 'demo', 'list_key'), {'names': ['John', 'Marlen', 'Steve']}),
(('test', 'demo', 'list_key'), {'names': [1, 2, 3, 4, 5]}),
(('test', 'demo', 'list_key'), {
'names': [1.5, 2.565, 3.676, 4, 5.89]}),
(('test', 'demo', 'list_key'), {'names': ['John', 'Marlen', 1024]}),
(('test', 'demo', 'list_key_unicode'), {
'a': [u'aa', u'bb', 1, u'bb', u'aa']}),
(('test', 'demo', 'objects'), {'objects': [
pickle.dumps(SomeClass()), pickle.dumps(SomeClass())]}),
# Map Data
(('test', 'demo', 'map_key'), {'names': {'name': 'John', 'age': 24}}),
(('test', 'demo', 'map_key_float'), {
"double_map": {"1": 3.141, "2": 4.123, "3": 6.285}}),
(('test', 'demo', 'map_key_unicode'), {
'a': {u'aa': u'11'}, 'b': {u'bb': u'22'}}),
# (('test', 'demo', 1),
# {'odict': OrderedDict(sorted({'banana': 3, 'apple': 4, 'pear': 1, 'orange': 2}.items(),
# key=lambda t: t[0]))}),
# Tuple Data
(('test', 'demo', 'tuple_key'), {'tuple_seq': tuple('abc')}),
# Set Data
(('test', 'demo', 'set_key'), {"set_data": set([1, 2])}),
(('test', 'demo', 'fset_key'), {
"fset_data": frozenset(["Frankfurt", "Basel", "Freiburg"])}),
# Hybrid
(('test', 'demo', 'multiple_bins'), {
'i': ["nanslkdl", 1, bytearray("asd;as[d'as;d", "utf-8")],
's': {"key": "asd';q;'1';"},
'b': 1234,
'l': '!@#@#$QSDAsd;as'
}),
(('test', 'demo', 'list_map_key'), {
'names': ['John', 'Marlen', 'Steve'],
'names_and_age': [{'name': 'John',
'age': 24}, {'name': 'Marlen',
'age': 25}]
}),
(('test', 'demo', 'map_tuple_key'), {
'seq': {'bb': tuple('abc')}
}),
]
key_neg = [
((None, 'demo', 1), -2, "namespace must be a string"),
((12.34, 'demo', 1), -2, "namespace must be a string"),
((35, 'demo', 1), -2, "namespace must be a string"),
(([], 'demo', 1), -2, "namespace must be a string"),
(({}, 'demo', 1), -2, "namespace must be a string"),
(((), 'demo', 1), -2, "namespace must be a string"),
(None, -2, 'key is invalid'),
(['test', 'demo', 'key_as_list'], -2, "key is invalid"),
(('test', 123, 1), -2, "set must be a string"),
(('test', 12.36, 1), -2, "set must be a string"),
(('test', [], 1), -2, "set must be a string"),
(('test', {}, 1), -2, "set must be a string"),
(('test', (), 1), -2, "set must be a string"),
(('test', 'demo', None),
-2, 'either key or digest is required'),
(('test', 'demo'),
-2, 'key tuple must be (Namespace, Set, Key) or (Namespace, Set, None, Digest)'),
]
|
apache-2.0
| 4,281,508,143,284,940,000 | 43.228261 | 112 | 0.390759 | false |
davcastroruiz/boston-django-project
|
webSite/repo/migrations/0002_auto_20170224_1612.py
|
1
|
1210
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-24 22:12
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('repo', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Site',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('site_title', models.CharField(max_length=250)),
('site_url', models.CharField(max_length=1000)),
],
),
migrations.RenameField(
model_name='url',
old_name='url_name',
new_name='url',
),
migrations.RemoveField(
model_name='url',
name='type',
),
migrations.DeleteModel(
name='Type',
),
migrations.AddField(
model_name='url',
name='site',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='repo.Site'),
preserve_default=False,
),
]
|
gpl-3.0
| -8,792,082,569,611,806,000 | 27.809524 | 114 | 0.538017 | false |
nerdvegas/rez
|
src/rez/tests/test_package_cache.py
|
1
|
5704
|
"""
Test package caching.
"""
from rez.tests.util import TestBase, TempdirMixin, restore_os_environ, \
install_dependent
from rez.packages import get_package
from rez.package_cache import PackageCache
from rez.resolved_context import ResolvedContext
from rez.exceptions import PackageCacheError
from rez.utils.filesystem import canonical_path
import os
import os.path
import time
import subprocess
class TestPackageCache(TestBase, TempdirMixin):
@classmethod
def setUpClass(cls):
TempdirMixin.setUpClass()
cls.py_packages_path = canonical_path(cls.data_path("packages", "py_packages"))
cls.solver_packages_path = canonical_path(cls.data_path("solver", "packages"))
cls.package_cache_path = os.path.join(cls.root, "package_cache")
os.mkdir(cls.package_cache_path)
cls.settings = dict(
packages_path=[cls.py_packages_path, cls.solver_packages_path],
cache_packages_path=cls.package_cache_path,
default_cachable=True,
# ensure test packages will get cached
package_cache_same_device=True,
default_cachable_per_repository={
cls.solver_packages_path: False
},
default_cachable_per_package={
"late_binding": False
}
)
@classmethod
def tearDownClass(cls):
TempdirMixin.tearDownClass()
def _pkgcache(self):
return PackageCache(self.package_cache_path)
def test_cache_variant(self):
"""Test direct caching of a cachable variant."""
pkgcache = self._pkgcache()
package = get_package("versioned", "3.0")
variant = next(package.iter_variants())
_, status = pkgcache.add_variant(variant)
self.assertEqual(status, PackageCache.VARIANT_CREATED)
# adding again should indicate the variant is already cached
_, status = pkgcache.add_variant(variant)
self.assertEqual(status, PackageCache.VARIANT_FOUND)
def test_delete_cached_variant(self):
"""Test variant deletion from cache."""
pkgcache = self._pkgcache()
package = get_package("versioned", "3.0")
variant = next(package.iter_variants())
pkgcache.add_variant(variant)
result = pkgcache.remove_variant(variant)
self.assertEqual(result, PackageCache.VARIANT_REMOVED)
# another deletion should say not found
result = pkgcache.remove_variant(variant)
self.assertEqual(result, PackageCache.VARIANT_NOT_FOUND)
def test_cache_fail_uncachable_variant(self):
"""Test that caching of an uncachable variant fails."""
pkgcache = self._pkgcache()
package = get_package("timestamped", "1.1.1")
variant = next(package.iter_variants())
with self.assertRaises(PackageCacheError):
pkgcache.add_variant(variant)
def test_cache_fail_no_variant_payload(self):
"""Test that adding a variant with no disk payload, fails."""
pkgcache = self._pkgcache()
package = get_package("variants_py", "2.0")
variant = next(package.iter_variants())
with self.assertRaises(PackageCacheError):
pkgcache.add_variant(variant)
def test_cache_fail_per_repo(self):
"""Test that caching fails on a package from a repo set to non-cachable."""
pkgcache = self._pkgcache()
package = get_package("pyfoo", "3.1.0")
variant = next(package.iter_variants())
with self.assertRaises(PackageCacheError):
pkgcache.add_variant(variant)
def test_cache_fail_per_package(self):
"""Test that caching fails on a package with a blacklisted name."""
pkgcache = self._pkgcache()
package = get_package("late_binding", "1.0")
variant = next(package.iter_variants())
with self.assertRaises(PackageCacheError):
pkgcache.add_variant(variant)
@install_dependent()
def test_caching_on_resolve(self):
"""Test that cache is updated as expected on resolved env."""
pkgcache = self._pkgcache()
with restore_os_environ():
# set config settings into env so rez-pkg-cache proc sees them
os.environ.update(self.get_settings_env())
# Creating the context will asynchronously add variants to the cache
# in a separate proc.
#
c = ResolvedContext([
"timestamped-1.2.0",
"pyfoo-3.1.0" # won't cache, see earlier test
])
variant = c.get_resolved_package("timestamped")
# Retry 50 times with 0.1 sec interval, 5 secs is more than enough for
# the very small variant to be copied to cache.
#
cached_root = None
for _ in range(50):
time.sleep(0.1)
cached_root = pkgcache.get_cached_root(variant)
if cached_root:
break
self.assertNotEqual(cached_root, None)
expected_payload_file = os.path.join(cached_root, "stuff.txt")
self.assertTrue(os.path.exists(expected_payload_file))
# check that refs to root point to cache location in rex code
for ref in ("resolve.timestamped.root", "'{resolve.timestamped.root}'"):
proc = c.execute_rex_code(
code="info(%s)" % ref,
stdout=subprocess.PIPE
)
out, _ = proc.communicate()
root = out.strip()
self.assertEqual(
root, cached_root,
"Reference %r should resolve to %s, but resolves to %s"
% (ref, cached_root, root)
)
|
lgpl-3.0
| 8,163,004,261,610,059,000 | 32.552941 | 87 | 0.617637 | false |
justinbois/fish-activity
|
tests/test_parse.py
|
1
|
8083
|
import pytest
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
import fishact
def test_sniffer():
n_header, delimiter, line = fishact.parse._sniff_file_info(
'tests/single_gtype.txt')
assert n_header == 2
assert delimiter is None
assert line == '1\n'
n_header, delimiter, line = fishact.parse._sniff_file_info(
'tests/multiple_gtype.txt')
assert n_header == 2
assert delimiter == '\t'
assert line == '1\t5\t2\n'
def test_gtype_loader():
df = fishact.parse.load_gtype('tests/single_gtype.txt', quiet=True,
rstrip=True)
assert all(df.columns == ['genotype', 'location'])
assert all(df['genotype'] == 'all fish')
assert all(df['location'] == [1, 2, 3, 4, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 86, 87, 88, 89, 90, 91, 92, 93, 94, 96])
def test_resample_array():
x = np.arange(10, dtype=float)
assert np.isclose(fishact.parse._resample_array(x, 10),
np.array([45.])).all()
assert np.isclose(fishact.parse._resample_array(x, 20),
np.array([90.])).all()
assert np.isclose(fishact.parse._resample_array(x, 5),
np.array([10., 35.])).all()
assert np.isclose(fishact.parse._resample_array(x, 3),
np.array([3., 12., 21., 27.])).all()
x[-3] = np.nan
assert np.isclose(fishact.parse._resample_array(x, 5)[0], 10.0) \
and np.isnan(fishact.parse._resample_array(x, 5)[1])
def test_resample_segment():
df = pd.DataFrame({'a': np.arange(10),
'b': np.arange(10, 20),
'c': np.arange(10, dtype=float),
'd': np.arange(10, 20, dtype=float)})
re_df = fishact.parse._resample_segment(df, 5, ['c'])
re_df = re_df.reindex_axis(sorted(re_df.columns), axis=1)
correct_df = pd.DataFrame({'a': [0, 5],
'b': [10, 15],
'c': [10., 35.],
'd': [10., 15.]})
assert_frame_equal(re_df, correct_df)
re_df = fishact.parse._resample_segment(df, 5, ['c', 'd'])
re_df = re_df.reindex_axis(sorted(re_df.columns), axis=1)
correct_df = pd.DataFrame({'a': [0, 5],
'b': [10, 15],
'c': [10., 35.],
'd': [60., 85.]})
assert_frame_equal(re_df, correct_df)
re_df = fishact.parse._resample_segment(df, 3, ['c'])
re_df = re_df.reindex_axis(sorted(re_df.columns), axis=1)
correct_df = pd.DataFrame({'a': [0, 3, 6, 9],
'b': [10, 13, 16, 19],
'c': [3., 12., 21., 27.],
'd': [10., 13., 16., 19.]})
assert_frame_equal(re_df, correct_df)
re_df = fishact.parse._resample_segment(df, 3, ['c', 'd'])
re_df = re_df.reindex_axis(sorted(re_df.columns), axis=1)
correct_df = pd.DataFrame({'a': [0, 3, 6, 9],
'b': [10, 13, 16, 19],
'c': [3., 12., 21., 27.],
'd': [33., 42., 51., 57.]})
assert_frame_equal(re_df, correct_df)
def test_resample():
df = pd.DataFrame(
{'location': np.concatenate((np.ones(10), 2*np.ones(10))).astype(int),
'exp_time': np.concatenate((np.arange(10),
np.arange(10))).astype(float),
'exp_ind': np.concatenate((np.arange(10), np.arange(10))).astype(int),
'zeit': np.concatenate((np.arange(10),
np.arange(10))).astype(float),
'zeit_ind': np.concatenate((np.arange(10),
np.arange(10))).astype(int),
'activity': np.concatenate((np.arange(10),
np.arange(10, 20))).astype(float),
'sleep': np.ones(20, dtype=float),
'light': [True]*5 + [False]*5 + [True]*5 + [False]*5,
'day': [5]*10 + [6]*10,
'genotype': ['wt']*20,
'acquisition': np.ones(20, dtype=int),
'instrument': np.ones(20, dtype=int),
'trial': np.ones(20, dtype=int),
'time': pd.to_datetime(['2017-03-30 14:00:00',
'2017-03-30 14:01:00',
'2017-03-30 14:02:00',
'2017-03-30 14:03:00',
'2017-03-30 14:04:00',
'2017-03-30 14:05:00',
'2017-03-30 14:06:00',
'2017-03-30 14:07:00',
'2017-03-30 14:08:00',
'2017-03-30 14:09:00']*2)})
re_df = fishact.parse.resample(df, 5, signal=['activity', 'sleep'],
quiet=True)
re_df = re_df.reindex_axis(sorted(re_df.columns), axis=1)
correct_df = pd.DataFrame(
{'activity': np.array([10., 35., 60., 85.]),
'day': [5, 5, 6, 6],
'location': np.array([1, 1, 2, 2], dtype=int),
'genotype': ['wt']*4,
'light': [True, False, True, False],
'sleep': np.array([5., 5., 5., 5.]),
'time': pd.to_datetime(['2017-03-30 14:00:00',
'2017-03-30 14:05:00']*2),
'exp_time': np.array([0., 5., 0., 5.]),
'exp_ind': np.array([0, 5, 0, 5], dtype=int),
'zeit': np.array([0., 5., 0., 5.]),
'zeit_ind': np.array([0, 5, 0, 5], dtype=int),
'acquisition': np.ones(4, dtype=int),
'instrument': np.ones(4, dtype=int),
'trial': np.ones(4, dtype=int)})
assert_frame_equal(re_df, correct_df)
re_df = fishact.parse.resample(df, 3, quiet=True)
re_df = re_df.reindex_axis(sorted(re_df.columns), axis=1)
correct_df = pd.DataFrame(
{'activity': np.array([3., 10.5, 18., 25.5, 33., 40.5, 48., 55.5]),
'day': [5, 5, 5, 5, 6, 6, 6, 6],
'location': np.array([1, 1, 1, 1, 2, 2, 2, 2], dtype=int),
'genotype': ['wt']*8,
'light': [True, True, False, False, True, True, False, False],
'sleep': np.array([3., 3., 3., 3., 3., 3., 3., 3.]),
'time': pd.to_datetime(['2017-03-30 14:00:00',
'2017-03-30 14:03:00',
'2017-03-30 14:05:00',
'2017-03-30 14:08:00']*2),
'exp_time': np.array([0., 3., 5., 8., 0., 3., 5., 8.]),
'exp_ind': np.array([0, 3, 5, 8, 0, 3, 5, 8], dtype=int),
'zeit': np.array([0., 3., 5., 8., 0., 3., 5., 8.]),
'zeit_ind': np.array([0, 3, 5, 8, 0, 3, 5, 8], dtype=int),
'acquisition': np.ones(8, dtype=int),
'instrument': np.ones(8, dtype=int),
'trial': np.ones(8, dtype=int)})
assert_frame_equal(re_df, correct_df)
def test_tidy_data():
# Test that it will not overwrite existing file
with pytest.raises(RuntimeError) as excinfo:
fishact.parse.tidy_data('test.csv', 'test_geno.txt', 'test.csv')
excinfo.match("Cowardly refusing to overwrite input file.")
with pytest.raises(RuntimeError) as excinfo:
fishact.parse.tidy_data('test.csv', 'test_geno.txt', 'test_geno.txt')
excinfo.match("Cowardly refusing to overwrite input file.")
with pytest.raises(RuntimeError) as excinfo:
fishact.parse.tidy_data('test.csv', 'test_geno.txt',
'tests/empty_file_for_tests.csv')
excinfo.match("tests/empty_file_for_tests.csv already exists, cowardly refusing to overwrite.")
## TO DO: integration test: make sure output CSV is as expected.
|
mit
| -4,720,349,903,391,392,000 | 45.454023 | 382 | 0.477917 | false |
slivkamiro/sample_kafka_producer
|
src/sample_source_props.py
|
1
|
7278
|
from random import randint
def random_movie():
return movies_list[randint(0, movies_list_length - 1)]
def random_series():
return seasons_list[randint(0, seasons_list_length - 1)]
def random_user():
user_data = user_list[randint(0, user_list_length - 1)]
return {'email': user_data[0], 'name': user_data[randint(1, len(user_data) - 1)]}
def random_tags():
random_indices = [randint(0, tag_list_length - 1) for i in range(0, tag_list_length/2)]
tags = [tag_list[i] for i in random_indices]
return ','.join(set(tags))
def random_sentence():
return reviews_list[randint(0, reviews_list_lenght - 1)]
reviews_list = [
"The old-fashioned voice revitalizes the time.",
"The approval relates the driving.",
"The trade stimulates the mother.",
"The destruction interacts the swim.",
"The paste operates the fight.",
"The reward treats the good substance.",
"The slimy control boughts the mountain.",
"The start records the woman.",
"The rice promotes the use.",
"The balance simplifys the weather.",
"The fiction fosters the loss.",
"The tendency solds the mountain.",
"The vessel contributes the idea.",
"The grain drews the furtive part.",
"The smell converts the experience.",
"The thing briefs the funny vessel.",
"The society qualifys the front.",
"The naive question furthers the rate.",
"The direction zero ins the whimsical change.",
"The ignorant machine debates the nation.",
"The meat reorganizes the doubt.",
"The effect correlates the wax.",
"The muddled manager engineers the ornament.",
"The view litigates the greasy disgust.",
"The attraction checks the wild burst.",
"The rain forecasts the gorgeous smell.",
"The pain facilitates the smoke.",
"The division consolidates the free mountain.",
"The sign adapts the mass.",
"The giant amount formulates the paper.",
"The amusing person recreates the substance.",
"The mist xeroxs the square tendency."
]
reviews_list_lenght = len(reviews_list)
tag_list = [
"cool",
"bad",
"boring",
"sad",
"funny",
"horror",
"cartoon",
"great"
]
tag_list_length = len(tag_list)
user_list = [
["gozer@sbcglobal.net", "cool guy", "Shorty"],
["cliffordj@me.com", "foo"],
["carroll@optonline.net", "CASTBOUND", "cheesy doo doop"],
["bflong@sbcglobal.net", "Ducky", "Punker Chick"],
["keiji@aol.com", "LX Size: Big guys", "mistalee"],
["miami@outlook.com", "The Unpredictable"],
["kingma@yahoo.com","Meat Duck", "ladyshrew"],
["carroll@aol.com", "Evil Philanthropist"],
["epeeist@optonline.net", "Flaming Spaz", "BaBy_BluE"],
["jemarch@msn.com", "Crimson Fart"],
["sfoskett@msn.com", "the prosexionist", "Chick-Fool-a"],
["flakeg@yahoo.ca", "motor_angel"],
["sinkou@icloud.com", "Popoff", "DeDe Al"],
["grady@comcast.net", "angle_fire"],
["milton@gmail.com", "infected mushroom", "toxic alien"],
["dexter@live.com", "hallucinogen"],
["rmcfarla@msn.com", "Dan-E DAngerously", "Cracker Chick"],
["kaiser@yahoo.com", "Phat T"],
["slaff@me.com", "swamp donkey", "Trouble Chick"],
["dwsauder@att.net", "cheese"]
]
user_list_length = len(user_list)
seasons_list = [
["The Handmaid's Tale", 1],
["Bloodline", 3],
["Fargo", 3],
["House Of Cards", 5],
["Twin Peaks", 3],
["Master Of None", 2],
["American Gods", 1],
["The Leftovers", 3],
["Better Call Saul", 3],
["Downward Dog", 1],
["13 Reasons Why", 1],
["Riverdale", 1],
["Dear White People", 1],
["Kingdom", 3],
["IZombie", 3],
["The Keepers", 1]
]
seasons_list_length = len(seasons_list)
movies_list = [
["The Wizard of Oz","1939"],
["Citizen Kane","1941"],
["The Third Man","1949"],
["Get Out","2017"],
["Mad Max: Fury Road","2015"],
["The Cabinet of Dr. Caligari (Das Cabinet des Dr. Caligari)","1920"],
["All About Eve","1950"],
["Inside Out","2015"],
["The Godfather","1972"],
["Metropolis","1927"],
["E.T. The Extra-Terrestrial","1982"],
["Modern Times","1936"],
["It Happened One Night","1934"],
["Singin' in the Rain","1952"],
["Casablanca","1942"],
["Boyhood","2014"],
["Laura","1944"],
["Nosferatu, a Symphony of Horror (Nosferatu, eine Symphonie des Grauens) (Nosferatu the Vampire)","1922"],
["Snow White and the Seven Dwarfs","1937"],
["Moonlight","2016"],
["A Hard Day's Night","1964"],
["The Battle of Algiers (La Battaglia di Algeri)","1967"],
["La Grande illusion (Grand Illusion)","1938"],
["North by Northwest","1959"],
["The Maltese Falcon","1941"],
["Repulsion","1965"],
["12 Years a Slave","2013"],
["Gravity","2013"],
["Sunset Boulevard","1950"],
["King Kong","1933"],
["The Adventures of Robin Hood","1938"],
["Rear Window","1954"],
["Rashomon","1951"],
["Psycho","1960"],
["Taxi Driver","1976"],
["Spotlight","2015"],
["Selma","2015"],
["Toy Story 3","2010"],
["Argo","2012"],
["Toy Story 2","1999"],
["The Bride of Frankenstein","1935"],
["M","1931"],
["Logan","2017"],
["Zootopia","2016"],
["The Philadelphia Story","1940"],
["Alien","1979"],
["Seven Samurai (Shichinin no Samurai)","1956"],
["The Treasure of the Sierra Madre","1948"],
["Up","2009"],
["All Quiet on the Western Front","1930"],
["The 400 Blows (Les Quatre cents coups)","1959"],
["Bicycle Thieves (Ladri di biciclette)","1949"],
["12 Angry Men (Twelve Angry Men)","1957"],
["Army of Shadows (L'Armee des ombres)","1969"],
["A Streetcar Named Desire","1951"],
["The Night of the Hunter","1955"],
["Dr. Strangelove Or How I Learned to Stop Worrying and Love the Bomb","1964"],
["Rebecca","1940"],
["Vertigo","1958"],
["Rosemary's Baby","1968"],
["Frankenstein","1931"],
["The Conformist","1970"],
["The Dark Knight","2008"],
["La La Land","2016"],
["Touch of Evil","1958"],
["Star Wars: Episode VII - The Force Awakens","2015"],
["Finding Nemo","2003"],
["Manchester by the Sea","2016"],
["The Wrestler","2008"],
["The Babadook","2014"],
["L.A. Confidential","1997"],
["The Good, the Bad and the Ugly","1966"],
["The 39 Steps","1935"],
["The Hurt Locker","2009"],
["Skyfall","2012"],
["Gone With the Wind","1939"],
["Pinocchio","1940"],
["Brooklyn","2015"],
["Open City","1946"],
["Tokyo Story (Tokyo monogatari)","1953"],
["Hell or High Water","2016"],
["High Noon","1952"],
["Star Trek","2009"],
["The Last Picture Show","1971"],
["The Jungle Book","2016"],
["Wonder Woman","2017"],
["The Grapes of Wrath","1940"],
["The Wages of Fear","1953"],
["Roman Holiday","1953"],
["On the Waterfront","1954"],
["Harry Potter and the Deathly Hallows - Part 2","2011"],
["It Follows","2015"],
["The Godfather, Part II","1974"],
["Man on Wire","2008"],
["Toy Story","1995"],
["Jaws","1975"],
["Battleship Potemkin","1925"],
["Anatomy of a Murder","1959"],
["Arrival","2016"],
["Lawrence of Arabia", "1962"]
]
movies_list_length = len(movies_list)
|
apache-2.0
| 8,245,845,420,224,092,000 | 31.783784 | 111 | 0.590409 | false |
adfernandes/mbed
|
tools/psa/tfm/bin_utils/imgtool/keys/rsa_test.py
|
5
|
4542
|
# Original code taken from mcuboot project at:
# https://github.com/mcu-tools/mcuboot
# Git SHA of the original version: a8e12dae381080e898cea0c6f7408009b0163f9f
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for RSA keys
"""
import io
import os
import sys
import tempfile
import unittest
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives.asymmetric.padding import PSS, MGF1
from cryptography.hazmat.primitives.hashes import SHA256
# Setup sys path so 'imgtool' is in it.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),
'../..')))
from imgtool.keys import load, RSA, RSAUsageError
from imgtool.keys.rsa import RSA_KEY_SIZES
class KeyGeneration(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.TemporaryDirectory()
def tname(self, base):
return os.path.join(self.test_dir.name, base)
def tearDown(self):
self.test_dir.cleanup()
def test_keygen(self):
# Try generating a RSA key with non-supported size
with self.assertRaises(RSAUsageError):
RSA.generate(key_size=1024)
for key_size in RSA_KEY_SIZES:
name1 = self.tname("keygen.pem")
k = RSA.generate(key_size=key_size)
k.export_private(name1, b'secret')
# Try loading the key without a password.
self.assertIsNone(load(name1))
k2 = load(name1, b'secret')
pubname = self.tname('keygen-pub.pem')
k2.export_public(pubname)
pk2 = load(pubname)
# We should be able to export the public key from the loaded
# public key, but not the private key.
pk2.export_public(self.tname('keygen-pub2.pem'))
self.assertRaises(RSAUsageError, pk2.export_private,
self.tname('keygen-priv2.pem'))
def test_emit(self):
"""Basic sanity check on the code emitters."""
for key_size in RSA_KEY_SIZES:
k = RSA.generate(key_size=key_size)
ccode = io.StringIO()
k.emit_c_public(ccode)
self.assertIn("rsa_pub_key", ccode.getvalue())
self.assertIn("rsa_pub_key_len", ccode.getvalue())
rustcode = io.StringIO()
k.emit_rust_public(rustcode)
self.assertIn("RSA_PUB_KEY", rustcode.getvalue())
def test_emit_pub(self):
"""Basic sanity check on the code emitters, from public key."""
pubname = self.tname("public.pem")
for key_size in RSA_KEY_SIZES:
k = RSA.generate(key_size=key_size)
k.export_public(pubname)
k2 = load(pubname)
ccode = io.StringIO()
k2.emit_c_public(ccode)
self.assertIn("rsa_pub_key", ccode.getvalue())
self.assertIn("rsa_pub_key_len", ccode.getvalue())
rustcode = io.StringIO()
k2.emit_rust_public(rustcode)
self.assertIn("RSA_PUB_KEY", rustcode.getvalue())
def test_sig(self):
for key_size in RSA_KEY_SIZES:
k = RSA.generate(key_size=key_size)
buf = b'This is the message'
sig = k.sign(buf)
# The code doesn't have any verification, so verify this
# manually.
k.key.public_key().verify(
signature=sig,
data=buf,
padding=PSS(mgf=MGF1(SHA256()), salt_length=32),
algorithm=SHA256())
# Modify the message to make sure the signature fails.
self.assertRaises(InvalidSignature,
k.key.public_key().verify,
signature=sig,
data=b'This is thE message',
padding=PSS(mgf=MGF1(SHA256()), salt_length=32),
algorithm=SHA256())
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| 7,534,515,029,772,833,000 | 33.409091 | 78 | 0.599295 | false |
freelancer/freelancer-sdk-python
|
examples/create_employer_review.py
|
1
|
1087
|
from freelancersdk.session import Session
from freelancersdk.resources.projects.projects import post_review
from freelancersdk.resources.projects.helpers import \
create_review_employer_object
from freelancersdk.resources.projects.exceptions import \
ReviewNotPostedException
import os
def sample_post_review():
url = os.environ.get('FLN_URL')
oauth_token = os.environ.get('FLN_OAUTH_TOKEN')
session = Session(oauth_token=oauth_token, url=url)
review = create_review_employer_object(
project_id=201,
employer_id=101,
freelancer_id=102,
clarity_spec=5,
communication=5,
payment_prom=5,
professionalism=5,
work_for_again=5,
comment='Thanks for all the fish'
)
try:
r = post_review(session, review)
except ReviewNotPostedException as e:
print('Error message: {}'.format(e.message))
print('Server response: {}'.format(e.error_code))
return None
else:
return r
r = sample_post_review()
if r:
print('Review created: {}'.format(r))
|
lgpl-3.0
| -388,278,904,186,106,100 | 26.871795 | 65 | 0.664213 | false |
MCopperhead/city-building
|
object_layer.py
|
1
|
2012
|
import cocos as c
from cell import Cell
from shared_data import MAP_SIZE
from objects import TestCube, Wall
class DynamicBatch(c.batch.BatchNode):
"""
    Batch modification that allows changing the Z level of a child object dynamically.
"""
def change_z(self, child, z):
child.set_batch(None)
child.set_batch(self.batch, self.groups, z)
self.children = [(z_, c_) for (z_, c_) in self.children if c_ != child]
elem = z, child
lo = 0
hi = len(self.children)
a = self.children
while lo < hi:
mid = (lo+hi)//2
if z < a[mid][0]: hi = mid
else: lo = mid+1
self.children.insert(lo, elem)
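# Hedged usage sketch, not part of the original module: how a child's draw order
# could be re-sorted at runtime (e.g. a unit walking behind a building). The Z
# values and the `sprite` argument are illustrative assumptions.
def _dynamic_batch_usage_example(sprite):
    batch = DynamicBatch()
    batch.add(sprite, z=10)          # initial draw order
    batch.change_z(sprite, 25)       # removed and re-inserted at the new Z level
    return batch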
class ObjectLayer(c.layer.ScrollableLayer):
def __init__(self):
super(ObjectLayer, self).__init__()
# self.batch = c.batch.BatchNode()
self.batch = DynamicBatch()
self.add(self.batch)
self.buildings = set()
def add_object(self, cell, object_class, building=False):
if cell.passable and cell.type != Cell.ROAD:
z = 2*MAP_SIZE - cell.i - cell.j
if building:
obj = object_class(position=cell.position, z=z)
self.buildings.add(obj)
obj.cell = cell
else:
obj = object_class(position=cell.position)
self.batch.add(obj, z=z)
cell.child = obj
cell.passable = False
return obj
return None
def add_wall(self, cell, index):
if cell.passable and cell.type != Cell.ROAD:
z = 2*MAP_SIZE - cell.i - cell.j
wall = Wall(index, position=cell.position)
self.batch.add(wall, z=z)
if index < 6:
cell.passable = False
def summon_creature(self, path):
creature = TestCube(position=self.parent.pillar_cell.position)
z = 2*MAP_SIZE - path[0].i - path[0].j
self.batch.add(creature, z=z)
creature.move(path)
|
gpl-2.0
| -6,374,275,823,120,481,000 | 30.936508 | 82 | 0.553678 | false |
GoogleCloudPlatform/sap-deployment-automation
|
third_party/github.com/ansible/awx/awx/main/tests/unit/commands/test_replay_job_events.py
|
1
|
2696
|
# Copyright (c) 2017 Ansible by Red Hat
# All Rights Reserved
# Python
import pytest
from unittest import mock
from datetime import timedelta
# Django
from django.utils import timezone
# AWX
from awx.main.models import (
Job,
JobEvent,
)
from awx.main.management.commands.replay_job_events import (
ReplayJobEvents,
)
class TestReplayJobEvents():
@pytest.fixture
def epoch(self):
return timezone.now()
@pytest.fixture
def job_events(self, epoch):
return [
JobEvent(created=epoch),
JobEvent(created=epoch + timedelta(seconds=10)),
JobEvent(created=epoch + timedelta(seconds=20)),
JobEvent(created=epoch + timedelta(seconds=30)),
JobEvent(created=epoch + timedelta(seconds=31)),
JobEvent(created=epoch + timedelta(seconds=31, milliseconds=1)),
JobEvent(created=epoch + timedelta(seconds=31, microseconds=1, milliseconds=1)),
]
@pytest.fixture
def mock_serializer_fn(self):
class MockSerializer():
data = dict()
def fn(job_event):
serialized = MockSerializer()
serialized.data['group_name'] = 'foobar'
return serialized
return fn
@pytest.fixture
def replayer(self, mocker, job_events, mock_serializer_fn):
r = ReplayJobEvents()
r.get_serializer = lambda self: mock_serializer_fn
r.get_job = mocker.MagicMock(return_value=Job(id=3))
r.sleep = mocker.MagicMock()
r.get_job_events = lambda self: (job_events, len(job_events))
r.replay_offset = lambda *args, **kwarg: 0
r.emit_job_status = lambda job, status: True
return r
@mock.patch('awx.main.management.commands.replay_job_events.emit_event_detail', lambda *a, **kw: None)
def test_sleep(self, mocker, replayer):
replayer.run(3, 1)
assert replayer.sleep.call_count == 6
replayer.sleep.assert_has_calls([
mock.call(10.0),
mock.call(10.0),
mock.call(10.0),
mock.call(1.0),
mock.call(0.001),
mock.call(0.000001),
])
@mock.patch('awx.main.management.commands.replay_job_events.emit_event_detail', lambda *a, **kw: None)
def test_speed(self, mocker, replayer):
replayer.run(3, 2)
assert replayer.sleep.call_count == 6
replayer.sleep.assert_has_calls([
mock.call(5.0),
mock.call(5.0),
mock.call(5.0),
mock.call(0.5),
mock.call(0.0005),
mock.call(0.0000005),
])
# TODO: Test replay_offset()
# TODO: Test stat generation
|
apache-2.0
| -2,448,998,290,709,884,000 | 28.304348 | 106 | 0.599777 | false |
codeinvain/object_detection
|
app/tracks.py
|
1
|
1501
|
import logging
import os
import json
import sys
from object_detector import ObjectDetector
from jsonmerge import merge
import atexit
config = {}
logger = None
def bootstrap(options):
if not os.path.exists('log'):
os.makedirs('log')
if not os.path.exists('tmp'):
os.makedirs('tmp')
create_pid()
initConfig(options)
init_logger()
logger.info('Starting app v{0} on pid: {1}'.format(config['version'], os.getpid()))
logger.debug('With config:')
logger.debug(config)
atexit.register(delete_pid)
def create_pid():
f = open('tmp/tracks.pid', 'w')
f.write("{0}".format(os.getpid()))
f.close()
def delete_pid():
os.remove('tmp/tracks.pid')
def initConfig(options):
with open('config/application.json') as json_data_file:
cfg = json.load(json_data_file)
global config
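    # jsonmerge lets the right-hand side win: environment-specific values
    # override the defaults, and the passed-in options override both.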
config = merge(merge(cfg['default'],cfg[env()]),options)
def env():
return os.getenv('PY_ENV','development')
def init_logger():
logging.basicConfig(filename='log/{env}.log'.format(env=env()),level=logging.DEBUG,format='%(asctime)s [%(levelname)s] %(message)s')
logging.captureWarnings(True)
root = logging.getLogger()
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
global logger
logger = logging
def startApp():
o = ObjectDetector()
|
mit
| -9,208,650,910,631,746,000 | 21.742424 | 136 | 0.652232 | false |
satanas/libturpial
|
libturpial/api/models/column.py
|
1
|
1346
|
# -*- coding: utf-8 -*-
class Column(object):
"""
This model represents a column that holds
:class:`libturpial.api.models.status.Status` objects. You need to specify
to what *account_id* are they associated, as well as the column *slug*.
Available column slugs are available in
:class:`libturpial.common.ColumnType`.
:ivar id_: Column id (for example: "johndoe-twitter-timeline")
:ivar slug: Column slug
:ivar account_id: id of account associated to the column
:ivar size: max number of statuses that this column can hold
:ivar singular_unit: unit used to identify one status (e.g: 'tweet')
:ivar plural_unit: unit used to identify more than one status
(e.g: 'tweets')
"""
def __init__(self, account_id, slug, singular_unit='tweet',
plural_unit='tweets'):
self.size = 0
self.id_ = "%s-%s" % (account_id, slug) # username-protocol-column
self.slug = slug
self.account_id = account_id
self.updating = False
self.singular_unit = singular_unit
self.plural_unit = plural_unit
def __repr__(self):
return "libturpial.api.models.Column %s" % (self.id_)
def __str__(self):
return '%s: %s' % (self.account_id, self.slug)
def __unicode__(self):
return u'%s' % self.__str__()
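# Hedged usage sketch, not part of the original module. The account id format
# ("username-protocol") and the "timeline" slug follow the id_ comment above;
# real slug values live in libturpial.common.ColumnType.
def _column_usage_example():
    column = Column('johndoe-twitter', 'timeline')
    print(column)        # johndoe-twitter: timeline
    return column.id_    # "johndoe-twitter-timeline"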
|
gpl-3.0
| -3,300,106,422,524,235,300 | 35.378378 | 77 | 0.618128 | false |
crate/crate-python
|
src/crate/client/sqlalchemy/tests/dict_test.py
|
1
|
16645
|
# -*- coding: utf-8; -*-
#
# Licensed to CRATE Technology GmbH ("Crate") under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. Crate licenses
# this file to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# However, if you have executed another commercial license agreement
# with Crate these terms will supersede the license and you may use the
# software solely pursuant to the terms of the relevant commercial agreement.
from __future__ import absolute_import
from unittest import TestCase
from unittest.mock import patch, MagicMock
import sqlalchemy as sa
from sqlalchemy.sql import select
from sqlalchemy.orm import Session
from sqlalchemy.ext.declarative import declarative_base
from crate.client.sqlalchemy.types import Craty, ObjectArray
from crate.client.cursor import Cursor
fake_cursor = MagicMock(name='fake_cursor')
FakeCursor = MagicMock(name='FakeCursor', spec=Cursor)
FakeCursor.return_value = fake_cursor
class SqlAlchemyDictTypeTest(TestCase):
def setUp(self):
self.engine = sa.create_engine('crate://')
metadata = sa.MetaData()
self.mytable = sa.Table('mytable', metadata,
sa.Column('name', sa.String),
sa.Column('data', Craty))
def assertSQL(self, expected_str, actual_expr):
self.assertEqual(expected_str, str(actual_expr).replace('\n', ''))
def test_select_with_dict_column(self):
mytable = self.mytable
self.assertSQL(
"SELECT mytable.data['x'] AS anon_1 FROM mytable",
select([mytable.c.data['x']], bind=self.engine)
)
def test_select_with_dict_column_where_clause(self):
mytable = self.mytable
s = select([mytable.c.data], bind=self.engine).\
where(mytable.c.data['x'] == 1)
self.assertSQL(
"SELECT mytable.data FROM mytable WHERE mytable.data['x'] = ?",
s
)
def test_select_with_dict_column_nested_where(self):
mytable = self.mytable
s = select([mytable.c.name], bind=self.engine)
s = s.where(mytable.c.data['x']['y'] == 1)
self.assertSQL(
"SELECT mytable.name FROM mytable " +
"WHERE mytable.data['x']['y'] = ?",
s
)
def test_select_with_dict_column_where_clause_gt(self):
mytable = self.mytable
s = select([mytable.c.data], bind=self.engine).\
where(mytable.c.data['x'] > 1)
self.assertSQL(
"SELECT mytable.data FROM mytable WHERE mytable.data['x'] > ?",
s
)
def test_select_with_dict_column_where_clause_other_col(self):
mytable = self.mytable
s = select([mytable.c.name], bind=self.engine)
s = s.where(mytable.c.data['x'] == mytable.c.name)
self.assertSQL(
"SELECT mytable.name FROM mytable " +
"WHERE mytable.data['x'] = mytable.name",
s
)
def test_update_with_dict_column(self):
mytable = self.mytable
stmt = mytable.update(bind=self.engine).\
where(mytable.c.name == 'Arthur Dent').\
values({
"data['x']": "Trillian"
})
self.assertSQL(
"UPDATE mytable SET data['x'] = ? WHERE mytable.name = ?",
stmt
)
def set_up_character_and_cursor(self, return_value=None):
return_value = return_value or [('Trillian', {})]
fake_cursor.fetchall.return_value = return_value
fake_cursor.description = (
('characters_name', None, None, None, None, None, None),
('characters_data', None, None, None, None, None, None)
)
fake_cursor.rowcount = 1
Base = declarative_base(bind=self.engine)
class Character(Base):
__tablename__ = 'characters'
name = sa.Column(sa.String, primary_key=True)
age = sa.Column(sa.Integer)
data = sa.Column(Craty)
data_list = sa.Column(ObjectArray)
session = Session()
return session, Character
def test_assign_null_to_object_array(self):
session, Character = self.set_up_character_and_cursor()
char_1 = Character(name='Trillian', data_list=None)
self.assertTrue(char_1.data_list is None)
char_2 = Character(name='Trillian', data_list=1)
self.assertTrue(char_2.data_list == [1])
char_3 = Character(name='Trillian', data_list=[None])
self.assertTrue(char_3.data_list == [None])
@patch('crate.client.connection.Cursor', FakeCursor)
def test_assign_to_craty_type_after_commit(self):
session, Character = self.set_up_character_and_cursor(
return_value=[('Trillian', None, None)]
)
char = Character(name='Trillian')
session.add(char)
session.commit()
char.data = {'x': 1}
self.assertTrue(char in session.dirty)
session.commit()
fake_cursor.execute.assert_called_with(
"UPDATE characters SET data = ? WHERE characters.name = ?",
({'x': 1}, 'Trillian',)
)
@patch('crate.client.connection.Cursor', FakeCursor)
def test_change_tracking(self):
session, Character = self.set_up_character_and_cursor()
char = Character(name='Trillian')
session.add(char)
session.commit()
try:
char.data['x'] = 1
except Exception:
print(fake_cursor.fetchall.called)
print(fake_cursor.mock_calls)
raise
self.assertTrue(char in session.dirty)
try:
session.commit()
except Exception:
print(fake_cursor.mock_calls)
raise
self.assertFalse(char in session.dirty)
@patch('crate.client.connection.Cursor', FakeCursor)
def test_partial_dict_update(self):
session, Character = self.set_up_character_and_cursor()
char = Character(name='Trillian')
session.add(char)
session.commit()
char.data['x'] = 1
char.data['y'] = 2
session.commit()
        # on python 3 dicts aren't ordered, so whether x or y is updated
        # first isn't deterministic
try:
fake_cursor.execute.assert_called_with(
("UPDATE characters SET data['y'] = ?, data['x'] = ? "
"WHERE characters.name = ?"),
(2, 1, 'Trillian')
)
except AssertionError:
fake_cursor.execute.assert_called_with(
("UPDATE characters SET data['x'] = ?, data['y'] = ? "
"WHERE characters.name = ?"),
(1, 2, 'Trillian')
)
@patch('crate.client.connection.Cursor', FakeCursor)
def test_partial_dict_update_only_one_key_changed(self):
"""
        If only one attribute of the Craty object is changed,
        the update should only update that attribute,
        not all attributes of the Craty object.
"""
session, Character = self.set_up_character_and_cursor(
return_value=[('Trillian', dict(x=1, y=2))]
)
char = Character(name='Trillian')
char.data = dict(x=1, y=2)
session.add(char)
session.commit()
char.data['y'] = 3
session.commit()
fake_cursor.execute.assert_called_with(
("UPDATE characters SET data['y'] = ? "
"WHERE characters.name = ?"),
(3, 'Trillian')
)
@patch('crate.client.connection.Cursor', FakeCursor)
def test_partial_dict_update_with_regular_column(self):
session, Character = self.set_up_character_and_cursor()
char = Character(name='Trillian')
session.add(char)
session.commit()
char.data['x'] = 1
char.age = 20
session.commit()
fake_cursor.execute.assert_called_with(
("UPDATE characters SET age = ?, data['x'] = ? "
"WHERE characters.name = ?"),
(20, 1, 'Trillian')
)
@patch('crate.client.connection.Cursor', FakeCursor)
def test_partial_dict_update_with_delitem(self):
session, Character = self.set_up_character_and_cursor(
return_value=[('Trillian', {'x': 1})]
)
char = Character(name='Trillian')
char.data = {'x': 1}
session.add(char)
session.commit()
del char.data['x']
self.assertTrue(char in session.dirty)
session.commit()
fake_cursor.execute.assert_called_with(
("UPDATE characters SET data['x'] = ? "
"WHERE characters.name = ?"),
(None, 'Trillian')
)
@patch('crate.client.connection.Cursor', FakeCursor)
def test_partial_dict_update_with_delitem_setitem(self):
""" test that the change tracking doesn't get messed up
delitem -> setitem
"""
session, Character = self.set_up_character_and_cursor(
return_value=[('Trillian', {'x': 1})]
)
session = Session()
char = Character(name='Trillian')
char.data = {'x': 1}
session.add(char)
session.commit()
del char.data['x']
char.data['x'] = 4
self.assertTrue(char in session.dirty)
session.commit()
fake_cursor.execute.assert_called_with(
("UPDATE characters SET data['x'] = ? "
"WHERE characters.name = ?"),
(4, 'Trillian')
)
@patch('crate.client.connection.Cursor', FakeCursor)
def test_partial_dict_update_with_setitem_delitem(self):
""" test that the change tracking doesn't get messed up
setitem -> delitem
"""
session, Character = self.set_up_character_and_cursor(
return_value=[('Trillian', {'x': 1})]
)
char = Character(name='Trillian')
char.data = {'x': 1}
session.add(char)
session.commit()
char.data['x'] = 4
del char.data['x']
self.assertTrue(char in session.dirty)
session.commit()
fake_cursor.execute.assert_called_with(
("UPDATE characters SET data['x'] = ? "
"WHERE characters.name = ?"),
(None, 'Trillian')
)
@patch('crate.client.connection.Cursor', FakeCursor)
def test_partial_dict_update_with_setitem_delitem_setitem(self):
""" test that the change tracking doesn't get messed up
setitem -> delitem -> setitem
"""
session, Character = self.set_up_character_and_cursor(
return_value=[('Trillian', {'x': 1})]
)
char = Character(name='Trillian')
char.data = {'x': 1}
session.add(char)
session.commit()
char.data['x'] = 4
del char.data['x']
char.data['x'] = 3
self.assertTrue(char in session.dirty)
session.commit()
fake_cursor.execute.assert_called_with(
("UPDATE characters SET data['x'] = ? "
"WHERE characters.name = ?"),
(3, 'Trillian')
)
def set_up_character_and_cursor_data_list(self, return_value=None):
return_value = return_value or [('Trillian', {})]
fake_cursor.fetchall.return_value = return_value
fake_cursor.description = (
('characters_name', None, None, None, None, None, None),
('characters_data_list', None, None, None, None, None, None)
)
fake_cursor.rowcount = 1
Base = declarative_base(bind=self.engine)
class Character(Base):
__tablename__ = 'characters'
name = sa.Column(sa.String, primary_key=True)
data_list = sa.Column(ObjectArray)
session = Session()
return session, Character
def _setup_object_array_char(self):
session, Character = self.set_up_character_and_cursor_data_list(
return_value=[('Trillian', [{'1': 1}, {'2': 2}])]
)
char = Character(name='Trillian', data_list=[{'1': 1}, {'2': 2}])
session.add(char)
session.commit()
return session, char
@patch('crate.client.connection.Cursor', FakeCursor)
def test_object_array_setitem_change_tracking(self):
session, char = self._setup_object_array_char()
char.data_list[1] = {'3': 3}
self.assertTrue(char in session.dirty)
session.commit()
fake_cursor.execute.assert_called_with(
("UPDATE characters SET data_list = ? "
"WHERE characters.name = ?"),
([{'1': 1}, {'3': 3}], 'Trillian')
)
def _setup_nested_object_char(self):
session, Character = self.set_up_character_and_cursor(
return_value=[('Trillian', {'nested': {'x': 1, 'y': {'z': 2}}})]
)
char = Character(name='Trillian')
char.data = {'nested': {'x': 1, 'y': {'z': 2}}}
session.add(char)
session.commit()
return session, char
@patch('crate.client.connection.Cursor', FakeCursor)
def test_nested_object_change_tracking(self):
session, char = self._setup_nested_object_char()
char.data["nested"]["x"] = 3
self.assertTrue(char in session.dirty)
session.commit()
fake_cursor.execute.assert_called_with(
("UPDATE characters SET data['nested'] = ? "
"WHERE characters.name = ?"),
({'y': {'z': 2}, 'x': 3}, 'Trillian')
)
@patch('crate.client.connection.Cursor', FakeCursor)
def test_deep_nested_object_change_tracking(self):
session, char = self._setup_nested_object_char()
# change deep nested object
char.data["nested"]["y"]["z"] = 5
self.assertTrue(char in session.dirty)
session.commit()
fake_cursor.execute.assert_called_with(
("UPDATE characters SET data['nested'] = ? "
"WHERE characters.name = ?"),
({'y': {'z': 5}, 'x': 1}, 'Trillian')
)
@patch('crate.client.connection.Cursor', FakeCursor)
def test_delete_nested_object_tracking(self):
session, char = self._setup_nested_object_char()
# delete nested object
del char.data["nested"]["y"]["z"]
self.assertTrue(char in session.dirty)
session.commit()
fake_cursor.execute.assert_called_with(
("UPDATE characters SET data['nested'] = ? "
"WHERE characters.name = ?"),
({'y': {}, 'x': 1}, 'Trillian')
)
@patch('crate.client.connection.Cursor', FakeCursor)
def test_object_array_append_change_tracking(self):
session, char = self._setup_object_array_char()
char.data_list.append({'3': 3})
self.assertTrue(char in session.dirty)
@patch('crate.client.connection.Cursor', FakeCursor)
def test_object_array_insert_change_tracking(self):
session, char = self._setup_object_array_char()
char.data_list.insert(0, {'3': 3})
self.assertTrue(char in session.dirty)
@patch('crate.client.connection.Cursor', FakeCursor)
def test_object_array_slice_change_tracking(self):
session, char = self._setup_object_array_char()
char.data_list[:] = [{'3': 3}]
self.assertTrue(char in session.dirty)
@patch('crate.client.connection.Cursor', FakeCursor)
def test_object_array_extend_change_tracking(self):
session, char = self._setup_object_array_char()
char.data_list.extend([{'3': 3}])
self.assertTrue(char in session.dirty)
@patch('crate.client.connection.Cursor', FakeCursor)
def test_object_array_pop_change_tracking(self):
session, char = self._setup_object_array_char()
char.data_list.pop()
self.assertTrue(char in session.dirty)
@patch('crate.client.connection.Cursor', FakeCursor)
def test_object_array_remove_change_tracking(self):
session, char = self._setup_object_array_char()
item = char.data_list[0]
char.data_list.remove(item)
self.assertTrue(char in session.dirty)
|
apache-2.0
| 1,422,843,196,939,603,700 | 35.502193 | 77 | 0.584019 | false |
mmcauliffe/linguistic-helper-functions
|
linghelper/representations/classes.py
|
1
|
4708
|
import re
import copy
from linghelper.representations.constants import ENGLISH_ONSETS as ONSETS,ENGLISH_VOWEL_PATTERN as VOWEL_PATTERN
class Word(object):
def __init__(self,orthography,transcription,frequency=0):
self.orthography = orthography
self.syllables = syllabify(transcription)
self.frequency = frequency
def istitle(self):
if self.orthography.istitle():
return True
return False
def get_all_segments(self):
segs = []
for s in self.syllables:
segs += s.onset + [s.nucleus] + s.coda
return segs
def segment_count(self,sound):
return sum(map(lambda x: x == sound,[x for x in self.get_all_segments()]))
def neighbour_transcription(self,sound, syllable_position, word_position):
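        # Build the transcription of a hypothetical neighbour: copy the syllables,
        # then replace the onset, coda or nucleus of the initial/final/penultimate
        # syllable with the given sound.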
syllables = copy.deepcopy(self.syllables)
if word_position == 'initial':
index = 0
elif word_position == 'final':
index = -1
elif word_position == 'penultimate':
index = -2
if syllable_position == 'onset':
syllables[index].onset = [sound]
elif syllable_position == 'coda':
syllables[index].coda = [sound]
else:
syllables[index].nucleus = sound
sep = syllables[0].sep
return sep.join(map(str,syllables))
def contains(self,sound):
if self.segment_count(sound) > 0:
return True
return False
def in_position(self,sound,syllable_position,word_position, complex=False):
if word_position == 'initial':
s = self.syllables[0]
elif word_position == 'final':
s = self.syllables[-1]
elif word_position == 'penultimate':
if len(self.syllables) < 3:
return False
s = self.syllables[-2]
if syllable_position == 'onset':
if complex:
return s.in_onset(sound)
else:
return s.onset_is(sound)
elif syllable_position == 'coda':
if complex:
return s.in_coda(sound)
else:
return s.coda_is(sound)
else:
return s.nucleus_is(sound)
return False
def render_transcription(self,flat=True):
if flat:
sep = self.syllables[0].sep
return sep.join(map(str,self.syllables))
def __str__(self):
return '%s, %s' % (self.orthography,self.render_transcription())
class Syllable(object):
def __init__(self,nucleus, onset=[],coda=[],sep='.'):
self.nucleus = nucleus
self.onset = onset
self.coda = coda
self.sep = sep
def num_segments(self):
return len(self.onset+[self.nucleus]+self.coda)
def in_onset(self,sound):
if sound in self.onset:
return True
return False
def onset_is(self,sound):
if self.onset_string() == sound:
return True
return False
def in_coda(self,sound):
if sound in self.coda:
return True
return False
def coda_is(self,sound):
if self.coda_string() == sound:
return True
return False
def nucleus_is(self,sound):
if sound == self.nucleus:
return True
return False
def contains(self,sound):
if re.match(VOWEL_PATTERN,sound):
return self.nucleus_is(sound)
else:
if self.in_onset(sound):
return True
if self.in_coda(sound):
return True
return False
def onset_string(self):
return self.sep.join(self.onset)
def coda_string(self):
return self.sep.join(self.coda)
def __str__(self):
return self.sep.join([x for x in [self.onset_string(),self.nucleus,self.coda_string()] if x])
def syllabify(inputword):
    # accept either a string (to be split into segments) or a list of segments
sep = '.'
if isinstance(inputword,str):
if ' ' in inputword:
sep = ' '
inputword = inputword.split(sep)
cons = []
syllables = []
while inputword:
cur = inputword.pop(0)
if re.match(VOWEL_PATTERN,cur):
s = Syllable(nucleus = cur,sep=sep)
if syllables:
for i in range(len(cons)):
if ' '.join(cons[i:]) in ONSETS:
s.onset = cons[i:]
syllables[-1].coda = cons[:i]
break
else:
s.onset = cons
cons = []
syllables.append(s)
else:
cons.append(cur)
if cons:
syllables[-1].coda = cons
return syllables
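# Editorial usage sketch (not part of the original module). The ARPABET-style
# transcription below is hypothetical and assumes that ENGLISH_ONSETS and
# VOWEL_PATTERN recognise these symbols; it only illustrates how Word and
# syllabify are meant to be called:
#   w = Word('problem', 'P R AA1 B L AH0 M', frequency=120)
#   [str(s) for s in w.syllables]              # e.g. ['P R AA1', 'B L AH0 M']
#   w.contains('B'), w.segment_count('AH0')    # membership / counts over all segments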
|
gpl-3.0
| -2,131,419,134,616,865,000 | 27.533333 | 113 | 0.540782 | false |
jknebel/STEN
|
PostStat.py
|
1
|
23533
|
import numpy as np
import tables
import wx
import os
import itertools
# Class for multiple testing correction using mathematical morphology
##Tables definition
##
##tables:
##/Shape # Keep the shape of the Data, to reshape after calculation # simple array
##/Data/All #using createEArray('/','AllData',tables.Float64Atom(),(TF,Electrodes,0))
##/Data/GFP #using createEArray('/','AllData',tables.Float64Atom(),(TF,1,0))
##/Model #using a table with col = {Name of the factor, Value of the factor (vector), Type of the factor (Within, Between, Covariate, Subject)}
##/Info # using a table that contains all the information in the "ExcelSheet"
##/Result/All/Anova # Table with col = {Name of the effect (i.e. main effect, interaction, ...), P Data (without any threshold (alpha, consecutive points, ...)), F Data}
##/Result/All/IntermediateResult # Table with col = {Condition name, Type (Mean, Pearson correlation, Standard error, ...), Data corresponding to Type}
##/Result/All/PostHoc # Table with col = {Name, P, T}
##/Result/GFP/Anova # Table with col = {Name of the effect (i.e. main effect, interaction, ...), P Data (without any threshold (alpha, consecutive points, ...)), F Data}
##/Result/GFP/IntermediateResult # Table with col = {Condition name, Type (Mean, Pearson correlation, Standard error, ...)}
##/Result/GFP/PostHoc # Table with col = {Name, P, T}
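# Editorial sketch (not part of the original STEN code): a minimal helper showing how
# the nodes listed above could be inspected with pytables. The file name is
# hypothetical; node paths and column names are taken from the comments above and
# from the code below.
def _example_inspect_h5(h5_path='results.h5'):
    f = tables.openFile(h5_path, 'r')
    try:
        shape = f.getNode('/Shape').read()                          # original data shape
        factor_names = [t[0] for t in f.getNode('/Model').read()]   # factor names
        effects = [v['StatEffect'] for v in f.getNode('/Result/All/Anova')]
        return shape, factor_names, effects
    finally:
        f.close()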
class MultipleTestingCorrection:
def __init__(self, H5, parent,TF=1,Alpha=0.05,SpaceCont=1,SpaceFile=None):
""" Initialisation for multiple testing correction analysis
H5 = H5 File
Parent = Parent windows
"""
self.parent = parent
self.file = tables.openFile(H5,'r')
self.TFDict=TF
self.SpaceContDict=SpaceCont
if SpaceFile is not None:
self.__MatrixDist__(SpaceFile)
else:
self.Distance=np.array([[-1]])
self.AlphaDict=Alpha
def __MatrixDist__(self,SpaceFile):
if SpaceFile.split('.')[-1]=='xyz':
Data=np.loadtxt(SpaceFile,dtype='string',skiprows=1)
else:
Data=np.loadtxt(SpaceFile,dtype='string')
if Data.shape[1]>3:
Label=Data[:,3]
else:
Label=['']
Coordonee=np.float64(Data[:,0:3])
NbPoint=Coordonee.shape[0]
MatrixDist=np.zeros((NbPoint,NbPoint))
for v in range(NbPoint):
dist=Coordonee-Coordonee[v,:]
dist=dist*dist
dist=dist.sum(1)
dist=np.sqrt(dist)
MatrixDist[v,:]=dist
self.Distance=MatrixDist
def __MathematicalMorphology__(self,Dilate=True):
"""Calulation of the Mathematical Morphology (Erosion and Dilatation)
The infromation like BinaryData, Number on Conseq TF , Number of Contigouis points, and the Amtrix Distance are in self
"""
if self.TF ==1 and self.SpaceCont==1:
print('1')
Mask=self.BinaryData
print(self.BinaryData.sum())
else:
            # Definition of problematic time points that correspond to the time borders
if Dilate:
print('Dilate')
print(self.BinaryData.sum())
else:
print('Erode')
print(self.BinaryData.sum())
TimeStart = (self.TF - 1) / 2
TimeEnd = self.TF - TimeStart
            # create a mask of zeros with the shape of the binary data
            Mask = np.zeros(self.BinaryData.shape)
            # mathematical morphology calculation
            if self.BinaryData.sum() != 0:# if there is no significant point we don't need mathematical morphology
                #loop over all time points
                for time in range(self.BinaryData.shape[0]):
                    # check that the times of interest (the time value +/- TF) exist (border problem)
                    BeginTime = time - TimeStart
EndTime = time + TimeEnd
if BeginTime < 0:
BeginTime = 0
EndTime = self.TF
elif EndTime > self.BinaryData.shape[0]:
BeginTime = self.BinaryData.shape[0] - self.TF
EndTime = self.BinaryData.shape[0]
# Loop over all space points
for dim in range(self.BinaryData.shape[1]):
                        # extract the distances of the space point of interest from the matrix of all distances
                        # space is a vector containing the distances between point dim and all the other points
                        if self.Distance[0,0]==-1: # no correction
                            element = self.BinaryData[BeginTime:EndTime,dim]==1
                        else:
                            space = self.Distance[dim, :]
                            # sort these distances and extract the indices from the closest (itself) to the farthest
                            space = space.argsort()
                            # keep only the n points corresponding to the criterion chosen by the user
                            space = space[0:self.SpaceCont]
                            # element is the subset of interest containing the times and space points of interest
                            # element is a boolean subset where True means significant point and False means non-significant point
                            element = self.BinaryData[BeginTime:EndTime, space] ==1
                        if Dilate:# dilatation
                            if element.any():
                                # if at least one point in element is significant, set mask at time=time and space=dim to 1, else leave 0
                                Mask[time,dim]=1
                        else: #Erosion
                            if element.all():
                                # if all points of element are 1, set mask at time=time and space=dim to 1, else leave 0
                                Mask[time, dim]=1
pourcent = str(100.0 * (self.n) / (self.NbCalcul))
pourcent = pourcent[0:pourcent.find('.') + 3]
if float(pourcent)>100:
pourcent='100'
self.n=self.NbCalcul
self.dlg.Update(self.n, " ".join(['Progression :',
pourcent, ' %']))
self.n += 1
self.Mask=Mask
def Calculation(self):
"""
Calucualtion of mathematical morpholgy on all results anova and PostHoc
TF = Number of consecutive Time frame entered by usr
Alpha = Statistical Thesjold Difine by usr
SpaceCont = Contigous space point define by usr
SpaceFiel File with 3d coodonate to determine distance
"""
ResultType={'Anova.GFP':'/Result/GFP/Anova','Anova.Electrodes':'/Result/All/Anova',
'PostHoc.GFP':'/Result/GFP/PostHoc','PostHoc.Electrodes':'/Result/All/PostHoc'}
self.CorrectedMask={}
for r in ResultType:
res=self.file.getNode(ResultType[r])
if len(res)>0:
                ShapeOriginalData=self.file.getNode('/Shape').read()
                # extract useful information
                # used for user interface feedback:
                # number of terms in the ANOVA or PostHoc multiplied by the number of TF * number of space points * 2 (erosion/dilatation)
                self.NbCalcul=2*ShapeOriginalData.prod()
                #dictionary of corrected ANOVA masks; keys = statistical condition names
self.dlg = wx.ProgressDialog(
'Multiple Test Correction for '+r,
'Calculation in progress : 0 %',
self.NbCalcul, parent=self.parent,
style=wx.PD_AUTO_HIDE | wx.PD_REMAINING_TIME)
self.dlg.SetSize((200, 130))
tmp={}
print(res)
for v in res:
self.n=0
FolderName=r.split('.')[0]
try:
os.mkdir(self.resultFolder+'\\'+FolderName)
except:
pass
P=v['P']
Name=v['StatEffect']
CorrectedMask={}
                    # adapt the number of consecutive time frames and the contiguity criterion to the length of the data
                    # in case the user made a mistake:
                    # if there is only one TF we cannot have a TF value != 1
                    # same remark for the contiguity criterion
if P.shape[0]==1:
self.TF=1
else:
self.TF=self.TFDict[FolderName]
if P.shape[1] == 1:
self.SpaceCont = 1
else:
self.SpaceCont=self.SpaceContDict[FolderName]
                    # we compute an opening (more restrictive), i.e. an erosion followed by a dilatation
                    # BinaryData flags all the p-values lower than Alpha
self.BinaryData=np.zeros(P.shape)
print('init')
print(self.AlphaDict[FolderName])
self.BinaryData[P<self.AlphaDict[FolderName]]=1
print(self.BinaryData.sum())
                    # erosion (Dilate=False)
                    self.__MathematicalMorphology__(Dilate=False)
                    # the BinaryData is now the mask that comes from the erosion
                    self.BinaryData=self.Mask
                    # dilatation (Dilate=True)
                    self.__MathematicalMorphology__(Dilate=True)
tmp[Name]=self.Mask
                # CorrectedMask is a dictionary that contains all the binary masks
self.CorrectedMask[r]=tmp
self.dlg.Destroy()
self.file.close()
class WriteData:
def __init__(self,ResultFolder,H5,Param,DataGFP=False):
#Type = Param/Non Param
self.resultFolder=ResultFolder
self.file = tables.openFile(H5,'a')
self.DataGFP=DataGFP
self.Param=Param
def StatistcalData(self,CorrectedMask):
"""write Staristicla Data using
the multiple testing mask
and raw statistical information in h5"""
# Extract Results check
ResultType={'Anova.GFP':'/Result/GFP/Anova','Anova.Electrodes':'/Result/All/Anova',
'PostHoc.GFP':'/Result/GFP/PostHoc','PostHoc.Electrodes':'/Result/All/PostHoc'}
for r in ResultType:
res=self.file.getNode(ResultType[r])
for v in res:
FolderName=r.split('.')[0]
try:
os.mkdir(self.resultFolder+'\\'+FolderName)
except:
pass
Term=v['StatEffect']
P=1-v['P']
if r.split('.')[0]=='Anova':# partial eta square
stat=v['F']
ext='.F.eph'
else:# R squared
stat=v['T']
ext='.t.eph'
self.TF=P.shape[0]
self.Electrodes=P.shape[1]
self.Fs=self.file.getNode('/Data/Header')[2]
# name based on GFP or Parametric
Name=[r]
Name.append(Term)
if self.Param[FolderName]:
Name.append('Param')
else:
Name.append('NonParam')
if self.DataGFP:
Name.append('GFP')
else:
if self.Electrodes<500:
Name.append('El')
else:
Name.append('IS')
Name.append('P.eph')
Name=".".join(Name)
self.Data=P*CorrectedMask[r][Term]
self.__WriteEph__(self.resultFolder+'\\'+FolderName+'\\'+Name)
self.Data=stat*CorrectedMask[r][Term]
self.__WriteEph__(self.resultFolder+'\\'+FolderName+'\\'+Name.replace('.P.eph',ext))
def IntermediateResult(self):
"""Write intermediate Result"""
try:
os.mkdir(self.resultFolder+'\IntermediateResults')
except:
pass
# Extract Effectsize
self.__EfectSize__('IntermediateResults')
if self.DataGFP:
Intermediate=self.file.getNode('/Result/GFP/IntermediateResult')
else:
Intermediate=self.file.getNode('/Result/All/IntermediateResult')
if len(Intermediate)>0:
for t in Intermediate:
tmp=t['Data']
self.TF=tmp.shape[0]
self.Electrodes=tmp.shape[1]
self.Data=tmp
self.__WriteEph__(self.resultFolder+'\IntermediateResults\\'+t['CondName']+'.'+t['Type']+'.eph')
else:
newRow = Intermediate.row
# use PostHoc Function to extract combination
self.__Combination__()
if self.DataGFP:
Data=self.file.getNode('/Data/GFP')
else:
Data=self.file.getNode('/Data/All')
            # calculate mean and SE for non-covariate arrangements
            DataShape=self.file.getNode('/Shape')
            self.Fs=self.file.getNode('/Data/Header')[2]
            # idx to avoid memory problems and access only part of the data
idx=np.arange(Data.shape[1])
for ar in self.Arrangement:
if ar!='Simple Regression':
Mean=Data[:,idx[self.Arrangement[ar]]].mean(axis=1)
Se=(Data[:,idx[self.Arrangement[ar]]].std(axis=1))/(np.sqrt(self.Arrangement[ar].sum()))
if self.DataGFP==False:
Mean=Mean.reshape(DataShape)
Se=Se.reshape(DataShape)
else:
Mean=Mean.reshape((Mean.shape[0],1))
Se=Se.reshape((Se.shape[0],1))
self.TF=Mean.shape[0]
self.Electrodes=Mean.shape[1]
self.Data=Mean
newRow['Data'] = Mean
newRow['Type'] = 'mean'
newRow['CondName']=ar
newRow.append()
self.__WriteEph__(self.resultFolder+'\IntermediateResults\\'+ar+'.mean.eph')
self.Data=Se
newRow['Data'] = Se
newRow['Type'] = 'Se'
newRow['CondName']=ar
newRow.append()
self.__WriteEph__(self.resultFolder+'\IntermediateResults\\'+ar+'.Se.eph')
            # calculate R for covariate arrangements
for c in self.Covariate:
CovData= self.Covariate[c]
for ar in self.Arrangement:
covtmp=CovData[self.Arrangement[ar]]
datatmp=Data[:,idx[self.Arrangement[ar]]]
R=[]
for d in datatmp:
R.append(np.corrcoef(d,covtmp)[0,1])
R=np.array(R)
if self.DataGFP==False:
R=R.reshape(DataShape)
else:
R=R.reshape((R.shape[0],1))
self.TF=R.shape[0]
self.Electrodes=R.shape[1]
self.Data=R
newRow['Data'] = R
newRow['Type'] = 'R'
newRow['CondName']=ar+'_'+c
newRow.append()
self.__WriteEph__(self.resultFolder+'\IntermediateResults\\'+ar+'_'+c+'.R.eph')
def __Combination__(self):
"""
Reading use H5 File to extract all combination
for intermediate results inspired by Posthoc instat.py
"""
# Numerical Factor Information from the Datatable
tableFactor = self.file.getNode('/Model').read()
# Extraction of relevant factor information from tableFactor
between = {}
within = {}
betweenName = []
withinName = []
self.Covariate={}
CovariateName=[]
# Generating within and between dictionary with condition name
for t in tableFactor:
factorName = t[0]
factorType = t[1]
factorData = t[2]
if factorType == 'Between':
between[factorName] = factorData
betweenName.append(factorName)
elif factorType == 'Within':
within[factorName] = factorData
withinName.append(factorName)
elif factorType == 'Covariate':
self.Covariate[factorName]=factorData
CovariateName.append(factorName)
elif factorType == 'Subject':
subject = factorData
self.subject = subject
# Transform dict into matrix for easy use
within = np.array(within.values())
between = np.array(between.values())
# Extract different levels for each between subject factor
existingCombi = []
# Between subject factor Exist
if between !=[]:
levelsBetween = between.max(axis=1).astype('int')
            # Calculate all possible combinations using the max number of levels
allCombinationBetween = itertools.product(
range(1, levelsBetween.max() + 1), repeat=len(levelsBetween))
# Reduce combination to only existing ones
for c in allCombinationBetween:
combinations = np.array(c)
# existing combinations
if (levelsBetween - combinations < 0).sum() == 0:
existingCombi.append(combinations)
else:
existingCombi.append(between)
existingCombi = np.array(existingCombi)
# Create all possible combinations and extract the booleans
# corresponding to it.
allCombiBool = {}
condName = []
for e in existingCombi:
boolBetween = []
tmpNameBetween = []
for c, l in enumerate(e):
boolBetween.append(between[c, :] == l)
tmpNameBetween.append("-".join([betweenName[c],
str(int(l))]))
boolBetween = np.array(boolBetween)
if within!=[]:
withinCombi = within[:,subject == 1].T
else:
withinCombi=[within]
for w in withinCombi:
boolWithin = []
tmpNameWithin = []
for c, l in enumerate(w):
boolWithin.append(within[c, :] == l)
tmpNameWithin.append("-".join([withinName[c],
str(int(l))]))
boolWithin = np.array(boolWithin)
                # we have both between- and within-subject factors
if between!=[] and within !=[]:
print('1')
bools = np.append(boolBetween, boolWithin, axis=0)
# name of the arrangement
tmpName = ".".join([".".join(tmpNameBetween),
".".join(tmpNameWithin)])
# Only Between subject factor
elif between!=[]:
print('2')
bools = boolBetween
# name of the arrangement
tmpName = ".".join(tmpNameBetween)
# Only Within subject factor
elif within !=[]:
print('3')
bools = boolWithin
# name of the arrangement
tmpName = ".".join(tmpNameWithin)
else:
bools=subject>-1
bools=bools.reshape((1,len(bools)))
tmpName='Simple Regression'
condName.append(tmpName)
allCombiBool[tmpName] = bools.prod(axis=0) == 1
        # dictionary of booleans corresponding to all arrangements
self.Arrangement = allCombiBool
def __WriteEph__(self,FileName):
""" write Eph File"""
File=open(FileName,"w")
# on prend le header
header=[str(self.Electrodes),'\t',str(self.TF),'\t',str(self.Fs),'\n']
#ecrtiture du header
File.write("".join(header))
# boucle sur les time chaque ligne est un temps
for time in self.Data:
#ecriture ligne par ligne
time.tofile(File,sep='\t',format="%s")
#saut de ligne
File.write('\n')
File.close()
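    # Editorial note (not part of the original code): the .eph layout written above
    # looks like, for example (values hypothetical),
    #   204   500   1000        <- header: electrodes, time frames, sampling rate
    #   0.12  -0.03  ...        <- one tab-separated row per time frame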
def __EfectSize__(self,SubFolder):
""" Calulate partila eta square for anova
or Rsquare for T-test PostHoc"""
ResultType={'Anova.GFP':'/Result/GFP/Anova','Anova.Electrodes':'/Result/All/Anova',
'PostHoc.GFP':'/Result/GFP/PostHoc','PostHoc.Electrodes':'/Result/All/PostHoc'}
for r in ResultType:
res=self.file.getNode(ResultType[r])
for v in res:
try:
os.mkdir(self.resultFolder+'\\'+SubFolder+'\EffectSize')
except:
pass
Term=v['StatEffect']
Df=v['Df']
                # name based on GFP or Parametric
if r.split('.')[0]=='Anova':# partial eta square
Name=['Partial-Eta-Square']
F=v['F']
EffectSize=(F*Df[0])/((F*Df[0])+Df[1])
else:# R squared
Name=['R-Square']
T=v['T']
EffectSize=(T*T)/((T*T)+Df[0])
self.TF=EffectSize.shape[0]
self.Electrodes=EffectSize.shape[1]
self.Fs=self.file.getNode('/Data/Header')[2]
Name.append(Term)
if self.Param[r.split('.')[0]]:
Name.append('Param')
else:
Name.append('NonParam')
if self.DataGFP:
Name.append('GFP')
else:
if self.Electrodes<500:
Name.append('El')
else:
Name.append('IS')
Name.append('.eph')
Name=".".join(Name)
self.Data=EffectSize
self.__WriteEph__(self.resultFolder+'\\'+SubFolder+'\EffectSize\\'+Name)
|
bsd-3-clause
| -558,887,679,184,231,500 | 44.785288 | 156 | 0.490248 | false |
liamcurry/py3kwarn
|
py3kwarn2to3/fixes/fix_types.py
|
1
|
1707
|
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for removing uses of the types module.
These work for only the known names in the types module. The forms above
can include types. or not. ie, It is assumed the module is imported either as:
import types
from types import ... # either * or specific types
The import statements are not modified.
There should be another fixer that handles at least the following constants:
type([]) -> list
type(()) -> tuple
type('') -> str
"""
from .. import fixer_base
from ..fixer_util import Name
try:
unicode
except NameError:
unicode = str
_TYPE_MAPPING = {
'BooleanType' : 'bool',
'BufferType' : 'memoryview',
'ClassType' : 'type',
'ComplexType' : 'complex',
'DictType': 'dict',
'DictionaryType' : 'dict',
'EllipsisType' : 'type(Ellipsis)',
'FloatType': 'float',
'IntType': 'int',
'ListType': 'list',
'LongType': 'int',
'ObjectType' : 'object',
'NoneType': 'type(None)',
'NotImplementedType' : 'type(NotImplemented)',
'SliceType' : 'slice',
'TupleType': 'tuple',
'TypeType' : 'type',
'UnicodeType': 'str',
'XRangeType' : 'range',
}
_pats = ["power< 'types' trailer< '.' name='%s' > >" % t for t in _TYPE_MAPPING]
class FixTypes(fixer_base.BaseFix):
BM_compatible = True
PATTERN = '|'.join(_pats)
def transform(self, node, results):
new_value = unicode(_TYPE_MAPPING.get(results["name"].value))
if new_value:
return Name(new_value, prefix=node.prefix)
return None
|
mit
| -5,724,285,161,513,692,000 | 25.671875 | 80 | 0.598711 | false |
embray/PyFITS
|
lib/pyfits/scripts/fitscheck.py
|
1
|
6708
|
"""
``fitscheck`` is a command line script based on pyfits for verifying and
updating the CHECKSUM and DATASUM keywords of .fits files. ``fitscheck`` can
also detect and often fix other FITS standards violations. ``fitscheck``
facilitates re-writing the non-standard checksums originally generated by
pyfits with standard checksums which will interoperate with CFITSIO.
``fitscheck`` will refuse to write new checksums if the checksum keywords are
missing or their values are bad. Use ``--force`` to write new checksums
regardless of whether or not they currently exist or pass. Use
``--ignore-missing`` to tolerate missing checksum keywords without comment.
Example uses of fitscheck:
1. Verify and update checksums, tolerating non-standard checksums, updating to
standard checksum::
$ fitscheck --checksum either --write *.fits
2. Write new checksums, even if existing checksums are bad or missing::
$ fitscheck --write --force *.fits
3. Verify standard checksums and FITS compliance without changing the files::
$ fitscheck --compliance *.fits
4. Verify original nonstandard checksums only::
$ fitscheck --checksum nonstandard *.fits
5. Only check and fix compliance problems, ignoring checksums::
$ fitscheck --checksum none --compliance --write *.fits
6. Verify standard interoperable checksums::
$ fitscheck *.fits
7. Delete checksum keywords::
$ fitscheck --checksum none --write *.fits
"""
import logging
import optparse
import sys
import textwrap
import warnings
import pyfits
log = logging.getLogger('fitscheck')
warnings.filterwarnings('error', message='Checksum verification failed')
warnings.filterwarnings('error', message='Datasum verification failed')
warnings.filterwarnings('ignore', message='Overwriting existing file')
def handle_options(args):
if not len(args):
args = ['-h']
parser = optparse.OptionParser(usage=textwrap.dedent("""
fitscheck [options] <.fits files...>
.e.g. fitscheck example.fits
Verifies and optionally re-writes the CHECKSUM and DATASUM keywords
for a .fits file.
Optionally detects and fixes FITS standard compliance problems.
""".strip()))
parser.add_option(
'-k', '--checksum', dest='checksum_kind',
type='choice', choices=['standard', 'nonstandard', 'either', 'none'],
help='Choose FITS checksum mode or none. Defaults standard.',
default='standard', metavar='[standard | nonstandard | either | none]')
parser.add_option(
'-w', '--write', dest='write_file',
help='Write out file checksums and/or FITS compliance fixes.',
default=False, action='store_true')
parser.add_option(
'-f', '--force', dest='force',
help='Do file update even if original checksum was bad.',
default=False, action='store_true')
parser.add_option(
'-c', '--compliance', dest='compliance',
help='Do FITS compliance checking; fix if possible.',
default=False, action='store_true')
parser.add_option(
'-i', '--ignore-missing', dest='ignore_missing',
help='Ignore missing checksums.',
default=False, action='store_true')
parser.add_option(
'-v', '--verbose', dest='verbose', help='Generate extra output.',
default=False, action='store_true')
global OPTIONS
OPTIONS, fits_files = parser.parse_args(args)
if OPTIONS.checksum_kind == 'none':
OPTIONS.checksum_kind = False
return fits_files
def setup_logging():
if OPTIONS.verbose:
log.setLevel(logging.INFO)
else:
log.setLevel(logging.WARNING)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(message)s'))
log.addHandler(handler)
def verify_checksums(filename):
"""
Prints a message if any HDU in `filename` has a bad checksum or datasum.
"""
errors = 0
try:
hdulist = pyfits.open(filename, checksum=OPTIONS.checksum_kind)
except UserWarning, w:
remainder = '.. ' + ' '.join(str(w).split(' ')[1:]).strip()
# if "Checksum" in str(w) or "Datasum" in str(w):
log.warn('BAD %r %s' % (filename, remainder))
return 1
if not OPTIONS.ignore_missing:
for i, hdu in enumerate(hdulist):
if not hdu._checksum:
log.warn('MISSING %r .. Checksum not found in HDU #%d' %
(filename, i))
return 1
if not hdu._datasum:
log.warn('MISSING %r .. Datasum not found in HDU #%d' %
(filename, i))
return 1
if not errors:
log.info('OK %r' % filename)
return errors
def verify_compliance(filename):
"""Check for FITS standard compliance."""
hdulist = pyfits.open(filename)
try:
hdulist.verify('exception')
except pyfits.VerifyError:
exc = sys.exc_info()[1]
        log.warn('NONCOMPLIANT %r .. %s' %
                 (filename, str(exc).replace('\n', ' ')))
return 1
return 0
def update(filename):
"""
Sets the ``CHECKSUM`` and ``DATASUM`` keywords for each HDU of `filename`.
Also updates fixes standards violations if possible and requested.
"""
hdulist = pyfits.open(filename)
try:
output_verify = 'silentfix' if OPTIONS.compliance else 'ignore'
hdulist.writeto(filename, checksum=OPTIONS.checksum_kind, clobber=True,
output_verify=output_verify)
except pyfits.VerifyError:
pass # unfixable errors already noted during verification phase
finally:
hdulist.close()
def process_file(filename):
"""
Handle a single .fits file, returning the count of checksum and compliance
errors.
"""
try:
checksum_errors = verify_checksums(filename)
if OPTIONS.compliance:
compliance_errors = verify_compliance(filename)
else:
compliance_errors = 0
if OPTIONS.write_file and checksum_errors == 0 or OPTIONS.force:
update(filename)
return checksum_errors + compliance_errors
except Exception:
exc = sys.exc_info()[1]
log.error('EXCEPTION %r .. %s' % (filename, exc))
return 1
def main():
"""
Processes command line parameters into options and files, then checks
or update FITS DATASUM and CHECKSUM keywords for the specified files.
"""
errors = 0
fits_files = handle_options(sys.argv[1:])
setup_logging()
for filename in fits_files:
errors += process_file(filename)
if errors:
log.warn('%d errors' % errors)
return int(bool(errors))
|
bsd-3-clause
| -3,699,447,288,446,370,300 | 29.352941 | 79 | 0.642964 | false |
lavalamp-/ws-backend-community
|
wselasticsearch/query/dns/enumeration.py
|
1
|
1098
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .base import BaseDomainNameScanQuery
class SubdomainEnumerationQuery(BaseDomainNameScanQuery):
"""
This is an Elasticsearch query class for querying SubdomainEnumerationModel objects.
"""
@classmethod
def get_queried_class(cls):
from wselasticsearch.models import SubdomainEnumerationModel
return SubdomainEnumerationModel
def filter_by_enumeration_method(self, method):
"""
Apply a filter to this query that restricts results to only those results found by
the given method.
:param method: The method to filter on.
:return: None
"""
self.must_by_term(key="enumeration_method", value=method)
def filter_by_parent_domain(self, parent_domain):
"""
Apply a filter to this query that restricts results to only those results for the
given parent domain.
:param parent_domain: The parent domain.
:return: None
"""
self.must_by_term(key="parent_domain", value=parent_domain)
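# Editorial usage sketch (not part of the original module); constructor arguments of
# the base query class are omitted and the filter values below are hypothetical:
#   query = SubdomainEnumerationQuery(...)
#   query.filter_by_parent_domain("example.com")
#   query.filter_by_enumeration_method("zone-transfer")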
|
gpl-3.0
| 3,819,539,228,431,977,500 | 32.272727 | 90 | 0.673042 | false |
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/Phylo/PAML/_parse_codeml.py
|
1
|
21515
|
# Copyright (C) 2011 by Brandon Invergo (b.invergo@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
import re
line_floats_re = re.compile("-*\d+\.\d+")
try:
float("nan")
_nan_float = float
except ValueError:
# Happens prior to Python 2.6 depending on C library, e.g. breaks on WinXP
def _nan_float(text):
try:
return float(text)
except ValueError:
if text.lower() == "nan":
import struct
return struct.unpack('d', struct.pack('Q', 0xfff8000000000000))[0]
else:
raise
def parse_basics(lines, results):
"""Parse the basic information that should be present in most codeml output files.
"""
# multi_models is used to designate there being results for more than one
# model in the file
multi_models = False
multi_genes = False
version_re = re.compile(".+ \(in paml version (\d+\.\d+[a-z]*).*")
model_re = re.compile("Model:\s+(.+)")
num_genes_re = re.compile("\(([0-9]+) genes: separate data\)")
# In codeml 4.1, the codon substitution model is headed by:
# "Codon frequencies:"
# In codeml 4.3+ it is headed by:
# "Codon frequency model:"
codon_freq_re = re.compile("Codon frequenc[a-z\s]{3,7}:\s+(.+)")
siteclass_re = re.compile("Site-class models:\s*([^\s]*)")
for line in lines:
# Find all floating point numbers in this line
line_floats_res = line_floats_re.findall(line)
line_floats = [_nan_float(val) for val in line_floats_res]
# Get the program version number
version_res = version_re.match(line)
if version_res is not None:
results["version"] = version_res.group(1)
continue
# Find the model description at the beginning of the file.
model_res = model_re.match(line)
if model_res is not None:
results["model"] = model_res.group(1)
# Find out if more than one genes are analyzed
num_genes_res = num_genes_re.search(line)
if num_genes_res is not None:
results["genes"] = []
num_genes = int(num_genes_res.group(1))
for n in range(num_genes):
results["genes"].append({})
multi_genes = True
continue
# Get the codon substitution model
codon_freq_res = codon_freq_re.match(line)
if codon_freq_res is not None:
results["codon model"] = codon_freq_res.group(1)
continue
# Find the site-class model name at the beginning of the file.
# This exists only if a NSsites class other than 0 is used.
# If there's no site class model listed, then there are multiple
# models in the results file
# Example match: "Site-class models: PositiveSelection"
siteclass_res = siteclass_re.match(line)
if siteclass_res is not None:
siteclass_model = siteclass_res.group(1)
if siteclass_model != "":
results["site-class model"] = siteclass_model
multi_models = False
else:
multi_models = True
# Get the maximum log-likelihood
if "ln Lmax" in line and len(line_floats) > 0:
results["lnL max"] = line_floats[0]
return (results, multi_models, multi_genes)
def parse_nssites(lines, results, multi_models, multi_genes):
"""Determine which NSsites models are present and parse them.
"""
ns_sites = {}
model_re = re.compile("Model (\d+):\s+(.+)")
gene_re = re.compile("Gene\s+([0-9]+)\s+.+")
siteclass_model = results.get("site-class model")
if not multi_models:
# If there's only one model in the results, find out
# which one it is and then parse it.
if siteclass_model is None:
siteclass_model = "one-ratio"
current_model = {"one-ratio": 0,
"NearlyNeutral": 1,
"PositiveSelection": 2,
"discrete": 3,
"beta": 7,
"beta&w>1": 8}[siteclass_model]
if multi_genes:
genes = results["genes"]
current_gene = None
gene_start = None
for line_num, line in enumerate(lines):
gene_res = gene_re.match(line)
if gene_res:
if current_gene is not None:
parse_model(lines[gene_start:line_num], model_results)
genes[current_gene - 1] = model_results
gene_start = line_num
current_gene = int(gene_res.group(1))
model_results = {"description": siteclass_model}
if len(genes[current_gene - 1]) == 0:
model_results = parse_model(lines[gene_start:], model_results)
genes[current_gene - 1] = model_results
else:
model_results = {"description": siteclass_model}
model_results = parse_model(lines, model_results)
ns_sites[current_model] = model_results
else:
# If there are multiple models in the results, scan through
# the file and send each model's text to be parsed individually.
current_model = None
model_start = None
for line_num, line in enumerate(lines):
# Find model names. If this is found on a line,
# all of the following lines until the next time this matches
# contain results for Model X.
# Example match: "Model 1: NearlyNeutral (2 categories)"
model_res = model_re.match(line)
if model_res:
if current_model is not None:
# We've already been tracking a model, so it's time
# to send those lines off for parsing before beginning
# a new one.
parse_model(lines[model_start:line_num], model_results)
ns_sites[current_model] = model_results
model_start = line_num
current_model = int(model_res.group(1))
model_results = {"description": model_res.group(2)}
if ns_sites.get(current_model) is None:
# When we reach the end of the file, we'll still have one more
# model to parse.
model_results = parse_model(lines[model_start:], model_results)
ns_sites[current_model] = model_results
# Only add the ns_sites dict to the results if we really have results.
# Model M0 is added by default in some cases, so if it exists, make sure
# it's not empty
if len(ns_sites) == 1:
m0 = ns_sites.get(0)
if not m0 or len(m0) > 1:
results["NSsites"] = ns_sites
elif len(ns_sites) > 1:
results["NSsites"] = ns_sites
return results
def parse_model(lines, results):
"""Parse an individual NSsites model's results.
"""
parameters = {}
SEs_flag = False
dS_tree_flag = False
dN_tree_flag = False
w_tree_flag = False
num_params = None
tree_re = re.compile("^\([\w #:',.()]*\);\s*$")
branch_re = re.compile("\s+(\d+\.\.\d+)[\s+\d+\.\d+]+")
model_params_re = re.compile("(?<!\S)([a-z]\d?)\s*=\s+(\d+\.\d+)")
for line in lines:
# Find all floating point numbers in this line
line_floats_res = line_floats_re.findall(line)
line_floats = [_nan_float(val) for val in line_floats_res]
# Check if branch-specific results are in the line
branch_res = branch_re.match(line)
# Check if additional model parameters are in the line
model_params = model_params_re.findall(line)
# Find lnL values.
# Example match (lnL = -2021.348300):
# "lnL(ntime: 19 np: 22): -2021.348300 +0.000000"
if "lnL(ntime:" in line and len(line_floats) > 0:
results["lnL"] = line_floats[0]
np_res = re.match("lnL\(ntime:\s+\d+\s+np:\s+(\d+)\)", line)
if np_res is not None:
num_params = int(np_res.group(1))
# Get parameter list. This can be useful for specifying starting
# parameters in another run by copying the list of parameters
# to a file called in.codeml. Since the parameters must be in
# a fixed order and format, copying and pasting to the file is
# best. For this reason, they are grabbed here just as a long
# string and not as individual numbers.
elif len(line_floats) == num_params and not SEs_flag:
parameters["parameter list"] = line.strip()
# Find SEs. The same format as parameters above is maintained
        # since there is a correspondence between the SE format and
# the parameter format.
# Example match:
# "SEs for parameters:
# -1.00000 -1.00000 -1.00000 801727.63247 730462.67590 -1.00000
elif "SEs for parameters:" in line:
SEs_flag = True
elif SEs_flag and len(line_floats) == num_params:
parameters["SEs"] = line.strip()
SEs_flag = False
# Find tree lengths.
# Example match: "tree length = 1.71931"
elif "tree length =" in line and len(line_floats) > 0:
results["tree length"] = line_floats[0]
# Find the estimated trees only taking the tree if it has
# lengths or rate estimates on the branches
elif tree_re.match(line) is not None:
if ":" in line or "#" in line:
if dS_tree_flag:
results["dS tree"] = line.strip()
dS_tree_flag = False
elif dN_tree_flag:
results["dN tree"] = line.strip()
dN_tree_flag = False
elif w_tree_flag:
results["omega tree"] = line.strip()
w_tree_flag = False
else:
results["tree"] = line.strip()
elif "dS tree:" in line:
dS_tree_flag = True
elif "dN tree:" in line:
dN_tree_flag = True
elif "w ratios as labels for TreeView:" in line:
w_tree_flag = True
# Find rates for multiple genes
# Example match: "rates for 2 genes: 1 2.75551"
elif "rates for" in line and len(line_floats) > 0:
line_floats.insert(0, 1.0)
parameters["rates"] = line_floats
# Find kappa values.
# Example match: "kappa (ts/tv) = 2.77541"
elif "kappa (ts/tv)" in line and len(line_floats) > 0:
parameters["kappa"] = line_floats[0]
# Find omega values.
# Example match: "omega (dN/dS) = 0.25122"
elif "omega (dN/dS)" in line and len(line_floats) > 0:
parameters["omega"] = line_floats[0]
elif "w (dN/dS)" in line and len(line_floats) > 0:
parameters["omega"] = line_floats
# Find omega and kappa values for multi-gene files
# Example match: "gene # 1: kappa = 1.72615 omega = 0.39333"
elif "gene # " in line:
gene_num = int(re.match("gene # (\d+)", line).group(1))
if parameters.get("genes") is None:
parameters["genes"] = {}
parameters["genes"][gene_num] = {"kappa": line_floats[0],
"omega": line_floats[1]}
# Find dN values.
# Example match: "tree length for dN: 0.2990"
elif "tree length for dN" in line and len(line_floats) > 0:
parameters["dN"] = line_floats[0]
# Find dS values
# Example match: "tree length for dS: 1.1901"
elif "tree length for dS" in line and len(line_floats) > 0:
parameters["dS"] = line_floats[0]
# Find site class distributions.
# Example match 1 (normal model, 2 site classes):
# "p: 0.77615 0.22385"
# Example match 2 (branch site A model, 4 site classes):
# "proportion 0.00000 0.00000 0.73921 0.26079"
elif line[0:2] == "p:" or line[0:10] == "proportion":
site_classes = parse_siteclass_proportions(line_floats)
parameters["site classes"] = site_classes
# Find the omega value corresponding to each site class
# Example match (2 site classes): "w: 0.10224 1.00000"
elif line[0:2] == "w:":
site_classes = parameters.get("site classes")
site_classes = parse_siteclass_omegas(line, site_classes)
parameters["site classes"] = site_classes
# Find the omega values corresponding to a branch type from
# the clade model C for each site class
# Example match:
# "branch type 0: 0.31022 1.00000 0.00000"
elif "branch type " in line:
branch_type = re.match("branch type (\d)", line)
if branch_type:
site_classes = parameters.get("site classes")
branch_type_no = int(branch_type.group(1))
site_classes = parse_clademodelc(branch_type_no, line_floats,
site_classes)
parameters["site classes"] = site_classes
# Find the omega values of the foreground branch for each site
# class in the branch site A model
# Example match:
# "foreground w 0.07992 1.00000 134.54218 134.54218"
elif line[0:12] == "foreground w":
site_classes = parameters.get("site classes")
site_classes = parse_branch_site_a(True, line_floats, site_classes)
parameters["site classes"] = site_classes
# Find the omega values of the background for each site
# class in the branch site A model
# Example match:
# "background w 0.07992 1.00000 0.07992 1.00000"
elif line[0:12] == "background w":
site_classes = parameters.get("site classes")
site_classes = parse_branch_site_a(False, line_floats, site_classes)
parameters["site classes"] = site_classes
# Find dN & dS for each branch, which is organized in a table
# The possibility of NaNs forces me to not use the line_floats
# method.
# Example row (some spaces removed to make it smaller...).
# " 6..7 0.000 167.7 54.3 0.0000 0.0000 0.0000 0.0 0.0"
elif branch_res is not None and len(line_floats) > 0:
branch = branch_res.group(1)
if parameters.get("branches") is None:
parameters["branches"] = {}
# Hack for Jython http://bugs.jython.org/issue1762 float("-nan")
line = line.replace(" -nan", " nan")
params = line.strip().split()[1:]
parameters["branches"][branch] = {
"t": _nan_float(params[0].strip()),
"N": _nan_float(params[1].strip()),
"S": _nan_float(params[2].strip()),
"omega": _nan_float(params[3].strip()),
"dN": _nan_float(params[4].strip()),
"dS": _nan_float(params[5].strip()),
"N*dN": _nan_float(params[6].strip()),
"S*dS": _nan_float(params[7].strip())}
# Find model parameters, which can be spread across multiple
# lines.
# Example matches:
# " p0= 0.99043 p= 0.36657 q= 1.04445
# " (p1= 0.00957) w= 3.25530"
elif len(model_params) > 0:
float_model_params = []
for param in model_params:
float_model_params.append((param[0], _nan_float(param[1])))
parameters.update(dict(float_model_params))
if len(parameters) > 0:
results["parameters"] = parameters
return results
def parse_siteclass_proportions(line_floats):
"""For models which have multiple site classes, find the proportion of the alignment assigned to each class.
"""
site_classes = {}
if len(line_floats) > 0:
for n in range(len(line_floats)):
site_classes[n] = {"proportion": line_floats[n]}
return site_classes
def parse_siteclass_omegas(line, site_classes):
"""For models which have multiple site classes, find the omega estimated for each class.
"""
# The omega results are tabular with strictly 9 characters per column
# (1 to 3 digits before the decimal point and 5 after). This causes
# numbers to sometimes run into each other, so we must use a different
# regular expression to account for this. i.e.:
# w: 0.00012 1.00000109.87121
line_floats = re.findall("\d{1,3}\.\d{5}", line)
if not site_classes or len(line_floats) == 0:
return
for n in range(len(line_floats)):
site_classes[n]["omega"] = line_floats[n]
return site_classes
def parse_clademodelc(branch_type_no, line_floats, site_classes):
"""Parse results specific to the clade model C.
"""
if not site_classes or len(line_floats) == 0:
return
for n in range(len(line_floats)):
if site_classes[n].get("branch types") is None:
site_classes[n]["branch types"] = {}
site_classes[n]["branch types"][branch_type_no] = line_floats[n]
return site_classes
def parse_branch_site_a(foreground, line_floats, site_classes):
"""Parse results specific to the branch site A model.
"""
if not site_classes or len(line_floats) == 0:
return
for n in range(len(line_floats)):
if site_classes[n].get("branch types") is None:
site_classes[n]["branch types"] = {}
if foreground:
site_classes[n]["branch types"]["foreground"] =\
line_floats[n]
else:
site_classes[n]["branch types"]["background"] =\
line_floats[n]
return site_classes
def parse_pairwise(lines, results):
"""Parse results from pairwise comparisons.
"""
# Find pairwise comparisons
# Example:
# 2 (Pan_troglo) ... 1 (Homo_sapie)
# lnL = -291.465693
# 0.01262 999.00000 0.00100
#
# t= 0.0126 S= 81.4 N= 140.6 dN/dS= 0.0010 dN= 0.0000 dS= 0.0115
pair_re = re.compile("\d+ \((.+)\) ... \d+ \((.+)\)")
pairwise = {}
for line in lines:
# Find all floating point numbers in this line
line_floats_res = line_floats_re.findall(line)
line_floats = [_nan_float(val) for val in line_floats_res]
pair_res = pair_re.match(line)
if pair_res:
seq1 = pair_res.group(1)
seq2 = pair_res.group(2)
if pairwise.get(seq1) is None:
pairwise[seq1] = {}
if pairwise.get(seq2) is None:
pairwise[seq2] = {}
if len(line_floats) == 1:
pairwise[seq1][seq2] = {"lnL": line_floats[0]}
pairwise[seq2][seq1] = pairwise[seq1][seq2]
elif len(line_floats) == 6:
pairwise[seq1][seq2] = {"t": line_floats[0],
"S": line_floats[1],
"N": line_floats[2],
"omega": line_floats[3],
"dN": line_floats[4],
"dS": line_floats[5]}
pairwise[seq2][seq1] = pairwise[seq1][seq2]
if len(pairwise) > 0:
results["pairwise"] = pairwise
return results
def parse_distances(lines, results):
"""Parse amino acid sequence distance results.
"""
distances = {}
sequences = []
raw_aa_distances_flag = False
ml_aa_distances_flag = False
matrix_row_re = re.compile("(.+)\s{5,15}")
for line in lines:
# Find all floating point numbers in this line
line_floats_res = line_floats_re.findall(line)
line_floats = [_nan_float(val) for val in line_floats_res]
if "AA distances" in line:
raw_aa_distances_flag = True
# In current versions, the raw distances always come
# first but I don't trust this to always be true
ml_aa_distances_flag = False
elif "ML distances of aa seqs." in line:
ml_aa_distances_flag = True
raw_aa_distances_flag = False
# Parse AA distances (raw or ML), in a lower diagonal matrix
matrix_row_res = matrix_row_re.match(line)
if matrix_row_res and (raw_aa_distances_flag or
ml_aa_distances_flag):
seq_name = matrix_row_res.group(1).strip()
if seq_name not in sequences:
sequences.append(seq_name)
if raw_aa_distances_flag:
if distances.get("raw") is None:
distances["raw"] = {}
distances["raw"][seq_name] = {}
for i in range(0, len(line_floats)):
distances["raw"][seq_name][sequences[i]] = line_floats[i]
distances["raw"][sequences[i]][seq_name] = line_floats[i]
else:
if distances.get("ml") is None:
distances["ml"] = {}
distances["ml"][seq_name] = {}
for i in range(0, len(line_floats)):
distances["ml"][seq_name][sequences[i]] = line_floats[i]
distances["ml"][sequences[i]][seq_name] = line_floats[i]
if len(distances) > 0:
results["distances"] = distances
return results
|
apache-2.0
| -3,771,134,222,919,070,000 | 43.452479 | 112 | 0.557193 | false |
ly0/xxadmin
|
setup.py
|
1
|
1788
|
#!/usr/bin/env python
from setuptools import setup
# version_tuple = __import__('xadmin.version').VERSION
# version = ".".join([str(v) for v in version_tuple])
setup(
name='django-xadmin',
version='0.5.1',
description='Drop-in replacement of Django admin comes with lots of goodies, fully extensible with plugin support, pretty UI based on Twitter Bootstrap.',
long_description=open('README.rst', encoding='utf-8').read(),
author='sshwsfc',
author_email='sshwsfc@gmail.com',
license=open('LICENSE', encoding='utf-8').read(),
url='http://www.xadmin.io',
download_url='http://github.com/sshwsfc/django-xadmin/archive/master.zip',
packages=['xadmin', 'xadmin.plugins', 'xadmin.templatetags', 'xadmin.views'],
include_package_data=True,
install_requires=[
'setuptools',
'django>=1.7',
'django-crispy-forms>=1.4.0',
'django-formtools>=1.0',
'sorl-thumbnail>=11.12.1b',
],
extras_require={
'Excel': ['xlwt', 'xlsxwriter'],
'Reversion': ['django-reversion'],
'Comment': ['django-contrib-comments'],
},
zip_safe=False,
keywords=['admin', 'django', 'xadmin', 'bootstrap'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
"Programming Language :: JavaScript",
'Programming Language :: Python',
"Programming Language :: Python :: 2.7",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Software Development :: Libraries :: Python Modules",
]
)
|
bsd-3-clause
| -3,786,214,088,761,634,300 | 37.042553 | 158 | 0.612416 | false |
johncheetham/jcchess
|
jcchess/move_list.py
|
1
|
9162
|
#
# move_list.py - Display Move List Window
#
# This file is part of jcchess
#
# jcchess is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# jcchess is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with jcchess. If not, see <http://www.gnu.org/licenses/>.
#
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject
import os
from . import gv
#from . import gui
from . import comments
class Move_List:
move_list_ref = None
def __init__(self):
glade_dir = gv.jcchess.get_glade_dir()
self.glade_file = os.path.join(glade_dir, "move_list.glade")
Move_List.move_list_ref = self
self.comments = comments.get_ref()
self.saved_move_list = []
# create move list window
self.builder = Gtk.Builder()
self.builder.set_translation_domain(gv.domain)
self.builder.add_from_file(self.glade_file)
self.builder.connect_signals(self)
self.window = self.builder.get_object("move_list_window")
self.vbox = self.builder.get_object("vbox1") #man
self.treeview = Gtk.TreeView() #man
self.liststore = self.builder.get_object("liststore1")
self.scrolled_window = Gtk.ScrolledWindow()
#self.scrolled_window.set_size_request(150,300)
self.treeview.set_model(self.liststore)
self.scrolled_window.add(self.treeview)
self.vbox.add(self.scrolled_window)
self.comments_button = self.builder.get_object("comments_button")
#self.builder.connect("button_press_event",self.comments_button_clicked_cb)
cell0 = Gtk.CellRendererText()
# cell0.set_property("cell-background", Gdk.color_parse("#F8F8FF"))
tvcolumn0 = Gtk.TreeViewColumn("#")
self.treeview.append_column(tvcolumn0)
tvcolumn0.pack_start(cell0, True)
tvcolumn0.set_min_width(50)
tvcolumn0.set_attributes(cell0, text=0)
cell1 = Gtk.CellRendererText()
# cell1.set_property("cell-background", Gdk.color_parse("#F8F8FF"))
tvcolumn1 = Gtk.TreeViewColumn(_("Move"))
self.treeview.append_column(tvcolumn1)
tvcolumn1.pack_start(cell1, True)
tvcolumn1.set_min_width(100)
tvcolumn1.set_attributes(cell1, text=1)
cell2 = Gtk.CellRendererText()
# cell1.set_property("cell-background", Gdk.color_parse("#F8F8FF"))
tvcolumn2 = Gtk.TreeViewColumn(_("Cmt"))
self.treeview.append_column(tvcolumn2)
tvcolumn2.pack_start(cell2, True)
tvcolumn2.set_min_width(20)
tvcolumn2.set_attributes(cell2, text=2)
self.tree_selection = self.treeview.get_selection()
self.treeview.connect("button_press_event", self.treeview_button_press)
self.treeview.connect("key_press_event", self.treeview_key_press)
self.update()
# user has closed the window
# just hide it
def delete_event(self, widget, event):
self.window.hide()
return True # do not propagate to other handlers
def show_movelist_window(self, b):
# "present" will show the window if it is hidden
# if not hidden it will raise it to the top
self.window.show_all()
self.window.present()
return
# update the move list
# called when the number of moves in the list has changed
def update(self):
# update liststore
self.liststore.clear()
self.liststore.append(("0.", _("Start Pos"), " "))
movelist = gv.jcchess.get_movelist()
moveno = 1
if len(movelist) != 0:
moveno = 1
for move in movelist:
m1 = move[0:2]
m2 = move[2:]
move = m1 + '-' + m2
comment = self.comments.get_comment(moveno)
if gv.show_moves == True:
gv.gui.comment_view.get_buffer().set_text("-")
if comment != "":
cind = "..."
else:
cind = " "
if moveno % 2 == 1:
strmoveno = str(int((moveno + 1) / 2)) + "."
else:
strmoveno = ""
#e = str(moveno) + ".", move, cind
e = strmoveno, move, cind
e1 = str(moveno) + "." + " " + move +" " + cind #+"\n"
le = []
le.append(e1)
self.liststore.append(e)
if gv.show_moves == True:
if moveno == 1:
gv.gui.move_view.get_model().clear()
gv.gui.move_view.get_model().append(le)
moveno += 1
comment = self.comments.get_comment(moveno)
if comment != "":
if gv.show_moves == True:
gv.gui.comment_view.get_buffer().set_text(comment)
GObject.idle_add(self.scroll_to_end)
# sets the move at move_idx as the selected line
# called from jcchess.py for undo/redo move
def set_move(self, move_idx):
path = (move_idx,)
        #self.tree_selection.select_path(path) to be bypassed if cursor_changed is used as the event in moves_clicked
self.comments.set_moveno(move_idx)
if gv.show_moves == True:
if move_idx > 0:
path = str(move_idx-1)
#gv.gui.move_view.set_cursor(path, None,False)
if path[0]!=0 and path!='0':
GObject.idle_add(gv.gui.move_view.scroll_to_cell,path,None, False, 0,0) #arguments must be in list
return
def scroll_to_end(self):
adj = self.scrolled_window.get_vadjustment()
adj.set_value(adj.get_upper() - adj.get_page_size())
return False
def treeview_key_press(self, treeview, event):
# to print list of key values do:
# print dir(Gtk.keysyms)
# if up or down pressed then position the board at the move
if event.keyval == Gdk.KEY_Up or event.keyval == Gdk.KEY_Down:
self.treeview_button_press(None, None)
# user clicked on the move list
def treeview_button_press(self, treeview, event):
if gv.jcchess.get_stopped():
GObject.idle_add(self.process_tree_selection)
else:
GObject.idle_add(self.tree_selection.unselect_all)
# set the board position at the move the user clicked on
def move_box_selection(self):
if gv.jcchess.get_stopped():
(treemodel, treeiter) = gv.gui.move_view.get_selection().get_selected()
if treeiter is not None:
k = gv.gui.movestore.get_value(treeiter,0).find(".")
nmove = int(gv.gui.movestore.get_value(treeiter,0)[0:k])
self.comments.set_moveno(nmove)
# now call a method in jcchess.py to position it at the move
# clicked on
gv.jcchess.goto_move(nmove)
path = str(nmove)
self.treeview.set_cursor(path, None,False)
GObject.idle_add(self.treeview.scroll_to_cell,path,None, False, 0,0) #arguments must be in list
else:
GObject.idle_add(gv.gui.move_view.unselect_all)
def process_tree_selection(self):
(treemodel, treeiter) = self.tree_selection.get_selected()
if treeiter is not None:
# get the selected row number
path = treemodel.get_path(treeiter)
move_idx = path.get_indices()[0]
#move_str = treemodel.get_value(treeiter, 0)
#move_str = move_str[0: len(move_str) - 1]
#move_idx = int(move_str)
self.comments.set_moveno(move_idx)
# now call a method in jcchess.py to position it at the move
# clicked on
gv.jcchess.goto_move(move_idx)
def set_comment(self, index, text):
self.comments.set_comment(index,text)
def set_comment_ind(self, ind):
if ind:
cind = "..."
else:
cind = " "
(treemodel, treeiter) = self.tree_selection.get_selected()
if treeiter is not None:
self.liststore.set_value(treeiter, 2, cind)
def comments_button_clicked_cb(self, button):
self.comments.show_comments_window()
def get_ref():
if Move_List.move_list_ref is None:
Move_List.move_list_ref = Move_List()
return Move_List.move_list_ref
|
gpl-3.0
| -270,891,830,651,016,260 | 36.54918 | 115 | 0.56778 | false |
biothings/biothings.api
|
biothings/hub/databuild/differ.py
|
1
|
59337
|
import os
import shutil
import time
import json
from datetime import datetime
import asyncio
from functools import partial
import glob
import random
from biothings.utils.common import timesofar, get_timestamp, \
dump, rmdashfr, loadobj, md5sum
from biothings.utils.mongo import id_feeder, get_target_db, get_previous_collection
from biothings.utils.hub_db import get_src_build
from biothings.utils.loggers import get_logger
from biothings.utils.diff import diff_docs_jsonpatch
from biothings.hub.databuild.backend import generate_folder
from biothings import config as btconfig
from biothings.utils.manager import BaseManager
from .backend import create_backend, merge_src_build_metadata
from biothings.utils.backend import DocMongoBackend
from biothings.utils.jsondiff import make as jsondiff
from biothings.hub import DIFFER_CATEGORY, DIFFMANAGER_CATEGORY
from biothings.hub.datarelease import set_pending_to_release_note
logging = btconfig.logger
class DifferException(Exception):
pass
class BaseDiffer(object):
# diff type name, identifying the diff algorithm
# must be set in sub-class
diff_type = None
def __init__(self, diff_func, job_manager, log_folder):
self.old = None
self.new = None
self.log_folder = log_folder
self.job_manager = job_manager
self.diff_func = diff_func
self.timestamp = datetime.now()
self.logfile = None
self.setup_log()
self.ti = time.time()
self.metadata = {} # diff metadata
self.metadata_filename = None
def setup_log(self):
self.logger, self.logfile = get_logger(
'diff_%s' % self.__class__.diff_type, self.log_folder)
def get_predicates(self):
return []
def get_pinfo(self):
"""
Return dict containing information about the current process
(used to report in the hub)
"""
pinfo = {
"category": DIFFER_CATEGORY,
"source": "",
"step": "",
"description": ""
}
preds = self.get_predicates()
if preds:
pinfo["__predicates__"] = preds
return pinfo
def register_status(self, status, transient=False, init=False, **extra):
src_build = get_src_build()
job_info = {
'status': status,
'step_started_at': datetime.now().astimezone(),
'logfile': self.logfile,
}
diff_key = self.old.target_name
diff_info = {"diff": {diff_key: {}}}
if transient:
# record some "in-progress" information
job_info['pid'] = os.getpid()
else:
# only register time when it's a final state
job_info["time"] = timesofar(self.ti)
t1 = round(time.time() - self.ti, 0)
job_info["time_in_s"] = t1
diff_info["diff"][diff_key]["created_at"] = datetime.now().astimezone()
if "diff" in extra:
diff_info["diff"][diff_key].update(extra["diff"])
if "job" in extra:
job_info.update(extra["job"])
# since the base is the merged collection, we register info there
        # as the new collection (diff results are associated to the most recent collection)
build = src_build.find_one({'_id': self.new.target_name})
if not build:
self.logger.info(
"Can't find build document '%s', no status to register" %
self.new.target_name)
return
if init:
# init timer for this step
self.ti = time.time()
src_build.update({'_id': self.new.target_name},
{"$push": {
'jobs': job_info
}})
# now refresh/sync
build = src_build.find_one({'_id': self.new.target_name})
else:
# merge extra at root level
# (to keep building data...) and update the last one
# (it's been properly created before when init=True)
build["jobs"] and build["jobs"][-1].update(job_info)
def merge_info(target, d):
if "__REPLACE__" in d.keys():
d.pop("__REPLACE__")
target = d
else:
for k, v in d.items():
if type(v) == dict:
# only merge if both are dict (otherwise replace/set with d)
if k in target and type(target[k]) == dict:
target[k] = merge_info(target[k], v)
else:
v.pop("__REPLACE__", None)
# merge v with "nothing" just to make sure to remove any "__REPLACE__"
v = merge_info({}, v)
target[k] = v
else:
target[k] = v
return target
build = merge_info(build, diff_info)
src_build.replace_one({"_id": build["_id"]}, build)
@asyncio.coroutine
def diff_cols(self,
old_db_col_names,
new_db_col_names,
batch_size,
steps,
mode=None,
exclude=[]):
"""
Compare new with old collections and produce diff files. Root keys can be excluded from
comparison with "exclude" parameter
`*_db_col_names` can be:
        1. a collection name (as a string), assuming it is
           in the target database.
2. tuple with 2 elements, the first one is then either "source" or "target"
to respectively specify src or target database, and the second element is
the collection name.
3. tuple with 3 elements (URI,db,collection), looking like:
("mongodb://user:pass@host","dbname","collection"), allowing to specify
any connection on any server
steps: - 'content' will perform diff on actual content.
- 'mapping' will perform diff on ES mappings (if target collection involved)
- 'reduce' will merge diff files, trying to avoid having many small files
- 'post' is a hook to do stuff once everything is merged (override method post_diff_cols)
        mode: 'purge' will remove any existing files for this comparison while 'resume' will happily ignore
               existing data and do whatever is requested (like running steps="post" on an existing diff folder...)
"""
        # these ones are used to point to the build doc, not the underlying backend
        # (i.e. if a link builder has been used, it refers to a collection in src_db, but
        # we need the metadata from the build doc too)
self.new = create_backend(new_db_col_names, follow_ref=False)
self.old = create_backend(old_db_col_names, follow_ref=False)
# these point to the actual collection containing data
content_new = create_backend(new_db_col_names, follow_ref=True)
content_old = create_backend(old_db_col_names, follow_ref=True)
# check what to do
if type(steps) == str:
steps = [steps]
diff_folder = generate_folder(btconfig.DIFF_PATH, old_db_col_names,
new_db_col_names)
if mode != "force" and os.path.exists(
diff_folder) and "content" in steps:
if mode == "purge" and os.path.exists(diff_folder):
rmdashfr(diff_folder)
elif mode != "resume":
raise FileExistsError(
"Found existing files in '%s', use mode='purge'" %
diff_folder)
if not os.path.exists(diff_folder):
os.makedirs(diff_folder)
# create metadata file storing info about how we created the diff
# and some summary data
diff_stats = {
"update": 0,
"add": 0,
"delete": 0,
"mapping_changed": False
}
self.metadata_filename = os.path.join(diff_folder, "metadata.json")
if os.path.exists(self.metadata_filename):
# load previous metadata in case we try to update/complete diff
self.metadata = json.load(open(self.metadata_filename))
else:
assert self.old.version, "Version for 'old' collection not defined"
self.metadata = {
"diff": {
"type": self.diff_type,
"func": self.diff_func.__name__,
"version": "%s.%s" % (self.old.version, self.new.version),
"stats": diff_stats, # ref to diff_stats
"files": [],
# when "new" is a target collection:
"mapping_file": None,
"info": {
"generated_on": str(datetime.now().astimezone()),
"exclude": exclude,
"steps": steps,
"mode": mode,
"batch_size": batch_size
}
},
"old": {
"backend": old_db_col_names,
"version": self.old.version
},
"new": {
"backend": new_db_col_names,
"version": self.new.version
},
# when "new" is a mongodb target collection:
"_meta": {},
"build_config": {},
}
if isinstance(
self.new, DocMongoBackend
) and self.new.target_collection.database.name == btconfig.DATA_TARGET_DATABASE:
new_doc = get_src_build().find_one(
{"_id": self.new.target_collection.name})
if not new_doc:
raise DifferException("Collection '%s' has no corresponding build document" %
self.new.target_collection.name)
self.metadata["_meta"] = self.get_metadata()
self.metadata["build_config"] = new_doc.get("build_config")
got_error = False
if "mapping" in steps:
def diff_mapping(old, new, diff_folder):
summary = {}
old_build = get_src_build().find_one(
{"_id": old.target_collection.name})
new_build = get_src_build().find_one(
{"_id": new.target_collection.name})
if old_build and new_build:
# mapping diff always in jsondiff
mapping_diff = jsondiff(old_build["mapping"],
new_build["mapping"])
if mapping_diff:
file_name = os.path.join(diff_folder, "mapping.pyobj")
dump(mapping_diff, file_name)
md5 = md5sum(file_name)
summary["mapping_file"] = {
"name": os.path.basename(file_name),
"md5sum": md5,
"size": os.stat(file_name).st_size
}
else:
self.logger.info("Neither '%s' nor '%s' have mappings associated to them, skip" %
(old.target_collection.name, new.target_collection.name))
return summary
def mapping_diffed(f):
res = f.result()
self.register_status("success", job={"step": "diff-mapping"})
if res.get("mapping_file"):
nonlocal got_error
                    # check mapping differences: only "add" ops are allowed, as any other actions would be
                    # ignored by ES once applied (you can't update/delete elements of an existing mapping)
mf = os.path.join(diff_folder, res["mapping_file"]["name"])
ops = loadobj(mf)
for op in ops:
if op["op"] != "add":
err = DifferException("Found diff operation '%s' in mapping file, " % op["op"]
+ " only 'add' operations are allowed. You can still produce the "
+ "diff by removing 'mapping' from 'steps' arguments. "
+ "Ex: steps=['content']. Diff operation was: %s" % op)
got_error = err
self.metadata["diff"]["mapping_file"] = res["mapping_file"]
diff_stats["mapping_changed"] = True
json.dump(self.metadata,
open(self.metadata_filename, "w"),
indent=True)
self.logger.info(
"Diff file containing mapping differences generated: %s" %
res.get("mapping_file"))
pinfo = self.get_pinfo()
pinfo["source"] = "%s vs %s" % (self.new.target_name,
self.old.target_name)
pinfo["step"] = "mapping: old vs new"
self.register_status("diffing",
transient=True,
init=True,
job={"step": "diff-mapping"})
job = yield from self.job_manager.defer_to_thread(
pinfo, partial(diff_mapping, self.old, self.new, diff_folder))
job.add_done_callback(mapping_diffed)
yield from job
if got_error:
raise got_error
if content_old == content_new:
self.logger.info(
"Old and new collections are the same, skipping 'content' step"
)
elif "content" in steps:
cnt = 0
jobs = []
pinfo = self.get_pinfo()
pinfo["source"] = "%s vs %s" % (content_new.target_name,
content_old.target_name)
pinfo["step"] = "content: new vs old"
data_new = id_feeder(content_new, batch_size=batch_size)
selfcontained = "selfcontained" in self.diff_type
self.register_status("diffing",
transient=True,
init=True,
job={"step": "diff-content"})
for id_list_new in data_new:
cnt += 1
pinfo["description"] = "batch #%s" % cnt
def diffed(f):
res = f.result()
diff_stats["update"] += res["update"]
diff_stats["add"] += res["add"]
if res.get("diff_file"):
self.metadata["diff"]["files"].append(res["diff_file"])
self.logger.info("(Updated: {}, Added: {})".format(
res["update"], res["add"]))
self.register_status("success",
job={"step": "diff-content"})
self.logger.info("Creating diff worker for batch #%s" % cnt)
job = yield from self.job_manager.defer_to_process(
pinfo,
partial(diff_worker_new_vs_old, id_list_new,
old_db_col_names, new_db_col_names, cnt,
diff_folder, self.diff_func, exclude,
selfcontained))
job.add_done_callback(diffed)
jobs.append(job)
yield from asyncio.gather(*jobs)
self.logger.info(
"Finished calculating diff for the new collection. Total number of docs updated: {}, added: {}"
.format(diff_stats["update"], diff_stats["add"]))
data_old = id_feeder(content_old, batch_size=batch_size)
jobs = []
pinfo = self.get_pinfo()
pinfo["source"] = "%s vs %s" % (content_old.target_name,
content_new.target_name)
pinfo["step"] = "content: old vs new"
for id_list_old in data_old:
cnt += 1
pinfo["description"] = "batch #%s" % cnt
def diffed(f):
res = f.result()
diff_stats["delete"] += res["delete"]
if res.get("diff_file"):
self.metadata["diff"]["files"].append(res["diff_file"])
self.logger.info("(Deleted: {})".format(res["delete"]))
self.logger.info("Creating diff worker for batch #%s" % cnt)
job = yield from self.job_manager.defer_to_process(
pinfo,
partial(diff_worker_old_vs_new, id_list_old,
new_db_col_names, cnt, diff_folder))
job.add_done_callback(diffed)
jobs.append(job)
yield from asyncio.gather(*jobs)
self.logger.info(
"Finished calculating diff for the old collection. Total number of docs deleted: {}"
.format(diff_stats["delete"]))
json.dump(self.metadata,
open(self.metadata_filename, "w"),
indent=True)
self.logger.info(
"Summary: (Updated: {}, Added: {}, Deleted: {}, Mapping changed: {})"
.format(diff_stats["update"], diff_stats["add"],
diff_stats["delete"], diff_stats["mapping_changed"]))
if "reduce" in steps:
@asyncio.coroutine
def merge_diff():
self.logger.info("Reduce/merge diff files")
max_diff_size = getattr(btconfig, "MAX_DIFF_SIZE",
10 * 1024**2)
current_size = 0
cnt = 0
final_res = []
tomerge = []
# .done contains original diff files
done_folder = os.path.join(diff_folder, ".done")
try:
os.mkdir(done_folder)
except FileExistsError:
pass
def merged(f, cnt):
nonlocal got_error
nonlocal final_res
try:
res = f.result()
final_res.extend(res)
self.logger.info("Diff file #%s created" % cnt)
except Exception as e:
got_error = e
diff_files = [f for f in glob.glob(os.path.join(diff_folder, "*.pyobj"))
if not os.path.basename(f).startswith("mapping")]
self.logger.info("%d diff files to process in total" %
len(diff_files))
jobs = []
while diff_files:
if len(diff_files) % 100 == 0:
self.logger.info("%d diff files to process" %
len(diff_files))
if current_size > max_diff_size:
job = yield from self.job_manager.defer_to_process(
pinfo,
partial(reduce_diffs, tomerge, cnt, diff_folder,
done_folder))
job.add_done_callback(partial(merged, cnt=cnt))
jobs.append(job)
current_size = 0
cnt += 1
tomerge = []
else:
diff_file = diff_files.pop()
current_size += os.stat(diff_file).st_size
tomerge.append(diff_file)
assert not diff_files
if tomerge:
job = yield from self.job_manager.defer_to_process(
pinfo,
partial(reduce_diffs, tomerge, cnt, diff_folder,
done_folder))
job.add_done_callback(partial(merged, cnt=cnt))
jobs.append(job)
yield from job
yield from asyncio.gather(*jobs)
return final_res
pinfo = self.get_pinfo()
pinfo["source"] = "diff_folder"
pinfo["step"] = "reduce"
#job = yield from self.job_manager.defer_to_thread(pinfo,merge_diff)
self.register_status("diffing",
transient=True,
init=True,
job={"step": "diff-reduce"})
res = yield from merge_diff()
self.metadata["diff"]["files"] = res
json.dump(self.metadata,
open(self.metadata_filename, "w"),
indent=True)
if got_error:
self.logger.exception("Failed to reduce diff files: %s" %
got_error,
extra={"notify": True})
raise got_error
self.register_status("success", job={"step": "diff-reduce"})
if "post" in steps:
pinfo = self.get_pinfo()
pinfo["source"] = "diff_folder"
pinfo["step"] = "post"
self.register_status("diffing",
transient=True,
init=True,
job={"step": "diff-post"})
job = yield from self.job_manager.defer_to_thread(
pinfo,
partial(self.post_diff_cols,
old_db_col_names,
new_db_col_names,
batch_size,
steps,
mode=mode,
exclude=exclude))
def posted(f):
nonlocal got_error
try:
res = f.result()
self.register_status("success",
job={"step": "diff-post"},
diff={"post": res})
self.logger.info("Post diff process successfully run: %s" %
res)
except Exception as e:
got_error = e
job.add_done_callback(posted)
yield from job
json.dump(self.metadata,
open(self.metadata_filename, "w"),
indent=True)
if got_error:
self.logger.exception("Failed to run post diff process: %s" %
got_error,
extra={"notify": True})
raise got_error
strargs = "[old=%s,new=%s,steps=%s,diff_stats=%s]" % (
old_db_col_names, new_db_col_names, steps, diff_stats)
self.logger.info("success %s" % strargs, extra={"notify": True})
# remove some metadata key for diff registering (some are already in build doc, it would be duplication)
self.metadata.pop("_meta", None)
self.metadata.pop("build_config", None)
# record diff_folder so it's available for later without re-computing it
self.metadata["diff_folder"] = diff_folder
self.register_status("success", diff=self.metadata)
return diff_stats
def diff(self,
old_db_col_names,
new_db_col_names,
batch_size=100000,
steps=["content", "mapping", "reduce", "post"],
mode=None,
exclude=[]):
"""wrapper over diff_cols() coroutine, return a task"""
job = asyncio.ensure_future(
self.diff_cols(old_db_col_names, new_db_col_names, batch_size,
steps, mode, exclude))
return job
def get_metadata(self):
new_doc = get_src_build().find_one(
{"_id": self.new.target_collection.name})
if not new_doc:
raise DifferException("Collection '%s' has no corresponding build document" %
self.new.target_collection.name)
return new_doc.get("_meta", {})
def post_diff_cols(self,
old_db_col_names,
new_db_col_names,
batch_size,
steps,
mode=None,
exclude=[]):
"""Post diff process hook. This coroutine will in a dedicated thread"""
return
class ColdHotDiffer(BaseDiffer):
@asyncio.coroutine
def diff_cols(self, old_db_col_names, new_db_col_names, *args, **kwargs):
self.new = create_backend(new_db_col_names)
new_doc = get_src_build().find_one(
{"_id": self.new.target_collection.name})
assert "cold_collection" in new_doc.get("build_config", {}), "%s document doesn't have " % self.new.target_collection.name \
+ "a premerge collection declared. Is it really a hot merges collection ?"
self.cold = create_backend(new_doc["build_config"]["cold_collection"])
return super(ColdHotDiffer,
self).diff_cols(old_db_col_names, new_db_col_names, *args,
**kwargs)
def get_metadata(self):
new_doc = get_src_build().find_one(
{"_id": self.new.target_collection.name})
cold_doc = get_src_build().find_one(
{"_id": self.cold.target_collection.name})
if not new_doc:
raise DifferException("Collection '%s' has no corresponding build document" %
self.new.target_collection.name)
if not cold_doc:
raise DifferException("Collection '%s' has no corresponding build document" %
self.cold.target_collection.name)
return merge_src_build_metadata([cold_doc, new_doc])
class ColdHotJsonDifferBase(ColdHotDiffer):
def post_diff_cols(self,
old_db_col_names,
new_db_col_names,
batch_size,
steps,
mode=None,
exclude=[]):
"""
        Post-process the diff files by adjusting some jsondiff operations. Here's the process.
        For updated documents, some operations might be illegal in the context of cold/hot merged collections.
Case #1: "remove" op in an update
from a cold/premerge collection, we have that doc:
coldd = {"_id":1, "A":"123", "B":"456", "C":True}
from the previous hot merge we have this doc:
prevd = {"_id":1, "D":"789", "C":True, "E":"abc"}
At that point, the final document, fully merged and indexed is:
finald = {"_id":1, "A":"123", "B":"456", "C":True, "D":"789", "E":"abc"}
We can notice field "C" is common to coldd and prevd.
from the new hot merge, we have:
newd = {"_id":1, "E","abc"} # C and D don't exist anymore
        Diffing prevd vs. newd will give jsondiff operations:
[{'op': 'remove', 'path': '/C'}, {'op': 'remove', 'path': '/D'}]
        The problem here is 'C' is removed while it was already in the cold merge; it should stay because it comes
        with some resource involved in the premerge (dependent keys, eg. myvariant, "observed" key comes with certain sources)
        => the jsondiff operation on "C" must be discarded.
        Note: if the operation involves a root key (not '/a/c' for instance) and that key is found in the premerge,
        then remove the operation. (note we just consider root keys; if the deletion occurs deeper in the document,
        it's just a legal operation updating inner content)
For deleted documents, the same kind of logic applies
Case #2: "delete"
from a cold/premerge collection, we have that doc:
coldd = {"_id":1, "A":"123", "B":"456", "C":True}
from the previous hot merge we have this doc:
prevd = {"_id":1, "D":"789", "C":True}
fully merged doc:
finald = {"_id":1, "A":"123", "B":"456", "C":True, "D":"789"}
from the new hot merge, we have:
newd = {} # document doesn't exist anymore
Diffing prevd vs. newd will mark document with _id == 1 to be deleted
        The problem is we have data for _id=1 in the premerge collection; if we delete the whole document we'd lose too much
        information.
        => the deletion must be converted into specific "remove" jsondiff operations, for the root keys found in prevd but not in coldd
(in that case: [{'op':'remove', 'path':'/D'}], and not "C" as C is in premerge)
"""
# we should be able to find a cold_collection definition in the src_build doc
# and it should be the same for both old and new
old_doc = get_src_build().find_one({"_id": old_db_col_names})
new_doc = get_src_build().find_one({"_id": new_db_col_names})
assert "build_config" in old_doc and "cold_collection" in old_doc["build_config"], \
"No cold collection defined in src_build for %s" % old_db_col_names
assert "build_config" in new_doc and "cold_collection" in new_doc["build_config"], \
"No cold collection defined in src_build for %s" % new_db_col_names
assert old_doc["build_config"]["cold_collection"] == new_doc["build_config"]["cold_collection"], \
"Cold collections are different in src_build docs %s and %s" % (old_db_col_names, new_db_col_names)
coldcol = get_target_db()[new_doc["build_config"]["cold_collection"]]
assert coldcol.count() > 0, "Cold collection is empty..."
diff_folder = generate_folder(btconfig.DIFF_PATH, old_db_col_names,
new_db_col_names)
diff_files = glob.glob(os.path.join(diff_folder, "diff_*.pyobj"))
fixed = 0
for diff_file in diff_files:
dirty = False
self.logger.info("Post-processing diff file %s" % diff_file)
data = loadobj(diff_file)
# update/remove case #1
for updt in data["update"]:
toremove = []
for patch in updt["patch"]:
pathk = patch["path"].split("/")[
1:] # remove / at the beginning of the path
if patch["op"] == "remove" and \
len(pathk) == 1:
# let's query the premerge
coldd = coldcol.find_one({"_id": updt["_id"]})
if coldd and pathk[0] in coldd:
self.logger.info(
"Fixed a root key in cold collection that should be preserved: '%s' (for doc _id '%s')"
% (pathk[0], updt["_id"]))
toremove.append(patch)
fixed += 1
dirty = True
for p in toremove:
updt["patch"].remove(p)
# delete case #2
toremove = []
prevcol = get_target_db()[old_doc["target_name"]]
for delid in data["delete"]:
coldd = coldcol.find_one({"_id": delid})
if not coldd:
# true deletion is required
continue
else:
prevd = prevcol.find_one({"_id": delid})
prevs = set(prevd.keys())
colds = set(coldd.keys())
keys = prevs.difference(
colds
) # keys exclusively in prevd that should be removed
patches = []
for k in keys:
patches.append({"op": "remove", "path": "/%s" % k})
data["update"].append({"_id": delid, "patch": patches})
self.logger.info(
"Fixed a delete document by converting to update/remove jsondiff operations for keys: %s (_id: '%s')"
% (keys, delid))
fixed += 1
dirty = True
toremove.append(delid)
for i in toremove:
data["delete"].remove(i)
if dirty:
dump(data, diff_file, compress="lzma")
name = os.path.basename(diff_file)
md5 = md5sum(diff_file)
# find info to adjust md5sum
found = False
for i, df in enumerate(self.metadata["diff"]["files"]):
if df["name"] == name:
found = True
break
assert found, "Couldn't find file information in metadata (with md5 value), try to rebuild_diff_file_list() ?"
size = os.stat(diff_file).st_size
self.metadata["diff"]["files"][i] = {
"name": name,
"md5sum": md5,
"size": size
}
self.logger.info(self.metadata["diff"]["files"])
self.logger.info(
"Post-diff process fixing jsondiff operations done: %s fixed" %
fixed)
return {"fixed": fixed}
class JsonDiffer(BaseDiffer):
diff_type = "jsondiff"
def __init__(self, diff_func=diff_docs_jsonpatch, *args, **kwargs):
super(JsonDiffer, self).__init__(diff_func=diff_func, *args, **kwargs)
class SelfContainedJsonDiffer(JsonDiffer):
diff_type = "jsondiff-selfcontained"
class ColdHotJsonDiffer(ColdHotJsonDifferBase, JsonDiffer):
diff_type = "coldhot-jsondiff"
class ColdHotSelfContainedJsonDiffer(ColdHotJsonDifferBase,
SelfContainedJsonDiffer):
diff_type = "coldhot-jsondiff-selfcontained"
def diff_worker_new_vs_old(id_list_new,
old_db_col_names,
new_db_col_names,
batch_num,
diff_folder,
diff_func,
exclude=[],
selfcontained=False):
new = create_backend(new_db_col_names, follow_ref=True)
old = create_backend(old_db_col_names, follow_ref=True)
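    # ids found in both collections are diffed for content updates; ids only in
    # the new collection are reported as additions (deletions are handled by
    # diff_worker_old_vs_new)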
docs_common = old.mget_from_ids(id_list_new)
ids_common = [_doc['_id'] for _doc in docs_common]
id_in_new = list(set(id_list_new) - set(ids_common))
_updates = []
if len(ids_common) > 0:
_updates = diff_func(old, new, list(ids_common), exclude_attrs=exclude)
file_name = os.path.join(diff_folder, "%s.pyobj" % str(batch_num))
_result = {
'add': id_in_new,
'update': _updates,
'delete': [],
'source': new.target_name,
'timestamp': get_timestamp()
}
if selfcontained:
# consume generator as result will be pickled
_result["add"] = [d for d in new.mget_from_ids(id_in_new)]
summary = {"add": len(id_in_new), "update": len(_updates), "delete": 0}
if len(_updates) != 0 or len(id_in_new) != 0:
dump(_result, file_name)
        # compute md5 so when downloaded, users can check integrity
md5 = md5sum(file_name)
summary["diff_file"] = {
"name": os.path.basename(file_name),
"md5sum": md5,
"size": os.stat(file_name).st_size
}
return summary
def diff_worker_old_vs_new(id_list_old, new_db_col_names, batch_num,
diff_folder):
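    # ids from id_list_old that no longer exist in the new collection are
    # reported as deletions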
new = create_backend(new_db_col_names, follow_ref=True)
docs_common = new.mget_from_ids(id_list_old)
ids_common = [_doc['_id'] for _doc in docs_common]
id_in_old = list(set(id_list_old) - set(ids_common))
file_name = os.path.join(diff_folder, "%s.pyobj" % str(batch_num))
_result = {
'delete': id_in_old,
'add': [],
'update': [],
'source': new.target_name,
'timestamp': get_timestamp()
}
summary = {"add": 0, "update": 0, "delete": len(id_in_old)}
if len(id_in_old) != 0:
dump(_result, file_name)
        # compute md5 so when downloaded, users can check integrity
md5 = md5sum(file_name)
summary["diff_file"] = {
"name": os.path.basename(file_name),
"md5sum": md5,
"size": os.stat(file_name).st_size
}
return summary
def diff_worker_count(id_list, db_col_names, batch_num):
col = create_backend(db_col_names, follow_ref=True)
docs = col.mget_from_ids(id_list)
res = {}
for doc in docs:
for k in doc:
res.setdefault(k, 0)
res[k] += 1
return res
class DiffReportRendererBase(object):
def __init__(self,
max_reported_ids=None,
max_randomly_picked=None,
detailed=False):
self.max_reported_ids = max_reported_ids or hasattr(btconfig, "MAX_REPORTED_IDS") and \
btconfig.MAX_REPORTED_IDS or 1000
self.max_randomly_picked = max_randomly_picked or hasattr(btconfig, "MAX_RANDOMLY_PICKED") and \
btconfig.MAX_RANDOMLY_PICKED or 10
self.detailed = detailed
def save(self, report, filename):
"""
Save report output (rendered) into filename
"""
raise NotImplementedError("implement me")
class DiffReportTxt(DiffReportRendererBase):
def save(self, report, filename="report.txt"):
try:
import prettytable
except ImportError:
raise ImportError(
"Please install prettytable to use this rendered")
def build_id_table(subreport):
if self.detailed:
table = prettytable.PrettyTable(["IDs", "Root keys"])
table.align["IDs"] = "l"
table.align["Root keys"] = "l"
else:
table = prettytable.PrettyTable(["IDs"])
table.align["IDs"] = "l"
if subreport["count"] <= self.max_reported_ids:
ids = subreport["ids"]
else:
ids = [
random.choice(subreport["ids"])
for i in range(self.max_reported_ids)
]
for dat in ids:
if self.detailed:
# list of [_id,[keys]]
table.add_row([dat[0], ", ".join(dat[1])])
else:
table.add_row([dat])
return table
txt = ""
title = "Diff report (generated on %s)" % datetime.now()
txt += title + "\n"
txt += "".join(["="] * len(title)) + "\n"
txt += "\n"
txt += "Metadata\n"
txt += "--------\n"
if report.get("metadata", {}):
txt += "Old collection: %s\n" % repr(report["metadata"].get("old"))
txt += "New collection: %s\n" % repr(report["metadata"].get("new"))
txt += "Batch size: %s\n" % report["metadata"]["diff"]["info"].get(
"batch_size")
txt += "Steps: %s\n" % report["metadata"]["diff"]["info"].get(
"steps")
txt += "Key(s) excluded: %s\n" % report["metadata"]["diff"][
"info"].get("exclude")
txt += "Diff generated on: %s\n" % report["metadata"]["diff"][
"info"].get("generated_on")
else:
txt += "No metadata found in report\n"
txt += "\n"
txt += "Summary\n"
txt += "-------\n"
txt += "Added documents: %s\n" % report["added"]["count"]
txt += "Deleted documents: %s\n" % report["deleted"]["count"]
txt += "Updated documents: %s\n" % report["updated"]["count"]
txt += "\n"
root_keys = report.get("metadata",
{}).get("diff",
{}).get("stats",
{}).get("root_keys", {})
if root_keys:
for src in sorted(root_keys):
txt += "%s: %s\n" % (src, root_keys[src])
else:
txt += "No root keys count found in report\n"
txt += "\n"
txt += "Added documents (%s randomly picked from report)\n" % self.max_reported_ids
txt += "------------------------------------------------\n"
if report["added"]["count"]:
table = build_id_table(report["added"])
txt += table.get_string()
txt += "\n"
else:
txt += "No added document found in report\n"
txt += "\n"
txt += "Deleted documents (%s randomly picked from report)\n" % self.max_reported_ids
txt += "--------------------------------------------------\n"
if report["deleted"]["count"]:
table = build_id_table(report["deleted"])
txt += table.get_string()
txt += "\n"
else:
txt += "No deleted document found in report\n"
txt += "\n"
txt += "Updated documents (%s examples randomly picked from report)\n" % self.max_randomly_picked
txt += "-----------------------------------------------------------\n"
txt += "\n"
for op in sorted(report["updated"]):
if op == "count":
continue # already displayed
if report["updated"][op]:
table = prettytable.PrettyTable([op, "Count", "Examples"])
table.sortby = "Count"
table.reversesort = True
table.align[op] = "l"
table.align["Count"] = "r"
table.align["Examples"] = "l"
for path in report["updated"][op]:
info = report["updated"][op][path]
row = [path, info["count"]]
if info["count"] <= self.max_randomly_picked:
row.append(", ".join(info["ids"]))
else:
row.append(", ".join([
random.choice(info["ids"])
for i in range(self.max_randomly_picked)
]))
table.add_row(row)
txt += table.get_string()
txt += "\n"
else:
txt += "No content found for diff operation '%s'\n" % op
txt += "\n"
txt += "\n"
with open(os.path.join(report["diff_folder"], filename), "w") as fout:
fout.write(txt)
return txt
class DifferManager(BaseManager):
def __init__(self, poll_schedule=None, *args, **kwargs):
"""
DifferManager deals with the different differ objects used to create and
        analyze diffs between datasources.
"""
super(DifferManager, self).__init__(*args, **kwargs)
self.log_folder = btconfig.LOG_FOLDER
self.timestamp = datetime.now()
self.poll_schedule = poll_schedule
self.setup_log()
def clean_stale_status(self):
src_build = get_src_build()
for build in src_build.find():
dirty = False
for job in build.get("jobs", []):
if job.get("status") == "diffing":
logging.warning(
"Found stale build '%s', marking diff status as 'canceled'"
% build["_id"])
job["status"] = "canceled"
dirty = True
if dirty:
src_build.replace_one({"_id": build["_id"]}, build)
def register_differ(self, klass):
if klass.diff_type is None:
raise DifferException("diff_type must be defined in %s" % klass)
self.register[klass.diff_type] = partial(
klass,
log_folder=btconfig.LOG_FOLDER,
job_manager=self.job_manager)
def configure(self, partial_differs=[JsonDiffer, SelfContainedJsonDiffer]):
for pdiffer in partial_differs:
self.register_differ(pdiffer)
def setup_log(self):
self.logger, self.logfile = get_logger('diffmanager')
def get_predicates(self):
def no_other_diffmanager_step_running(job_manager):
"""DiffManager deals with diff report, release note, publishing,
none of them should run more than one at a time"""
# Note: report output is part a publish_diff, release_note is impacted by diff content,
# overall we keep things simple and don't allow more than one diff manager job to run
# at the same time
return len([
j for j in job_manager.jobs.values()
if j["category"] == DIFFMANAGER_CATEGORY
]) == 0
return [no_other_diffmanager_step_running]
def get_pinfo(self):
"""
Return dict containing information about the current process
(used to report in the hub)
"""
pinfo = {
"category": DIFFMANAGER_CATEGORY,
"source": "",
"step": "",
"description": ""
}
preds = self.get_predicates()
if preds:
pinfo["__predicates__"] = preds
return pinfo
def __getitem__(self, diff_type):
"""
        Return an instance of a differ for the given diff_type
        Note: each call returns a different instance (factory call behind the scenes...)
"""
# we'll get a partial class but will return an instance
pclass = BaseManager.__getitem__(self, diff_type)
return pclass()
def diff(self,
diff_type,
old,
new,
batch_size=100000,
steps=["content", "mapping", "reduce", "post"],
mode=None,
exclude=["_timestamp"]):
"""
Run a diff to compare old vs. new collections. using differ algorithm diff_type. Results are stored in
a diff folder.
Steps can be passed to choose what to do:
        - count: will count root keys in new collections and store them as statistics.
        - content: will diff the content between old and new. The format of the results (diff files) depends on diff_type
"""
        # Note: _timestamp is excluded by default since it's an internal field (it exists in the mongo doc,
        # but not in the ES "_source" document; there's a root timestamp under control of
        # _timestamp : {enable:true} in the mapping)
try:
differ = self[diff_type]
old = old or get_previous_collection(new)
job = differ.diff(old,
new,
batch_size=batch_size,
steps=steps,
mode=mode,
exclude=exclude)
def diffed(f):
try:
_ = f.result()
# after creating a build diff, indicate
# a release note should be auto generated
set_pending_to_release_note(new)
except Exception as e:
self.logger.error("Error during diff: %s" % e)
raise
job.add_done_callback(diffed)
return job
except KeyError as e:
raise DifferException("No such differ '%s' (error: %s)" %
(diff_type, e))
def diff_report(self,
old_db_col_names,
new_db_col_names,
report_filename="report.txt",
format="txt",
detailed=True,
max_reported_ids=None,
max_randomly_picked=None,
mode=None):
max_reported_ids = max_reported_ids or hasattr(btconfig, "MAX_REPORTED_IDS") and \
btconfig.MAX_REPORTED_IDS or 1000
max_randomly_picked = max_randomly_picked or hasattr(btconfig, "MAX_RANDOMLY_PICKED") and \
btconfig.MAX_RANDOMLY_PICKED or 10
def do():
if mode == "purge" or not os.path.exists(reportfilepath):
assert format == "txt", "Only 'txt' format supported for now"
report = self.build_diff_report(diff_folder, detailed,
max_reported_ids)
render = DiffReportTxt(max_reported_ids=max_reported_ids,
max_randomly_picked=max_randomly_picked,
detailed=detailed)
return render.save(report, report_filename)
else:
self.logger.debug("Report already generated, now using it")
return open(reportfilepath).read()
@asyncio.coroutine
def main(diff_folder):
got_error = False
pinfo = self.get_pinfo()
pinfo["step"] = "report"
pinfo["source"] = diff_folder
pinfo["description"] = report_filename
job = yield from self.job_manager.defer_to_thread(pinfo, do)
def reported(f):
nonlocal got_error
try:
_ = f.result()
self.logger.info("Diff report ready, saved in %s" %
reportfilepath,
extra={
"notify": True,
"attach": reportfilepath
})
except Exception as e:
got_error = e
job.add_done_callback(reported)
yield from job
if got_error:
self.logger.exception("Failed to create diff report: %s" %
got_error,
extra={"notify": True})
raise got_error
diff_folder = generate_folder(btconfig.DIFF_PATH, old_db_col_names,
new_db_col_names)
reportfilepath = os.path.join(diff_folder, report_filename)
job = asyncio.ensure_future(main(diff_folder))
return job
def build_diff_report(self,
diff_folder,
detailed=True,
max_reported_ids=None):
"""
        Analyze diff files in diff_folder and give a summary of changes.
        max_reported_ids is the number of IDs contained in the report for each part.
        detailed will trigger a deeper analysis, which takes more time.
"""
max_reported_ids = max_reported_ids or hasattr(btconfig, "MAX_REPORTED_IDS") and \
btconfig.MAX_REPORTED_IDS or 1000
update_details = {
"add": {}, # "count": 0, "data": {} },
"remove": {}, # "count": 0, "data": {} },
"replace": {}, # "count": 0, "data": {} },
"move": {}, # "count": 0, "data": {} },
"count": 0,
}
adds = {"count": 0, "ids": []}
dels = {"count": 0, "ids": []}
sources = {}
if os.path.isabs(diff_folder):
data_folder = diff_folder
else:
data_folder = os.path.join(btconfig.DIFF_PATH, diff_folder)
metadata = {}
try:
metafile = os.path.join(data_folder, "metadata.json")
metadata = json.load(open(metafile))
except FileNotFoundError:
logging.warning("Not metadata found in diff folder")
if detailed:
raise Exception(
"Can't perform detailed analysis without a metadata file")
def analyze(diff_file, detailed):
data = loadobj(diff_file)
sources[data["source"]] = 1
if detailed:
# TODO: if self-contained, no db connection needed
new_col = create_backend(metadata["new"]["backend"],
follow_ref=True)
old_col = create_backend(metadata["old"]["backend"],
follow_ref=True)
if len(adds["ids"]) < max_reported_ids:
if detailed:
# look for which root keys were added in new collection
for _id in data["add"]:
# selfcontained = dict for whole doc (see TODO above)
if type(_id) == dict:
_id = _id["_id"]
doc = new_col.get_from_id(_id)
rkeys = sorted(doc.keys())
adds["ids"].append([_id, rkeys])
else:
if data["add"] and type(data["add"][0]) == dict:
adds["ids"].extend([d["_id"] for d in data["add"]])
else:
adds["ids"].extend(data["add"])
adds["count"] += len(data["add"])
if len(dels["ids"]) < max_reported_ids:
if detailed:
# look for which root keys were deleted in old collection
for _id in data["delete"]:
doc = old_col.get_from_id(_id)
rkeys = sorted(doc.keys())
dels["ids"].append([_id, rkeys])
else:
dels["ids"].extend(data["delete"])
dels["count"] += len(data["delete"])
for up in data["update"]:
for patch in up["patch"]:
update_details[patch["op"]].setdefault(
patch["path"], {
"count": 0,
"ids": []
})
if len(update_details[patch["op"]][patch["path"]]
["ids"]) < max_reported_ids:
update_details[patch["op"]][
patch["path"]]["ids"].append(up["_id"])
update_details[patch["op"]][patch["path"]]["count"] += 1
update_details["count"] += len(data["update"])
assert len(
sources
) == 1, "Should have one datasource from diff files, got: %s" % [
s for s in sources
]
        # we randomize the files order b/c we randomly pick some examples from those
        # files. If files contain data in order (like chrom 1, then chrom 2)
# we won't have a representative sample
files = glob.glob(os.path.join(data_folder, "*.pyobj"))
random.shuffle(files)
total = len(files)
for i, f in enumerate(files):
if os.path.basename(f).startswith("mapping"):
logging.debug("Skip mapping file")
continue
logging.info("Running report worker for '%s' (%d/%d)" %
(f, i + 1, total))
analyze(f, detailed)
return {
"added": adds,
"deleted": dels,
"updated": update_details,
"diff_folder": diff_folder,
"detailed": detailed,
"metadata": metadata
}
def poll(self, state, func):
super(DifferManager, self).poll(state, func, col=get_src_build())
def trigger_diff(self, diff_type, doc, **kwargs):
"""
Launch a diff given a src_build document. In order to
know the first collection to diff against, get_previous_collection()
method is used.
"""
new_db_col_names = doc["_id"]
old_db_col_names = get_previous_collection(new_db_col_names)
self.diff(diff_type, old_db_col_names, new_db_col_names, **kwargs)
def rebuild_diff_file_list(self, diff_folder):
diff_files = glob.glob(os.path.join(diff_folder, "*.pyobj"))
metadata = json.load(open(os.path.join(diff_folder, "metadata.json")))
try:
metadata["diff"]["files"] = []
if "mapping_file" not in metadata["diff"]:
metadata["diff"]["mapping_file"] = None
for diff_file in diff_files:
name = os.path.basename(diff_file)
md5 = md5sum(diff_file)
info = {
"md5sum": md5,
"name": name,
"size": os.stat(diff_file).st_size
}
if "mapping" in diff_file: # dirty...
metadata["diff"]["mapping"] = info
else:
metadata["diff"]["files"].append(info)
json.dump(metadata,
open(os.path.join(diff_folder, "metadata.json"), "w"),
indent=True)
self.logger.info(
"Successfully rebuild diff_files list with all files found in %s"
% diff_folder)
except KeyError as e:
            self.logger.error(
                "Metadata is too damaged, can't rebuild diff_files list: %s"
% e)
def diff_info(self):
dtypes = self.register.keys()
res = {}
for typ in dtypes:
res[typ] = {}
return res
def reduce_diffs(diffs, num, diff_folder, done_folder):
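    # merge several small diff files into a single "diff_<num>.pyobj" file and
    # move the original files into the .done folder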
assert diffs
res = []
fn = "diff_%s.pyobj" % num
logging.info("Merging %s => %s" % ([os.path.basename(f)
for f in diffs], fn))
if len(diffs) == 1:
# just rename
outf = os.path.join(diff_folder, fn)
shutil.copyfile(diffs[0], outf)
res.append({
"name": fn,
"md5sum": md5sum(outf),
"size": os.stat(outf).st_size
})
os.rename(diffs[0],
os.path.join(done_folder, os.path.basename(diffs[0])))
return res
merged = loadobj(diffs[0])
os.rename(diffs[0], os.path.join(done_folder, os.path.basename(diffs[0])))
for diff_fn in diffs[1:]:
diff = loadobj(diff_fn)
assert merged["source"] == diff["source"]
for k in ["add", "delete", "update"]:
merged[k].extend(diff[k])
os.rename(diff_fn, os.path.join(done_folder,
os.path.basename(diff_fn)))
dump(merged, os.path.join(diff_folder, fn), compress="lzma")
file_name = os.path.join(diff_folder, fn)
res.append({
"name": fn,
"md5sum": md5sum(file_name),
"size": os.stat(file_name).st_size
})
return res
def set_pending_to_diff(col_name):
src_build = get_src_build()
src_build.update({"_id": col_name}, {"$addToSet": {"pending": "diff"}})
|
apache-2.0
| 6,661,727,702,881,648,000 | 41.842599 | 143 | 0.488026 | false |
vericred/vericred-python
|
test/test_carrier.py
|
1
|
9963
|
# coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API default to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
## Sideloading
    When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s)
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size since many of the `Provider`s will share a `State`
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import vericred_client
from vericred_client.rest import ApiException
from vericred_client.models.carrier import Carrier
class TestCarrier(unittest.TestCase):
""" Carrier unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testCarrier(self):
"""
Test Carrier
"""
model = vericred_client.models.carrier.Carrier()
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| -8,013,491,673,182,950,000 | 39.012048 | 228 | 0.653317 | false |
vltr/star-wars-saga
|
ep5/cloud_city/darth_vader_vs_luke_skywalker.py
|
1
|
3146
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import logging
import language_typology as lang_typ
import star_wars as sw
class LogFactory(object):
"""
Helper class to provide standard logging
"""
logger = None
@classmethod
def get_logger(cls):
"""
Returns the logger
"""
if cls.logger is None:
cls.logger = logging.getLogger('star_wars')
cls.logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - '
'%(levelname)s - %(message)s')
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
cls.logger.addHandler(ch)
return cls.logger
class LaunchYourselfIntoTheVoid(Exception):
"""
Raised during really desperate situations
"""
def __init__(self, can_still_be_rescued=False, dont_ask_how=False,
*args, **kwargs):
Exception.__init__(self, *args, **kwargs)
self.can_still_be_rescued = can_still_be_rescued
self.dont_ask_how = dont_ask_how
self.who_jumped = kwargs.get('who_jumped', None)
def main():
log = LogFactory.get_logger()
log.warn('Spoiler Alert!')
lando_calrissian = sw.Character(side=None, aka=['Lando',
'Lando Calrissian'])
luke = sw.Character(side=sw.LIGHT, aka=['Luke', 'Skywalker'])
vader = sw.Character(side=sw.DARK, aka=['Darth Vader'])
sw.const.YODA.language_typology = lang_typ.OSV + lang_typ.OAV
vader.threatens(
who=lando_calrissian,
callback=lambda: lando_calrissian.set_side(sw.DARK))
sw.plot(lando_calrissian.betrays(side=sw.LIGHT))
try:
fight = sw.LightSaberFight(luke, vader)
fight.add_defense_techniques(all=True)
while fight.is_not_over() or luke.body.has_both_arms():
fight.strike(vader, on=luke)
fight.strike(luke, on=vader)
try:
vader.talk(sw._('LUKE_I_AM_YOUR_FATHER'))
luke.talk(sw._('NOOOOOOOOOOOOOOOOOOOOOOOO'))
luke.jump()
except LaunchYourselfIntoTheVoid as lyitv:
sw.plot(lando_calrissian.regret(
callback=lambda: lando_calrissian.set_side(sw.LIGHT)))
if lyitv.can_still_be_rescued and \
lyitv.who_jumped is not None and \
isinstance(lyitv.who_jumped, sw.Character) and \
lyitv.who_jumped.aka.contains('Luke'):
sw.plot(lando_calrissian.rescue(lyitv.who_jumped))
if lyitv.dont_ask_how:
sw.plot.next_movie(ep=6)
log.error(sw._('IN_THIS_EPISODE_SUCH_THING_HAPPENS_NOT',
linguistic_typology=sw.const.YODA))
sys.exit(1)
except sw.SameSideException:
log.critical('there should be at least one character at each '
'side of the force for a proper lightsaber fight')
raise
if __name__ == '__main__':
main()
|
unlicense
| -4,721,615,349,197,400,000 | 30.777778 | 72 | 0.572473 | false |
tdubourg/velov-unchained
|
web.py/update_warnings.py
|
1
|
1984
|
# -*- coding: utf-8 -*-
import psycopg2
#Connection to database
conn = psycopg2.connect("dbname=velovunchained user=velovunchained password=velovunchained")
#Cursor used to navigate through the database
cur = conn.cursor()
#Reads the id of the last reservation action tested for the warning rules application
last_action_read = 0
with open("last_action_read.txt", "r") as my_file:
last_action_read = my_file.read().strip()
#This query gets all reservations older than 5 minutes
query = """ SELECT user_id,velov_id,time,id
FROM user_action_history
WHERE ( ( id>%s ) AND (action_id=1) AND ( (EXTRACT( EPOCH FROM NOW() ) - time) > 300) ); """
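# psycopg2 expects query parameters as a sequence (tuple/list), hence the
# one-element tuple below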
cur.execute(query, (last_action_read,))
#we store the results of the query in results ( it's a tuple of tuples )
results = cur.fetchall()
#Every line is a transaction of the query's result
for line in results:
user_id = line[0]
velov_id = line[1]
time = line[2]
resa_id = line[3]
last_action_read = resa_id
#this query looks for an "unlock operation" happening less than 5 minutes after the reservation
myQuery = """ SELECT *
FROM user_action_history
WHERE ( (action_id=2) AND (user_id= %s ) AND (velov_id= %s) AND ( (time - %s) < 300) AND ( (time - %s) >0 ) ) """
cur.execute(myQuery,(user_id,velov_id,time,time))
res = cur.fetchall()
if not res:
print("Blame pour l'utiisateur {0} pour la réservation d'id {1}".format(user_id,resa_id))
		#This query inserts a warning for this user in the database
warningQuery = """ INSERT INTO user_warning_history (user_id, action_history_id) VALUES (%s, %s);"""
cur.execute(warningQuery, (user_id,resa_id))
#Make the change persistent in the database
conn.commit()
else:
print("Reservation OK pour l'utiisateur {0} pour la réservation d'id {1} du vélo {2}".format(user_id,resa_id,velov_id))
last_action_read = resa_id
with open("last_action_read.txt","w") as my_file:
#update the last_action_read file
my_file.write(str(last_action_read))
cur.close()
|
gpl-3.0
| 4,164,946,664,598,484,500 | 36.396226 | 121 | 0.697123 | false |
ufieeehw/IEEE2015
|
ros/ieee2015_simulator/scripts/end_effector_sim.py
|
1
|
7301
|
#!/usr/bin/python
## Math
import numpy as np
## Display
import pygame
import time
import math
## Ros
import rospy
from tf import transformations as tf_trans
## Ros Msgs
from std_msgs.msg import Header, Float64, Int64
from ieee2015_end_effector_servos.msg import Num
from geometry_msgs.msg import Point, PointStamped, PoseStamped, Pose, Quaternion
from dynamixel_msgs.msg import JointState
to_radians_one = 512
to_radians_two = 512
side_control = 1
large_control = 1
small_control = 1
past_location_one = 0
past_location_two = 0
SCREEN_DIM = (750, 750)
ORIGIN = np.array([SCREEN_DIM[0]/2.0, SCREEN_DIM[1]/2.0])
def round_point((x, y)):
'''Round and change point to centered coordinate system'''
return map(int, ((1000 * x) + ORIGIN[0], -(1000 * y) + ORIGIN[1]))
def unround_point((x, y)):
'''Change center-origin coordinates to pygame coordinates'''
return ((x - ORIGIN[0])/1000.0, (-y + ORIGIN[1])/1000.0)
def to_degrees(param):
if param < 0:
temp = 180 - math.fabs(param)
temp2 = temp * 2
return math.fabs(param) + temp2
else:
return param
def check_size(param, servo):
global past_location_one
global past_location_two
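    # assumption: 3.405 ~ 1023/300, i.e. map a 0-300 degree range onto the servo's
    # 0-1023 position units (typical of a Dynamixel XL-320); out-of-range results
    # fall back to the last stored position for that servo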
temp = int(param * 3.405)
if temp < 1023:
if servo == 1:
past_location_one = temp
if servo == 2:
past_location_two = temp
return temp
if temp > 1024:
if servo == 1:
return past_location_one
if servo == 2:
return past_location_two
class END(object):
def __init__(self):
rospy.init_node('SCARA_simulator')
self.base = np.array([0.0, 0.0], np.float32)
self.point = np.array([0.0, 0.0], np.float32)
self.point_two = np.array([0.0, 0.0], np.float32)
self.starting = np.array([2, -3.5], np.float32)
self.starting_two = np.array([-2, -3.5], np.float32)
self.starting_three = np.array([0, 1], np.float32)
self.desired_pos = rospy.Subscriber('/end_des_pose', PointStamped, self.got_des_pose)
self.desired_pos_two = rospy.Subscriber('/end_des_pose_two', PointStamped, self.got_des_pose_two)
def got_des_pose(self, msg):
        '''Received desired arm pose'''
self.point = (msg.point.x, msg.point.y)
global to_radians_one
to_radians_one = math.atan2(msg.point.y, msg.point.x)
print to_radians_one
degrees_one = to_degrees(to_radians_one)
xl_format = check_size(degrees_one, 1)
to_radians_one = xl_format
print "TARGETING POSITION: ({}, {})".format(*self.point)
print "LARGE SERVO POSITION ", degrees_one, "radians"
print "LARGE SERVO POSITION: ", xl_format
base_pub = rospy.Publisher('/ieee2015_end_effector_servos', Num, queue_size=1)
base_pub.publish(side_control, to_radians_one, to_radians_two, large_control, small_control)
def got_des_pose_two(self, msg):
        '''Received desired arm pose'''
self.point = (msg.point.x, msg.point.y)
global to_radians_two
to_radians_two = math.atan2(msg.point.y, msg.point.x) * (180/np.pi) + 60
degrees_two =to_degrees(to_radians_two)
xl_format = check_size(degrees_two, 2)
to_radians_two = xl_format
print "TARGETING POSITION: ({}, {})".format(*self.point)
print "SMALL SERVO moved to ", degrees_two, "radians"
print "SMALL SERVO POSITION: ", xl_format
base_pub = rospy.Publisher('/ieee2015_end_effector_servos', Num, queue_size=1)
        base_pub.publish(side_control, to_radians_one, to_radians_two, large_control, small_control)
def draw(self, display, new_base=(0, 0)):
'''Draw the whole arm'''
# Update positions given current
pygame.draw.circle(display, (255, 255, 50), round_point(self.base), int(300), 2)
pygame.draw.line(display, (255, 162, 0), round_point(self.base), round_point(self.point), 3)
pygame.draw.line(display, (255, 130, 0), round_point(self.base), round_point(self.point_two), 3)
pygame.draw.line(display, (255, 255, 255), round_point(self.base), round_point(self.starting), 1)
pygame.draw.line(display, (255, 255, 255), round_point(self.base), round_point(self.starting_two), 1)
pygame.draw.line(display, (255, 255, 255), round_point(self.base), round_point(self.starting_three), 1)
def main():
'''In principle, we can support an arbitrary number of servos in simulation'''
end_one = [END()]
global side_control
global large_control
global small_control
display = pygame.display.set_mode(SCREEN_DIM)
des_pose_pub_end = rospy.Publisher('/end_des_pose', PointStamped, queue_size=1)
des_pose_pub_end_two = rospy.Publisher('/end_des_pose_two', PointStamped, queue_size=1)
def publish_des_pos_end(pos):
'''Publish desired position of the arm end-effector based on click position'''
des_pose_pub_end.publish(
PointStamped(
header = Header(
stamp=rospy.Time.now(),
frame_id='/robot',
),
point=Point(
x=pos[0],
y=pos[1],
z=0
)
)
)
def publish_des_pos_two(pos):
des_pose_pub_end_two.publish(
PointStamped(
header = Header(
stamp=rospy.Time.now(),
frame_id='/robot',
),
point=Point(
x=pos[0],
y=pos[1],
z=0
)
)
)
clock = pygame.time.Clock()
while not rospy.is_shutdown():
for event in pygame.event.get():
if event.type == pygame.QUIT:
return
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_a:
pt = pygame.mouse.get_pos()
publish_des_pos_end(unround_point(pt))
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_s:
pt = pygame.mouse.get_pos()
publish_des_pos_two(unround_point(pt))
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_z:
side_control = 1
print "CONTROL MODE: Wheel"
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_x:
side_control = 2
print "CONTROL MODE: Angle"
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_q:
large_control = 1
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_w:
large_control = 2
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_e:
small_control = 1
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_r:
small_control = 2
t = time.time()
for arm in end_one:
arm.draw(display)
pygame.display.update()
clock.tick(20)
display.fill((0, 0, 0))
if __name__ == '__main__':
main()
|
gpl-2.0
| 2,497,793,503,725,792,000 | 30.61039 | 111 | 0.553486 | false |
pseudocubic/neutronpy
|
neutronpy/crystal/structure_factors.py
|
1
|
4806
|
# -*- coding: utf-8 -*-
r"""Structure Factors
NuclearStructureFactor
MagneticStructureFactor
MagneticFormFactor
"""
import numpy as np
from ..constants import magnetic_ion_j
class NuclearStructureFactor(object):
r"""Class containing nuclear structure factor calculator
Methods
-------
calc_nuc_str_fac
"""
def calc_nuc_str_fac(self, hkl):
r"""Calculates the structural form factor of the material.
Parameters
----------
hkl : tuple of floats, or tuple of array-like
Reciprocal lattice positions at which the structure
factor should be calculated
Returns
-------
NSF : float or ndarray
Nuclear structure factor at the position or positions specified
Notes
-----
"""
h, k, l = hkl
# Ensures input arrays are complex ndarrays
if isinstance(h, (np.ndarray, list, tuple)):
h = np.array(h).astype(complex)
if isinstance(k, (np.ndarray, list, tuple)):
k = np.array(k).astype(complex)
if isinstance(l, (np.ndarray, list, tuple)):
l = np.array(l).astype(complex)
# construct structure factor
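        # each term: occupancy * scattering length b * phase factor exp(2*pi*i*(h*x + k*y + l*z))
        #            * isotropic Debye-Waller factor (from Uiso) * anisotropic Debye-Waller factor (from Uaniso)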
NSF = 0 * 1j
for atom in self.atoms:
NSF += atom.occupancy * atom.b * np.exp(1j * 2. * np.pi * (h * atom.pos[0] + k * atom.pos[1] + l * atom.pos[2])) * \
np.exp(-8 * np.pi ** 2 * atom.Uiso * np.sin(np.deg2rad(self.get_two_theta(atom.pos, self.wavelength) / 2.)) ** 2 / self.wavelength ** 2) * \
np.exp(-np.float(np.dot(np.dot(atom.pos, atom.Uaniso), atom.pos)))
return NSF
class MagneticFormFactor(object):
r"""Class defining a magnetic ion.
Parameters
----------
ion : str
Name of the atom, ion or anion, *e.g.* 'Fe2+'.
Returns
-------
output : Object
Ion object defining a single magnetic ion.
Methods
-------
calc_mag_form_fac
"""
def __init__(self, ion):
self.ion = ion
try:
self.j0 = magnetic_ion_j()[self.ion]['j0']
self.j2 = magnetic_ion_j()[self.ion]['j2']
self.j4 = magnetic_ion_j()[self.ion]['j4']
except ValueError:
raise ValueError('No such ion was found in database.')
def __repr__(self):
return "MagneticFormFactor('{0}')".format(self.ion)
def calc_mag_form_fac(self, q=None, g=None, qrange=None):
r"""Calculate the magnetic form factor of an ion.
Parameters
----------
q : float or list, optional
An array of values or position at which the form
factor should be calcuated.
g : float, optional
The g-factor, which is 2 is left undefined.
qrange : float, optional
The range of q over which the form factor should be
calculated, if no input array q is provided.
Returns
-------
output : tuple
(form factor, q, j\ :sub:`0`, j\ :sub:`2`, j\ :sub:`4`)
Notes
-----
The magnetic form factor of an ion is given by:
        .. math:: f(q) = \langle j_0(q)\rangle + \left(\frac{2}{g}-1\right)\langle j_2(q)\rangle \qquad \text{(Jensen and Mackintosh, 1991)}
using the 3-gaussian approximation to :math:`f(q)` from the
International Tables of Crystallography (by J. Brown)
"""
if q is None:
if qrange is None:
q = np.linspace(0., 2., 2. / 0.025 + 1)
else:
q = np.linspace(qrange[0], qrange[1], (qrange[1] - qrange[0]) / 0.025 + 1)
if g is None:
g = 2.
x = q / 4. / np.pi
j0 = (self.j0[0] * np.exp(-self.j0[1] * x ** 2) + self.j0[2] *
np.exp(-self.j0[3] * x ** 2) + self.j0[4] *
np.exp(-self.j0[5] * x ** 2) + self.j0[6])
j2 = x ** 2 * (self.j2[0] * np.exp(-self.j2[1] * x ** 2) +
self.j2[2] * np.exp(-self.j2[3] * x ** 2) +
self.j2[4] * np.exp(-self.j2[5] * x ** 2) +
self.j2[6])
j4 = x ** 2 * (self.j4[0] * np.exp(-self.j4[1] * x ** 2) +
self.j4[2] * np.exp(-self.j4[3] * x ** 2) +
self.j4[4] * np.exp(-self.j4[5] * x ** 2) +
self.j4[6])
ff = j0 + (2. / g - 1.) * j2
return ff, q, j0, j2, j4
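# Editor's note: a minimal usage sketch (not part of the original module).
# Assuming 'Mn2+' is present in the magnetic_ion_j() table, the dipole
# approximation form factor could be evaluated roughly as:
#
#     ff_calc = MagneticFormFactor('Mn2+')
#     ff, q, j0, j2, j4 = ff_calc.calc_mag_form_fac(qrange=(0., 4.))
#     # ff[i] ~ <j0(q_i)> + (2/g - 1) <j2(q_i)> with the default g = 2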
class MagneticStructureFactor(object):
r"""Class containing magnetic structure factor calculator
Methods
-------
calc_mag_int_vec
calc_mag_str_fac
"""
def calc_mag_int_vec(self):
r"""Calculates magnetic interaction vector
"""
pass
def calc_mag_str_fac(self):
r"""Calculates magnetic structure factor
"""
pass
|
mit
| 3,583,450,151,458,712,000 | 27.270588 | 156 | 0.512901 | false |
oomwoo/ubuntu
|
test/my_cifar_train.py
|
1
|
3782
|
#!/usr/bin/env python
# Trains a CIFAR10 model based on Nervana Neon sample code
# Then, tries to recognize an image, sanity-checks recognition
#
# (c) oomwoo.com 2016
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3.0
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License <http://www.gnu.org/licenses/> for details.
num_epochs = 20
classes =["airplane", "automobile", "bird", "cat", "deer",
"dog", "frog", "horse", "ship", "truck"]
nclasses = len(classes)
from neon.backends import gen_backend
be = gen_backend(backend='cpu', batch_size=128)
from neon.data import load_cifar10
(X_train, y_train), (X_test, y_test), nclass = load_cifar10()
from neon.data import ArrayIterator
train_set = ArrayIterator(X_train, y_train, nclass=nclass, lshape=(3, 32, 32))
test_set = ArrayIterator(X_test, y_test, nclass=nclass, lshape=(3, 32, 32))
from neon.layers import Conv, Affine, Pooling
from neon.initializers import Uniform
from neon.transforms.activation import Rectlin, Softmax
init_uni = Uniform(low=-0.1, high=0.1)
layers = [Conv(fshape=(5,5,16), init=init_uni, activation=Rectlin()),
Pooling(fshape=2, strides=2),
Conv(fshape=(5,5,32), init=init_uni, activation=Rectlin()),
Pooling(fshape=2, strides=2),
Affine(nout=500, init=init_uni, activation=Rectlin()),
Affine(nout=nclasses, init=init_uni, activation=Softmax())]
from neon.models import Model
model = Model(layers)
from neon.layers import GeneralizedCost
from neon.transforms import CrossEntropyMulti
cost = GeneralizedCost(costfunc=CrossEntropyMulti())
from neon.optimizers import GradientDescentMomentum, RMSProp
optimizer = GradientDescentMomentum(learning_rate=0.005,
momentum_coef=0.9)
# Set up callbacks. By default sets up a progress bar
from neon.callbacks.callbacks import Callbacks
callbacks = Callbacks(model, train_set)
model.fit(dataset=train_set, cost=cost, optimizer=optimizer, num_epochs=num_epochs, callbacks=callbacks)
model.save_params("cifar10_model.prm")
# Evaluate performance
from neon.transforms import Misclassification
error_pct = 100 * model.eval(test_set, metric=Misclassification())
print 'Misclassification error = %.1f%%' % error_pct
# Sanity check 1
# an image of a frog from wikipedia
# img_source = "https://upload.wikimedia.org/wikipedia/commons/thumb/5/55/Atelopus_zeteki1.jpg/440px-Atelopus_zeteki1.jpg"
# import urllib
# urllib.urlretrieve(img_source, filename="image.jpg")
from PIL import Image
import numpy as np
# To view images, install ImageMagick
def show_sample(x):
image = x.reshape(3, 32, 32)
image = np.transpose(image, (1, 2, 0))
image = Image.fromarray(np.uint8(image * 255))
image.show()
image = Image.open('frog.jpg')
image = image.crop((0,0,min(image.size),min(image.size)))
image.thumbnail((32, 32))
# image.show()
image = np.asarray(image, dtype=np.float32)  # (height, width, channel)
image = np.transpose(image, (2, 0, 1)) # ArrayIterator expects (channel, height, width)
x_new = np.zeros((128, 3072), dtype=np.float32)
x_new[0] = image.reshape(1, 3072) / 255
show_sample(x_new[0])
inference_set = ArrayIterator(x_new, None, nclass=nclass, lshape=(3, 32, 32))
out = model.get_outputs(inference_set)
print classes[out[0].argmax()] + ", ground truth FROG"
# Sanity check 2
out = model.get_outputs(test_set)
# print out
print "Validation set result:"
print(out.argmax(1))
print "Ground truth:"
print y_test.reshape(10000)
|
gpl-3.0
| 5,062,212,840,748,560,000 | 34.679245 | 122 | 0.722105 | false |
okami-1/python-dnssec
|
dnssec/rollerd/conf.py
|
1
|
1175
|
# Copyright (C) 2015 Okami, okami@fuzetsu.info
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
class ConfMixin(object):
def parseconfig(self, path):
config = {}
for line in open(path, 'r').readlines():
line = line.strip()
if line and not line.startswith('#') and not line.startswith(';'):
key, sep, value = line.replace('\t', ' ').partition(' ')
if key and value:
config[key.strip()] = value.strip()
return config
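    # Editor's note: a hedged usage sketch, not part of the original file.
    # Given a rollerd-style config file containing lines such as
    #     zonedir   /var/named/zones
    #     sleeptime 60
    # parseconfig() would return {'zonedir': '/var/named/zones', 'sleeptime': '60'};
    # lines starting with '#' or ';' are skipped and all values stay as strings.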
|
gpl-3.0
| -1,912,782,318,009,102,600 | 42.518519 | 80 | 0.674894 | false |
levilucio/SyVOLT
|
ECore_Copier_MM/transformation-Large/HepackagelefteAnnotationsSolveRefEPackageEAnnotationEPackageEAnnotation.py
|
1
|
5043
|
from core.himesis import Himesis
class HepackagelefteAnnotationsSolveRefEPackageEAnnotationEPackageEAnnotation(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HepackagelefteAnnotationsSolveRefEPackageEAnnotationEPackageEAnnotation.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HepackagelefteAnnotationsSolveRefEPackageEAnnotationEPackageEAnnotation, self).__init__(name='HepackagelefteAnnotationsSolveRefEPackageEAnnotationEPackageEAnnotation', num_nodes=27, edges=[])
# Add the edges
self.add_edges([[0, 6], [6, 5], [0, 8], [8, 7], [1, 10], [10, 9], [1, 12], [12, 11], [5, 3], [3, 7], [9, 4], [4, 11], [9, 13], [13, 5], [11, 14], [14, 7], [9, 15], [15, 16], [17, 18], [18, 16], [17, 19], [19, 20], [11, 21], [21, 22], [23, 24], [24, 22], [23, 25], [25, 26], [0, 2], [2, 1]])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """epackagelefteAnnotationsSolveRefEPackageEAnnotationEPackageEAnnotation"""
self["GUID__"] = 5796068321853995095
# Set the node attributes
self.vs[0]["mm__"] = """MatchModel"""
self.vs[0]["GUID__"] = 1263895324752147103
self.vs[1]["mm__"] = """ApplyModel"""
self.vs[1]["GUID__"] = 4920848777709677008
self.vs[2]["mm__"] = """paired_with"""
self.vs[2]["GUID__"] = 2297792211197088776
self.vs[3]["associationType"] = """eAnnotations"""
self.vs[3]["mm__"] = """directLink_S"""
self.vs[3]["GUID__"] = 2067284399683342337
self.vs[4]["associationType"] = """eAnnotations"""
self.vs[4]["mm__"] = """directLink_T"""
self.vs[4]["GUID__"] = 4650338416508694541
self.vs[5]["name"] = """"""
self.vs[5]["classtype"] = """EPackage"""
self.vs[5]["mm__"] = """EPackage"""
self.vs[5]["cardinality"] = """+"""
self.vs[5]["GUID__"] = 3512187186455559990
self.vs[6]["mm__"] = """match_contains"""
self.vs[6]["GUID__"] = 782077756959580732
self.vs[7]["name"] = """"""
self.vs[7]["classtype"] = """EAnnotation"""
self.vs[7]["mm__"] = """EAnnotation"""
self.vs[7]["cardinality"] = """+"""
self.vs[7]["GUID__"] = 8907923564340088177
self.vs[8]["mm__"] = """match_contains"""
self.vs[8]["GUID__"] = 7705395290243464715
self.vs[9]["name"] = """"""
self.vs[9]["classtype"] = """EPackage"""
self.vs[9]["mm__"] = """EPackage"""
self.vs[9]["cardinality"] = """1"""
self.vs[9]["GUID__"] = 2905018101663850371
self.vs[10]["mm__"] = """apply_contains"""
self.vs[10]["GUID__"] = 5135774205340798898
self.vs[11]["name"] = """"""
self.vs[11]["classtype"] = """EAnnotation"""
self.vs[11]["mm__"] = """EAnnotation"""
self.vs[11]["cardinality"] = """1"""
self.vs[11]["GUID__"] = 6237918125155074243
self.vs[12]["mm__"] = """apply_contains"""
self.vs[12]["GUID__"] = 1623836670118395903
self.vs[13]["mm__"] = """backward_link"""
self.vs[13]["type"] = """ruleDef"""
self.vs[13]["GUID__"] = 2230608214139945342
self.vs[14]["mm__"] = """backward_link"""
self.vs[14]["type"] = """ruleDef"""
self.vs[14]["GUID__"] = 2589108235200741613
self.vs[15]["mm__"] = """hasAttribute_T"""
self.vs[15]["GUID__"] = 7204361238684139584
self.vs[16]["name"] = """ApplyAttribute"""
self.vs[16]["Type"] = """'String'"""
self.vs[16]["mm__"] = """Attribute"""
self.vs[16]["GUID__"] = 691028435570424746
self.vs[17]["name"] = """eq_"""
self.vs[17]["mm__"] = """Equation"""
self.vs[17]["GUID__"] = 4456850095113727390
self.vs[18]["mm__"] = """leftExpr"""
self.vs[18]["GUID__"] = 690071875798834631
self.vs[19]["mm__"] = """rightExpr"""
self.vs[19]["GUID__"] = 1747402909465587243
self.vs[20]["name"] = """solveRef"""
self.vs[20]["Type"] = """'String'"""
self.vs[20]["mm__"] = """Constant"""
self.vs[20]["GUID__"] = 1245774328332209459
self.vs[21]["mm__"] = """hasAttribute_T"""
self.vs[21]["GUID__"] = 2688282712339288349
self.vs[22]["name"] = """ApplyAttribute"""
self.vs[22]["Type"] = """'String'"""
self.vs[22]["mm__"] = """Attribute"""
self.vs[22]["GUID__"] = 8644799468981193891
self.vs[23]["name"] = """eq_"""
self.vs[23]["mm__"] = """Equation"""
self.vs[23]["GUID__"] = 5435096692354379389
self.vs[24]["mm__"] = """leftExpr"""
self.vs[24]["GUID__"] = 8310646236809688386
self.vs[25]["mm__"] = """rightExpr"""
self.vs[25]["GUID__"] = 8798879238228136701
self.vs[26]["name"] = """solveRef"""
self.vs[26]["Type"] = """'String'"""
self.vs[26]["mm__"] = """Constant"""
self.vs[26]["GUID__"] = 3299797658182553072
|
mit
| -5,931,338,714,505,986,000 | 47.961165 | 298 | 0.519532 | false |
anntzer/seaborn
|
seaborn/_core.py
|
1
|
44884
|
import warnings
import itertools
from copy import copy
from functools import partial
from collections.abc import Iterable, Sequence, Mapping
from numbers import Number
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib as mpl
from ._decorators import (
share_init_params_with_map,
)
from .palettes import (
QUAL_PALETTES,
color_palette,
cubehelix_palette,
_parse_cubehelix_args,
)
from .utils import (
get_color_cycle,
remove_na,
)
class SemanticMapping:
"""Base class for mapping data values to plot attributes."""
# -- Default attributes that all SemanticMapping subclasses must set
# Whether the mapping is numeric, categorical, or datetime
map_type = None
# Ordered list of unique values in the input data
levels = None
# A mapping from the data values to corresponding plot attributes
lookup_table = None
def __init__(self, plotter):
# TODO Putting this here so we can continue to use a lot of the
# logic that's built into the library, but the idea of this class
        # is to move towards semantic mappings that are agnostic about the
# kind of plot they're going to be used to draw.
# Fully achieving that is going to take some thinking.
self.plotter = plotter
def map(cls, plotter, *args, **kwargs):
# This method is assigned the __init__ docstring
method_name = "_{}_map".format(cls.__name__[:-7].lower())
setattr(plotter, method_name, cls(plotter, *args, **kwargs))
return plotter
def _lookup_single(self, key):
"""Apply the mapping to a single data value."""
return self.lookup_table[key]
def __call__(self, key, *args, **kwargs):
"""Get the attribute(s) values for the data key."""
if isinstance(key, (list, np.ndarray, pd.Series)):
return [self._lookup_single(k, *args, **kwargs) for k in key]
else:
return self._lookup_single(key, *args, **kwargs)
@share_init_params_with_map
class HueMapping(SemanticMapping):
"""Mapping that sets artist colors according to data values."""
# A specification of the colors that should appear in the plot
palette = None
# An object that normalizes data values to [0, 1] range for color mapping
norm = None
# A continuous colormap object for interpolating in a numeric context
cmap = None
def __init__(
self, plotter, palette=None, order=None, norm=None,
):
"""Map the levels of the `hue` variable to distinct colors.
Parameters
----------
# TODO add generic parameters
"""
super().__init__(plotter)
data = plotter.plot_data["hue"]
if data.notna().any():
map_type = self.infer_map_type(
palette, norm, plotter.input_format, plotter.var_types["hue"]
)
# Our goal is to end up with a dictionary mapping every unique
# value in `data` to a color. We will also keep track of the
# metadata about this mapping we will need for, e.g., a legend
# --- Option 1: numeric mapping with a matplotlib colormap
if map_type == "numeric":
data = pd.to_numeric(data)
levels, lookup_table, norm, cmap = self.numeric_mapping(
data, palette, norm,
)
# --- Option 2: categorical mapping using seaborn palette
elif map_type == "categorical":
cmap = norm = None
levels, lookup_table = self.categorical_mapping(
data, palette, order,
)
# --- Option 3: datetime mapping
else:
# TODO this needs actual implementation
cmap = norm = None
levels, lookup_table = self.categorical_mapping(
# Casting data to list to handle differences in the way
# pandas and numpy represent datetime64 data
list(data), palette, order,
)
self.map_type = map_type
self.lookup_table = lookup_table
self.palette = palette
self.levels = levels
self.norm = norm
self.cmap = cmap
def _lookup_single(self, key):
"""Get the color for a single value, using colormap to interpolate."""
try:
# Use a value that's in the original data vector
value = self.lookup_table[key]
except KeyError:
# Use the colormap to interpolate between existing datapoints
# (e.g. in the context of making a continuous legend)
normed = self.norm(key)
if np.ma.is_masked(normed):
normed = np.nan
value = self.cmap(normed)
return value
def infer_map_type(self, palette, norm, input_format, var_type):
"""Determine how to implement the mapping."""
if palette in QUAL_PALETTES:
map_type = "categorical"
elif norm is not None:
map_type = "numeric"
elif isinstance(palette, (dict, list)):
map_type = "categorical"
elif input_format == "wide":
map_type = "categorical"
else:
map_type = var_type
return map_type
def categorical_mapping(self, data, palette, order):
"""Determine colors when the hue mapping is categorical."""
# -- Identify the order and name of the levels
levels = categorical_order(data, order)
n_colors = len(levels)
# -- Identify the set of colors to use
if isinstance(palette, dict):
missing = set(levels) - set(palette)
if any(missing):
err = "The palette dictionary is missing keys: {}"
raise ValueError(err.format(missing))
lookup_table = palette
else:
if palette is None:
if n_colors <= len(get_color_cycle()):
colors = color_palette(None, n_colors)
else:
colors = color_palette("husl", n_colors)
elif isinstance(palette, list):
if len(palette) != n_colors:
err = "The palette list has the wrong number of colors."
raise ValueError(err)
colors = palette
else:
colors = color_palette(palette, n_colors)
lookup_table = dict(zip(levels, colors))
return levels, lookup_table
def numeric_mapping(self, data, palette, norm):
"""Determine colors when the hue variable is quantitative."""
if isinstance(palette, dict):
# The presence of a norm object overrides a dictionary of hues
# in specifying a numeric mapping, so we need to process it here.
levels = list(sorted(palette))
colors = [palette[k] for k in sorted(palette)]
cmap = mpl.colors.ListedColormap(colors)
lookup_table = palette.copy()
else:
# The levels are the sorted unique values in the data
levels = list(np.sort(remove_na(data.unique())))
# --- Sort out the colormap to use from the palette argument
# Default numeric palette is our default cubehelix palette
# TODO do we want to do something complicated to ensure contrast?
palette = "ch:" if palette is None else palette
if isinstance(palette, mpl.colors.Colormap):
cmap = palette
elif str(palette).startswith("ch:"):
args, kwargs = _parse_cubehelix_args(palette)
cmap = cubehelix_palette(0, *args, as_cmap=True, **kwargs)
else:
try:
cmap = mpl.cm.get_cmap(palette)
except (ValueError, TypeError):
err = "Palette {} not understood"
                    raise ValueError(err.format(palette))
# Now sort out the data normalization
if norm is None:
norm = mpl.colors.Normalize()
elif isinstance(norm, tuple):
norm = mpl.colors.Normalize(*norm)
elif not isinstance(norm, mpl.colors.Normalize):
err = "``hue_norm`` must be None, tuple, or Normalize object."
raise ValueError(err)
if not norm.scaled():
norm(np.asarray(data.dropna()))
lookup_table = dict(zip(levels, cmap(norm(levels))))
return levels, lookup_table, norm, cmap
@share_init_params_with_map
class SizeMapping(SemanticMapping):
"""Mapping that sets artist sizes according to data values."""
# An object that normalizes data values to [0, 1] range
norm = None
def __init__(
self, plotter, sizes=None, order=None, norm=None,
):
"""Map the levels of the `size` variable to distinct values.
Parameters
----------
# TODO add generic parameters
"""
super().__init__(plotter)
data = plotter.plot_data["size"]
if data.notna().any():
map_type = self.infer_map_type(
norm, sizes, plotter.var_types["size"]
)
# --- Option 1: numeric mapping
if map_type == "numeric":
levels, lookup_table, norm = self.numeric_mapping(
data, sizes, norm,
)
# --- Option 2: categorical mapping
elif map_type == "categorical":
levels, lookup_table = self.categorical_mapping(
data, sizes, order,
)
# --- Option 3: datetime mapping
# TODO this needs an actual implementation
else:
levels, lookup_table = self.categorical_mapping(
# Casting data to list to handle differences in the way
# pandas and numpy represent datetime64 data
list(data), sizes, order,
)
self.map_type = map_type
self.levels = levels
self.norm = norm
self.sizes = sizes
self.lookup_table = lookup_table
def infer_map_type(self, norm, sizes, var_type):
if norm is not None:
map_type = "numeric"
elif isinstance(sizes, (dict, list)):
map_type = "categorical"
else:
map_type = var_type
return map_type
def _lookup_single(self, key):
try:
value = self.lookup_table[key]
except KeyError:
normed = self.norm(key)
if np.ma.is_masked(normed):
normed = np.nan
size_values = self.lookup_table.values()
size_range = min(size_values), max(size_values)
value = size_range[0] + normed * np.ptp(size_range)
return value
def categorical_mapping(self, data, sizes, order):
levels = categorical_order(data, order)
if isinstance(sizes, dict):
# Dict inputs map existing data values to the size attribute
missing = set(levels) - set(sizes)
if any(missing):
err = f"Missing sizes for the following levels: {missing}"
raise ValueError(err)
lookup_table = sizes.copy()
elif isinstance(sizes, list):
# List inputs give size values in the same order as the levels
if len(sizes) != len(levels):
err = "The `sizes` list has the wrong number of values."
raise ValueError(err)
lookup_table = dict(zip(levels, sizes))
else:
if isinstance(sizes, tuple):
# Tuple input sets the min, max size values
if len(sizes) != 2:
err = "A `sizes` tuple must have only 2 values"
raise ValueError(err)
elif sizes is not None:
err = f"Value for `sizes` not understood: {sizes}"
raise ValueError(err)
else:
# Otherwise, we need to get the min, max size values from
# the plotter object we are attached to.
# TODO this is going to cause us trouble later, because we
# want to restructure things so that the plotter is generic
# across the visual representation of the data. But at this
# point, we don't know the visual representation. Likely we
# want to change the logic of this Mapping so that it gives
                # points on a normalized range that then gets unnormalized
# when we know what we're drawing. But given the way the
# package works now, this way is cleanest.
sizes = self.plotter._default_size_range
# For categorical sizes, use regularly-spaced linear steps
# between the minimum and maximum sizes
sizes = np.linspace(*sizes, len(levels))
lookup_table = dict(zip(levels, sizes))
return levels, lookup_table
def numeric_mapping(self, data, sizes, norm):
if isinstance(sizes, dict):
# The presence of a norm object overrides a dictionary of sizes
# in specifying a numeric mapping, so we need to process it
# dictionary here
levels = list(np.sort(list(sizes)))
size_values = sizes.values()
size_range = min(size_values), max(size_values)
else:
# The levels here will be the unique values in the data
levels = list(np.sort(remove_na(data.unique())))
if isinstance(sizes, tuple):
# For numeric inputs, the size can be parametrized by
# the minimum and maximum artist values to map to. The
# norm object that gets set up next specifies how to
# do the mapping.
if len(sizes) != 2:
err = "A `sizes` tuple must have only 2 values"
raise ValueError(err)
size_range = sizes
elif sizes is not None:
err = f"Value for `sizes` not understood: {sizes}"
raise ValueError(err)
else:
# When not provided, we get the size range from the plotter
# object we are attached to. See the note in the categorical
                # method about how this is suboptimal for future development.
size_range = self.plotter._default_size_range
# Now that we know the minimum and maximum sizes that will get drawn,
# we need to map the data values that we have into that range. We will
# use a matplotlib Normalize class, which is typically used for numeric
# color mapping but works fine here too. It takes data values and maps
# them into a [0, 1] interval, potentially nonlinear-ly.
if norm is None:
# Default is a linear function between the min and max data values
norm = mpl.colors.Normalize()
elif isinstance(norm, tuple):
# It is also possible to give different limits in data space
norm = mpl.colors.Normalize(*norm)
elif not isinstance(norm, mpl.colors.Normalize):
err = f"Value for size `norm` parameter not understood: {norm}"
raise ValueError(err)
else:
# If provided with Normalize object, copy it so we can modify
norm = copy(norm)
# Set the mapping so all output values are in [0, 1]
norm.clip = True
# If the input range is not set, use the full range of the data
if not norm.scaled():
norm(levels)
# Map from data values to [0, 1] range
sizes_scaled = norm(levels)
# Now map from the scaled range into the artist units
if isinstance(sizes, dict):
lookup_table = sizes
else:
lo, hi = size_range
sizes = lo + sizes_scaled * (hi - lo)
lookup_table = dict(zip(levels, sizes))
return levels, lookup_table, norm
@share_init_params_with_map
class StyleMapping(SemanticMapping):
"""Mapping that sets artist style according to data values."""
# Style mapping is always treated as categorical
map_type = "categorical"
def __init__(
self, plotter, markers=None, dashes=None, order=None,
):
"""Map the levels of the `style` variable to distinct values.
Parameters
----------
# TODO add generic parameters
"""
super().__init__(plotter)
data = plotter.plot_data["style"]
if data.notna().any():
# Cast to list to handle numpy/pandas datetime quirks
if variable_type(data) == "datetime":
data = list(data)
# Find ordered unique values
levels = categorical_order(data, order)
markers = self._map_attributes(
markers, levels, unique_markers(len(levels)), "markers",
)
dashes = self._map_attributes(
dashes, levels, unique_dashes(len(levels)), "dashes",
)
# Build the paths matplotlib will use to draw the markers
paths = {}
filled_markers = []
for k, m in markers.items():
if not isinstance(m, mpl.markers.MarkerStyle):
m = mpl.markers.MarkerStyle(m)
paths[k] = m.get_path().transformed(m.get_transform())
filled_markers.append(m.is_filled())
# Mixture of filled and unfilled markers will show line art markers
# in the edge color, which defaults to white. This can be handled,
# but there would be additional complexity with specifying the
# weight of the line art markers without overwhelming the filled
# ones with the edges. So for now, we will disallow mixtures.
if any(filled_markers) and not all(filled_markers):
err = "Filled and line art markers cannot be mixed"
raise ValueError(err)
lookup_table = {}
for key in levels:
lookup_table[key] = {}
if markers:
lookup_table[key]["marker"] = markers[key]
lookup_table[key]["path"] = paths[key]
if dashes:
lookup_table[key]["dashes"] = dashes[key]
self.levels = levels
self.lookup_table = lookup_table
def _lookup_single(self, key, attr=None):
"""Get attribute(s) for a given data point."""
if attr is None:
value = self.lookup_table[key]
else:
value = self.lookup_table[key][attr]
return value
def _map_attributes(self, arg, levels, defaults, attr):
"""Handle the specification for a given style attribute."""
if arg is True:
lookup_table = dict(zip(levels, defaults))
elif isinstance(arg, dict):
missing = set(levels) - set(arg)
if missing:
err = f"These `{attr}` levels are missing values: {missing}"
raise ValueError(err)
lookup_table = arg
elif isinstance(arg, Sequence):
if len(levels) != len(arg):
err = f"The `{attr}` argument has the wrong number of values"
raise ValueError(err)
lookup_table = dict(zip(levels, arg))
elif arg:
err = f"This `{attr}` argument was not understood: {arg}"
raise ValueError(err)
else:
lookup_table = {}
return lookup_table
# =========================================================================== #
class VectorPlotter:
"""Base class for objects underlying *plot functions."""
_semantic_mappings = {
"hue": HueMapping,
"size": SizeMapping,
"style": StyleMapping,
}
# TODO units is another example of a non-mapping "semantic"
# we need a general name for this and separate handling
semantics = "x", "y", "hue", "size", "style", "units"
wide_structure = {
"x": "index", "y": "values", "hue": "columns", "style": "columns",
}
flat_structure = {"x": "index", "y": "values"}
_default_size_range = 1, 2 # Unused but needed in tests, ugh
def __init__(self, data=None, variables={}):
self.assign_variables(data, variables)
for var, cls in self._semantic_mappings.items():
if var in self.semantics:
# Create the mapping function
map_func = partial(cls.map, plotter=self)
setattr(self, f"map_{var}", map_func)
# Call the mapping function to initialize with default values
getattr(self, f"map_{var}")()
@classmethod
def get_semantics(cls, kwargs):
"""Subset a dictionary` arguments with known semantic variables."""
return {k: kwargs[k] for k in cls.semantics}
def assign_variables(self, data=None, variables={}):
"""Define plot variables, optionally using lookup from `data`."""
x = variables.get("x", None)
y = variables.get("y", None)
if x is None and y is None:
self.input_format = "wide"
plot_data, variables = self._assign_variables_wideform(
data, **variables,
)
else:
self.input_format = "long"
plot_data, variables = self._assign_variables_longform(
data, **variables,
)
self.plot_data = plot_data
self.variables = variables
self.var_types = {
v: variable_type(
plot_data[v],
boolean_type="numeric" if v in "xy" else "categorical"
)
for v in variables
}
return self
def _assign_variables_wideform(self, data=None, **kwargs):
"""Define plot variables given wide-form data.
Parameters
----------
data : flat vector or collection of vectors
Data can be a vector or mapping that is coerceable to a Series
or a sequence- or mapping-based collection of such vectors, or a
rectangular numpy array, or a Pandas DataFrame.
kwargs : variable -> data mappings
Behavior with keyword arguments is currently undefined.
Returns
-------
plot_data : :class:`pandas.DataFrame`
Long-form data object mapping seaborn variables (x, y, hue, ...)
to data vectors.
variables : dict
Keys are defined seaborn variables; values are names inferred from
the inputs (or None when no name can be determined).
"""
# TODO raise here if any kwarg values are not None,
# # if we decide for "structure-only" wide API
# First, determine if the data object actually has any data in it
empty = data is None or not len(data)
# Then, determine if we have "flat" data (a single vector)
if isinstance(data, dict):
values = data.values()
else:
values = np.atleast_1d(data)
flat = not any(
isinstance(v, Iterable) and not isinstance(v, (str, bytes))
for v in values
)
if empty:
# Make an object with the structure of plot_data, but empty
plot_data = pd.DataFrame(columns=self.semantics)
variables = {}
elif flat:
# Handle flat data by converting to pandas Series and using the
# index and/or values to define x and/or y
# (Could be accomplished with a more general to_series() interface)
flat_data = pd.Series(data).copy()
names = {
"values": flat_data.name,
"index": flat_data.index.name
}
plot_data = {}
variables = {}
for var in ["x", "y"]:
if var in self.flat_structure:
attr = self.flat_structure[var]
plot_data[var] = getattr(flat_data, attr)
variables[var] = names[self.flat_structure[var]]
plot_data = pd.DataFrame(plot_data).reindex(columns=self.semantics)
else:
# Otherwise assume we have some collection of vectors.
# Handle Python sequences such that entries end up in the columns,
# not in the rows, of the intermediate wide DataFrame.
# One way to accomplish this is to convert to a dict of Series.
if isinstance(data, Sequence):
data_dict = {}
for i, var in enumerate(data):
key = getattr(var, "name", i)
# TODO is there a safer/more generic way to ensure Series?
# sort of like np.asarray, but for pandas?
data_dict[key] = pd.Series(var)
data = data_dict
# Pandas requires that dict values either be Series objects
# or all have the same length, but we want to allow "ragged" inputs
if isinstance(data, Mapping):
data = {key: pd.Series(val) for key, val in data.items()}
# Otherwise, delegate to the pandas DataFrame constructor
# This is where we'd prefer to use a general interface that says
# "give me this data as a pandas DataFrame", so we can accept
# DataFrame objects from other libraries
wide_data = pd.DataFrame(data, copy=True)
# At this point we should reduce the dataframe to numeric cols
numeric_cols = wide_data.apply(variable_type) == "numeric"
wide_data = wide_data.loc[:, numeric_cols]
# Now melt the data to long form
melt_kws = {"var_name": "columns", "value_name": "values"}
if "index" in self.wide_structure.values():
melt_kws["id_vars"] = "index"
wide_data["index"] = wide_data.index.to_series()
plot_data = wide_data.melt(**melt_kws)
# Assign names corresponding to plot semantics
for var, attr in self.wide_structure.items():
plot_data[var] = plot_data[attr]
plot_data = plot_data.reindex(columns=self.semantics)
# Define the variable names
variables = {}
for var, attr in self.wide_structure.items():
obj = getattr(wide_data, attr)
variables[var] = getattr(obj, "name", None)
return plot_data, variables
def _assign_variables_longform(self, data=None, **kwargs):
"""Define plot variables given long-form data and/or vector inputs.
Parameters
----------
data : dict-like collection of vectors
Input data where variable names map to vector values.
kwargs : variable -> data mappings
Keys are seaborn variables (x, y, hue, ...) and values are vectors
in any format that can construct a :class:`pandas.DataFrame` or
names of columns or index levels in ``data``.
Returns
-------
plot_data : :class:`pandas.DataFrame`
Long-form data object mapping seaborn variables (x, y, hue, ...)
to data vectors.
variables : dict
Keys are defined seaborn variables; values are names inferred from
the inputs (or None when no name can be determined).
Raises
------
ValueError
When variables are strings that don't appear in ``data``.
"""
plot_data = {}
variables = {}
# Data is optional; all variables can be defined as vectors
if data is None:
data = {}
# TODO should we try a data.to_dict() or similar here to more
# generally accept objects with that interface?
# Note that dict(df) also works for pandas, and gives us what we
# want, whereas DataFrame.to_dict() gives a nested dict instead of
# a dict of series.
        # Variables can also be extracted from the index attribute
# TODO is this the most general way to enable it?
# There is no index.to_dict on multiindex, unfortunately
try:
index = data.index.to_frame()
except AttributeError:
index = {}
# The caller will determine the order of variables in plot_data
for key, val in kwargs.items():
if isinstance(val, (str, bytes)):
# String inputs trigger __getitem__
if val in data:
# First try to get an entry in the data object
plot_data[key] = data[val]
variables[key] = val
elif val in index:
# Failing that, try to get an entry in the index object
plot_data[key] = index[val]
variables[key] = val
else:
# We don't know what this name means
err = f"Could not interpret input '{val}'"
raise ValueError(err)
else:
# Otherwise, assume the value is itself a vector of data
# TODO check for 1D here or let pd.DataFrame raise?
plot_data[key] = val
# Try to infer the name of the variable
variables[key] = getattr(val, "name", None)
# Construct a tidy plot DataFrame. This will convert a number of
# types automatically, aligning on index in case of pandas objects
plot_data = pd.DataFrame(plot_data, columns=self.semantics)
# Reduce the variables dictionary to fields with valid data
variables = {
var: name
for var, name in variables.items()
if plot_data[var].notnull().any()
}
return plot_data, variables
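    # Editor's note: a hedged illustration (added). With a long-form frame such
    # as seaborn's example "tips" dataset, the call would look roughly like:
    #     p = VectorPlotter(data=tips, variables=dict(x="total_bill", y="tip", hue="day"))
    #     p.plot_data   # tidy frame with columns x, y, hue, size, style, units
    #     p.variables   # {"x": "total_bill", "y": "tip", "hue": "day"}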
def _semantic_subsets(
self, grouping_semantics, reverse=False, from_comp_data=False,
):
"""Generator for getting subsets of data defined by semantic variables.
Parameters
----------
grouping_semantics : list of strings
Semantic variables that define the subsets of data.
reverse : bool, optional
If True, reverse the order of iteration.
from_comp_data : bool, optional
If True, use self.comp_data rather than self.plot_data
Yields
------
sub_vars : dict
Keys are semantic names, values are the level of that semantic.
sub_data : :class:`pandas.DataFrame`
Subset of ``plot_data`` for this combination of semantic values.
"""
if isinstance(grouping_semantics, str):
grouping_semantics = [grouping_semantics]
# Reduce to the semantics used in this plot
grouping_semantics = [
var for var in grouping_semantics if var in self.variables
]
if from_comp_data:
data = self.comp_data
else:
data = self.plot_data
if grouping_semantics:
grouped_data = data.groupby(
grouping_semantics, sort=False, as_index=False
)
grouping_keys = []
for var in grouping_semantics:
# TODO this is messy, add "semantic levels" property?
map_obj = getattr(self, f"_{var}_map")
grouping_keys.append(map_obj.levels)
iter_keys = itertools.product(*grouping_keys)
if reverse:
iter_keys = reversed(list(iter_keys))
for key in iter_keys:
# Pandas fails with singleton tuple inputs
pd_key = key[0] if len(key) == 1 else key
try:
data_subset = grouped_data.get_group(pd_key)
except KeyError:
continue
yield dict(zip(grouping_semantics, key)), data_subset
else:
yield {}, data
@property
def comp_data(self):
"""Dataframe with numeric x and y, after unit conversion and log scaling."""
if not hasattr(self, "ax"):
# Probably a good idea, but will need a bunch of tests updated
# Most of these tests should just use the external interface
            # Then this can be re-enabled.
# raise AttributeError("No Axes attached to plotter")
return self.plot_data
if not hasattr(self, "_comp_data"):
comp_data = self.plot_data.copy(deep=False)
for var in "xy":
axis = getattr(self.ax, f"{var}axis")
comp_var = axis.convert_units(self.plot_data[var])
if axis.get_scale() == "log":
comp_var = np.log10(comp_var)
comp_data[var] = comp_var
self._comp_data = comp_data
return self._comp_data
def _attach(self, ax, allowed_types=None, log_scale=None):
"""Associate the plotter with a matplotlib Axes and initialize its units.
Parameters
----------
ax : :class:`matplotlib.axes.Axes`
Axes object that we will eventually plot onto.
allowed_types : str or list of str
If provided, raise when either the x or y variable does not have
one of the declared seaborn types.
log_scale : bool, number, or pair of bools or numbers
If not False, set the axes to use log scaling, with the given
base or defaulting to 10. If a tuple, interpreted as separate
arguments for the x and y axes.
"""
if allowed_types is None:
# TODO should we define this default somewhere?
allowed_types = ["numeric", "datetime", "categorical"]
elif isinstance(allowed_types, str):
allowed_types = [allowed_types]
for var in set("xy").intersection(self.variables):
# Check types of x/y variables
var_type = self.var_types[var]
if var_type not in allowed_types:
err = (
f"The {var} variable is {var_type}, but one of "
f"{allowed_types} is required"
)
raise TypeError(err)
# Register with the matplotlib unit conversion machinery
# TODO do we want to warn or raise if mixing units?
axis = getattr(ax, f"{var}axis")
seed_data = self.plot_data[var]
if var_type == "categorical":
seed_data = categorical_order(seed_data)
axis.update_units(seed_data)
# Possibly log-scale one or both axes
if log_scale is not None:
# Allow single value or x, y tuple
try:
scalex, scaley = log_scale
except TypeError:
scalex = log_scale if "x" in self.variables else False
scaley = log_scale if "y" in self.variables else False
for axis, scale in zip("xy", (scalex, scaley)):
if scale:
set_scale = getattr(ax, f"set_{axis}scale")
if scale is True:
set_scale("log")
else:
set_scale("log", **{f"base{axis}": scale})
self.ax = ax
def _add_axis_labels(self, ax, default_x="", default_y=""):
"""Add axis labels from internal variable names if not already existing."""
if not ax.get_xlabel():
ax.set_xlabel(self.variables.get("x", default_x))
if not ax.get_ylabel():
ax.set_ylabel(self.variables.get("y", default_y))
def variable_type(vector, boolean_type="numeric"):
"""Determine whether a vector contains numeric, categorical, or dateime data.
This function differs from the pandas typing API in two ways:
- Python sequences or object-typed PyData objects are considered numeric if
all of their entries are numeric.
- String or mixed-type data are considered categorical even if not
      explicitly represented as a :class:`pandas.api.types.CategoricalDtype`.
Parameters
----------
vector : :func:`pandas.Series`, :func:`numpy.ndarray`, or Python sequence
Input data to test.
    boolean_type : 'numeric' or 'categorical'
Type to use for vectors containing only 0s and 1s (and NAs).
Returns
-------
var_type : 'numeric', 'categorical', or 'datetime'
Name identifying the type of data in the vector.
"""
# Special-case all-na data, which is always "numeric"
if pd.isna(vector).all():
return "numeric"
# Special-case binary/boolean data, allow caller to determine
# This triggers a numpy warning when vector has strings/objects
# https://github.com/numpy/numpy/issues/6784
# Because we reduce with .all(), we are agnostic about whether the
# comparison returns a scalar or vector, so we will ignore the warning.
# It triggers a separate DeprecationWarning when the vector has datetimes:
# https://github.com/numpy/numpy/issues/13548
# This is considered a bug by numpy and will likely go away.
with warnings.catch_warnings():
warnings.simplefilter(
action='ignore', category=(FutureWarning, DeprecationWarning)
)
if np.isin(vector, [0, 1, np.nan]).all():
return boolean_type
# Defer to positive pandas tests
if pd.api.types.is_numeric_dtype(vector):
return "numeric"
if pd.api.types.is_categorical_dtype(vector):
return "categorical"
if pd.api.types.is_datetime64_dtype(vector):
return "datetime"
# --- If we get to here, we need to check the entries
# Check for a collection where everything is a number
def all_numeric(x):
for x_i in x:
if not isinstance(x_i, Number):
return False
return True
if all_numeric(vector):
return "numeric"
# Check for a collection where everything is a datetime
def all_datetime(x):
for x_i in x:
if not isinstance(x_i, (datetime, np.datetime64)):
return False
return True
if all_datetime(vector):
return "datetime"
# Otherwise, our final fallback is to consider things categorical
return "categorical"
def infer_orient(x=None, y=None, orient=None, require_numeric=True):
"""Determine how the plot should be oriented based on the data.
For historical reasons, the convention is to call a plot "horizontally"
or "vertically" oriented based on the axis representing its dependent
variable. Practically, this is used when determining the axis for
numerical aggregation.
    Parameters
    ----------
x, y : Vector data or None
Positional data vectors for the plot.
orient : string or None
Specified orientation, which must start with "v" or "h" if not None.
require_numeric : bool
If set, raise when the implied dependent variable is not numeric.
Returns
-------
orient : "v" or "h"
Raises
------
ValueError: When `orient` is not None and does not start with "h" or "v"
    TypeError: When dependent variable is not numeric, with `require_numeric`
"""
x_type = None if x is None else variable_type(x)
y_type = None if y is None else variable_type(y)
nonnumeric_dv_error = "{} orientation requires numeric `{}` variable."
single_var_warning = "{} orientation ignored with only `{}` specified."
if x is None:
if str(orient).startswith("h"):
warnings.warn(single_var_warning.format("Horizontal", "y"))
if require_numeric and y_type != "numeric":
raise TypeError(nonnumeric_dv_error.format("Vertical", "y"))
return "v"
elif y is None:
if str(orient).startswith("v"):
warnings.warn(single_var_warning.format("Vertical", "x"))
if require_numeric and x_type != "numeric":
raise TypeError(nonnumeric_dv_error.format("Horizontal", "x"))
return "h"
elif str(orient).startswith("v"):
if require_numeric and y_type != "numeric":
raise TypeError(nonnumeric_dv_error.format("Vertical", "y"))
return "v"
elif str(orient).startswith("h"):
if require_numeric and x_type != "numeric":
raise TypeError(nonnumeric_dv_error.format("Horizontal", "x"))
return "h"
elif orient is not None:
raise ValueError(f"Value for `orient` not understood: {orient}")
elif x_type != "numeric" and y_type == "numeric":
return "v"
elif x_type == "numeric" and y_type != "numeric":
return "h"
elif require_numeric and "numeric" not in (x_type, y_type):
err = "Neither the `x` nor `y` variable appears to be numeric."
raise TypeError(err)
else:
return "v"
def unique_dashes(n):
"""Build an arbitrarily long list of unique dash styles for lines.
Parameters
----------
n : int
Number of unique dash specs to generate.
Returns
-------
dashes : list of strings or tuples
Valid arguments for the ``dashes`` parameter on
:class:`matplotlib.lines.Line2D`. The first spec is a solid
line (``""``), the remainder are sequences of long and short
dashes.
"""
# Start with dash specs that are well distinguishable
dashes = [
"",
(4, 1.5),
(1, 1),
(3, 1.25, 1.5, 1.25),
(5, 1, 1, 1),
]
    # Now programmatically build as many as we need
p = 3
while len(dashes) < n:
# Take combinations of long and short dashes
a = itertools.combinations_with_replacement([3, 1.25], p)
b = itertools.combinations_with_replacement([4, 1], p)
# Interleave the combinations, reversing one of the streams
segment_list = itertools.chain(*zip(
list(a)[1:-1][::-1],
list(b)[1:-1]
))
# Now insert the gaps
for segments in segment_list:
gap = min(segments)
spec = tuple(itertools.chain(*((seg, gap) for seg in segments)))
dashes.append(spec)
p += 1
return dashes[:n]
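# Editor's note (added): unique_dashes(3) would give ["", (4, 1.5), (1, 1)] --
# a solid line followed by progressively finer on/off dash patterns.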
def unique_markers(n):
"""Build an arbitrarily long list of unique marker styles for points.
Parameters
----------
n : int
Number of unique marker specs to generate.
Returns
-------
markers : list of string or tuples
Values for defining :class:`matplotlib.markers.MarkerStyle` objects.
All markers will be filled.
"""
# Start with marker specs that are well distinguishable
markers = [
"o",
"X",
(4, 0, 45),
"P",
(4, 0, 0),
(4, 1, 0),
"^",
(4, 1, 45),
"v",
]
# Now generate more from regular polygons of increasing order
s = 5
while len(markers) < n:
a = 360 / (s + 1) / 2
markers.extend([
(s + 1, 1, a),
(s + 1, 0, a),
(s, 1, 0),
(s, 0, 0),
])
s += 1
# Convert to MarkerStyle object, using only exactly what we need
# markers = [mpl.markers.MarkerStyle(m) for m in markers[:n]]
return markers[:n]
def categorical_order(vector, order=None):
"""Return a list of unique data values.
Determine an ordered list of levels in ``values``.
Parameters
----------
vector : list, array, Categorical, or Series
Vector of "categorical" values
order : list-like, optional
Desired order of category levels to override the order determined
from the ``values`` object.
Returns
-------
order : list
Ordered list of category levels not including null values.
"""
if order is None:
if hasattr(vector, "categories"):
order = vector.categories
else:
try:
order = vector.cat.categories
except (TypeError, AttributeError):
try:
order = vector.unique()
except AttributeError:
order = pd.unique(vector)
if variable_type(vector) == "numeric":
order = np.sort(order)
order = filter(pd.notnull, order)
return list(order)
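# Editor's note: illustrative expectations for categorical_order (added, hedged):
#   categorical_order(pd.Series(["b", "a", "b"]))               -> ["b", "a"]  (order of appearance)
#   categorical_order(pd.Series([3, 1, 2]))                     -> [1, 2, 3]   (numeric values get sorted)
#   categorical_order(pd.Series(["b", "a"]), order=["a", "b"])  -> ["a", "b"]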
|
bsd-3-clause
| -8,576,295,906,422,801,000 | 33.552733 | 84 | 0.563586 | false |
jeffwdoak/free_energies
|
free_energies/electronicdos.py
|
1
|
14960
|
#!/usr/bin/python
# electronicdos.py v0.5 5-16-2012 Jeff Doak jeff.w.doak@gmail.com
import numpy as np
from scipy.interpolate import UnivariateSpline
from scipy.integrate import quad
from scipy.optimize import fsolve
import sys, subprocess
BOLTZCONST = 8.617e-5 #eV/K
class ElectronicDOS:
"""
Class to calculate equilibrium carrier concentrations, as well as
equilibrium thermodynamic properties of an electronic density of states.
Class constants of ElectronicDOS:
- BOLTZCONST - Boltzmann's Constant (eV/K)
Instance attributes of ElectronicDOS:
- n_atoms - number of atoms of the unit cell from which the DOS was
calculated
- energy - numpy array of energies at which DOS is calculated (eV)
- dos_tot - numpy array of density of spin up and spin down allowed electron
states at each energy in the array energy (# states/eV/atom)
- dos_spin - numpy array of the difference in density between spin up and
spin down states (# states/eV/atom)
- e_min - minimum energy in numpy array energy (eV)
- e_max - maximum energy in numpy array energy (eV)
- e_fermi - zero Kelvin fermi energy for the electronic DOS (eV)
- step_size - energy difference between two consecutive points in the DOSCAR
file (eV)
- vbm - valence band maximum, set to e_fermi for metals (eV)
- cbm - conduction band minimum, set to e_fermi for metals (eV)
- band_gap - band gap around the fermi energy, zero for metals (eV)
- temp - numpy array of temperatures at which to calculate equilibrium
electron chemical potentials, electron concentrations, and hole
concentrations (K)
- mu_e - numpy array of electron chemical potentials calculated at each
temperature in temp (eV)
- num_e - numpy array of equilibrium electron concentrations calculated at
each temperature in temp (# e's/atom)
- num_h - numpy array of equilibrium hole concentrations calculated at each
temperature in temp (# h's/atom)
- E_el - numpy array of electronic energy calculated at each temperature
in temp (eV/atom)
- S_el - numpy array of electronic entropy calculated at each temperature
in temp (kB/atom)
- F_el - numpy array of electronic free energy calculated at each
temperature in temp (eV/atom)
"""
def __init__(self,input_,format=None):
if isinstance(input_,str):
try:
input_ = open(input_,'r')
except IOError:
print "Error reading input file."
print "Program will now exit!"
sys.exit(1)
if isinstance(input_,file):
if format == "ezvasp":
self.read_ezvasp_dos(input_)
else:
self.read_doscar(input_)
nelec = subprocess.Popen("grep NELECT OUTCAR",
shell=True,stdin=None,stdout=subprocess.PIPE).communicate()[0]
self.nelec = int(float(nelec.split()[2]))
self.get_bandgap()
# Calculate finite temperature properties
self.temp = np.linspace(0,2000,21)
self.mu_e = np.zeros_like(self.temp)
self.num_e = np.zeros_like(self.temp)
self.num_h = np.zeros_like(self.temp)
self.E_el = np.zeros_like(self.temp)
self.S_el = np.zeros_like(self.temp)
self.F_el = np.zeros_like(self.temp)
# Calculate E_el_0
self.E_el_0 = None
tol = 1e-5
for i in range(len(self.temp)):
            if self.temp[i] < tol:
self.mu_e[i] = self.e_fermi
self.E_el[i] = 0.0
self.S_el[i] = 0.0
self.num_e[i] = 0.0
self.num_h[i] = 0.0
            else:
self.mu_e[i] = self.calc_mu_e(self.temp[i])
                if self.E_el_0 is None:
self.E_el_0 = self.calc_E_el(self.mu_e[i],self.temp[i])
self.num_e[i] = self.n(self.mu_e[i],self.temp[i])
self.num_h[i] = self.p(self.mu_e[i],self.temp[i])
self.E_el[i] = (self.calc_E_el(self.mu_e[i],self.temp[i]))
self.S_el[i] = self.calc_S_el(self.mu_e[i],self.temp[i])
self.E_el[1:] = self.E_el[1:] - self.E_el_0
self.F_el = self.E_el - self.temp*BOLTZCONST*self.S_el
def read_doscar(self,input_):
"""
Reads in a doscar file to grab the density of states as a function of
energy. The argument input_ is assumed to be a file object.
"""
self.n_atoms = int(input_.readline().split()[0])
# Discard header information
for i in range(4):
input_.readline()
# Read in Fermi Energy
line = input_.readline().split()
self.e_max = float(line[0])
self.e_min = float(line[1])
self.e_fermi = float(line[3])
energy = []; dos_tot = []; dos_spin = []
for line in input_:
line = line.split()
energy.append(float(line[0]))
if len(line) == 3:
dos_tot.append(float(line[1])) # DOS includes spin up and down
dos_spin.append(0.0)
elif len(line) == 5:
dos_tot.append(float(line[1])+float(line[2]))
dos_spin.append(float(line[1])-float(line[2]))
self.energy = np.array(energy)
#self.dos_tot = np.array(dos_tot)/float(self.n_atoms)
self.dos_tot = np.array(dos_tot)
#self.dos_spin = np.array(dos_spin)/float(self.n_atoms)
self.dos_spin = np.array(dos_spin)
self.dos_spline = UnivariateSpline(self.energy,self.dos_tot)
def read_ezvasp_dos(self,input_):
"""
Reads an ezvasp-formatted dos.out file to get the electronic density of
        states. The argument input_ is assumed to be a file object.
"""
nions = subprocess.Popen("grep NIONS OUTCAR",
shell=True,stdin=None,stdout=subprocess.PIPE).communicate()[0]
self.n_atoms = int(float(nions.split()[-1]))
self.e_min = 0.0
line = input_.readline().split()
self.nelec = int(float(line[0]))
self.step_size = float(line[1])
self.scale = float(line[2])
energy = []; dos_tot = []
i = 0
for line in input_:
line = line.split()
dos_tot.append(float(line[0]))
energy.append(float(i)*self.step_size)
i += 1
self.energy = np.array(energy)
self.dos_tot = np.array(dos_tot)
self.dos_spin = np.zeros_like(self.dos_tot) # Change this for spin-polar
self.dos_spline = UnivariateSpline(self.energy,self.dos_tot)
self.e_max = self.energy[-1]
# Find the 0 Kelvin 'Fermi Energy' using ATAT's method
ne = 0.0
for i in range(len(self.dos_tot)):
ne += self.dos_tot[i]*self.step_size
e_fermi = self.energy[i]
if ne >= self.nelec:
break
self.e_fermi = e_fermi
def get_bandgap(self):
"""
Finds the band gap of a DOS around the fermi energy.
"""
self.step_size = self.energy[1] - self.energy[0]
i = 0
not_found = True
while not_found:
if self.energy[i] < self.e_fermi and self.dos_tot[i] > 1e-3:
bot = self.energy[i]
elif self.energy[i] > self.e_fermi and self.dos_tot[i] > 1e-3:
top = self.energy[i]
not_found = False
i += 1
if top - bot < 2*self.step_size:
self.vbm = self.cbm = self.e_fermi
self.band_gap = 0.0
else:
self.vbm = bot; self.cbm = top
self.band_gap = top - bot
def shift_energy(self,new_ref):
"""
Change the reference energy for all of the energy attributes.
"""
self.energy = self.energy - new_ref
self.e_min = self.e_min - new_ref
self.e_max = self.e_max - new_ref
self.e_fermi = self.e_fermi - new_ref
self.vbm = self.vbm - new_ref
self.cbm = self.cbm - new_ref
self.mu_e = self.mu_e - new_ref
#def sum_dos(self,weight,start,end,args=None):
def sum_dos(self,weight,start,end,args=None):
"""
Sums the density of states, dos, in the energy range [start,end], weighted
by the function weight, which takes as inputs energy and args.
"""
flag = False
sum = 0.
for i in range(len(self.energy)):
if flag:
sum += self.step_size*self.dos_tot[i]*weight(
self.energy[i],args)
if self.energy[i] > end:
break
elif self.energy[i] >= start:
flag = True
return sum
#def integrate_dos(self,weight,start,end,args=None,threshold=0.1):
def ium_dos(self,weight,start,end,args=None,threshold=0.1):
"""
Takes numpy arrays containing the energy and dos and integrates them over
the range [start,end] with the weighting function weight. Weight should take
        as an argument the integrated energy and a list of other arguments args.
"""
def integrand(x,weight,args):
return self.dos_spline(x)*weight(x,args)
result = quad(
integrand,start,end,args=(weight,args),full_output=1,limit=350)
integral = result[0]
error = result[1]
#if error > integral*threshold:
# print "Numerical integration error is greater than"
# print str(threshold)+" of the integrated value."
# sys.exit(1)
return integral
def n(self,mu_e,T):
"""
Calculate the intrinsic number of conduction electrons per atom at an
electron chemical potential mu_e and temperature T.
"""
def fermi(x,args):
mu = args[0]; T = args[1]
return 1./(np.exp((x-mu)/(BOLTZCONST*T))+1.)
#n = self.integrate_dos(fermi,self.cbm,self.e_max,args=(mu_e,T))
#n = self.sum_dos(fermi,self.cbm,self.e_max,args=(mu_e,T))
n = self.sum_dos(fermi,mu_e,self.e_max,args=(mu_e,T))
return n
def p(self,mu_e,T):
"""
Calculate the intrinsic number of valence holes per atom at an electron
chemical potential of mu_e and temperature T.
"""
def fermi(x,args):
mu = args[0]; T = args[1]
return 1./(np.exp((mu-x)/(BOLTZCONST*T))+1.)
#p = self.integrate_dos(fermi,self.e_min,self.vbm,args=(mu_e,T))
#p = self.sum_dos(fermi,self.e_min,self.vbm,args=(mu_e,T))
p = self.sum_dos(fermi,self.e_min,mu_e,args=(mu_e,T))
return p
def charge_neut2(self,mu_e,args):
def fermi(x,args):
mu = args[0]; T = args[1]
return 1./(np.exp((x-mu)/(BOLTZCONST*T))+1.)
T = args
n_sum = self.sum_dos(fermi,self.e_min,self.e_max,args=(mu_e,T))
return self.nelec - n_sum
def charge_neutrality(self,mu_e,args):
"""
Condition for charge neutrality for intrinsic doping in a perfect
semiconductor. This function should be overwritten for a more
complicated case.
"""
T = args # Args could also include atomic chemical potentials.
return self.p(mu_e,T) - self.n(mu_e,T)
def calc_mu_e(self,temp):
"""
Calculate the electron chemical potential at temperature temp using the
condition of charge neutrality.
"""
#mu_e = fsolve(self.charge_neutrality,self.e_fermi,args=(temp))
mu_e = fsolve(self.charge_neut2,self.e_fermi,args=(temp))
return mu_e
def calc_E_el(self,mu_e,T):
"""
Calculate the electronic energy at a temperature T and electron chemical
potential mu_e.
"""
def energy(x,args):
return x
def fermi_energy(x,args):
mu = args[0]; T = args[1]
if x-mu < -30.0*BOLTZCONST*T:
return x
elif x-mu > 30.0*BOLTZCONST*T:
return 0.0
else:
return x/(np.exp((x-mu)/(BOLTZCONST*T))+1.)
#E = self.integrate_dos(fermi_energy,self.e_min,self.e_max,args=(mu_e,T))
#E_0 = self.integrate_dos(
# fermi_energy,self.e_min,self.e_max,args=(mu_e,T))
E = self.sum_dos(fermi_energy,self.e_min,self.e_max,args=(mu_e,T))
#E_0 = self.sum_dos(energy,self.e_min,self.e_fermi,args=None)
return E
def calc_S_el(self,mu_e,T):
"""
Calculate the electronic entropy at an electron chemical potential mu_e
and temperature T.
"""
def weight(x,args):
mu = args[0]; T = args[1]
x = (x - mu)/(BOLTZCONST*T)
f = 1.0/(np.exp(x)+1)
if f > 1e-5 and (1.0 - f) > 1e-5:
return -f*np.log(f)-(1.-f)*np.log(1.-f)
else:
return 0.0
#f = -np.log(np.exp(x)+1)/(np.exp(x)+1)
#f += -np.log(np.exp(-x)+1)/(np.exp(-x)+1)
#return f
#S = self.integrate_dos(weight,self.e_min,self.e_max,args=(mu_e,T))
S = self.sum_dos(weight,self.e_min,self.e_max,args=(mu_e,T))
return S
def fermi_dirac_dist(x,args):
"""
Calculates the Fermi-Dirac distribution for an energy x, temperature
args[0], and electron chemical potential args[1].
"""
T = args[0]; mu = args[1]
return 1./(np.exp((x-mu)/(BOLTZCONST*T))+1.)
def test2(argv):
doscar = ElectronicDOS(open(str(argv[0]),'r'))
T = 500
#n = doscar.integrate_dos(
# fermi_dirac_dist,doscar.cbm,doscar.e_max,args=(T,doscar.e_fermi))
p = doscar.p(doscar.e_fermi,T)
print p
def test3(argv):
format = None
if len(argv) > 1:
format = str(argv[1])
doscar = ElectronicDOS(open(str(argv[0]),'r'),format)
print doscar.temp
print doscar.num_e
print doscar.num_h
print doscar.E_el
print doscar.S_el
print doscar.F_el
def atat_test(argv):
format = None
if len(argv) > 1:
format = str(argv[1])
doscar = ElectronicDOS(open(str(argv[0]),'r'),format)
print doscar.E_el_0
for i in range(len(doscar.temp)):
print doscar.temp[i],doscar.mu_e[i],doscar.E_el[i],doscar.S_el[i],doscar.F_el[i]
def test1(argv):
import matplotlib.pyplot as plt
doscar = ElectronicDOS(open(str(argv[0]),'r'))
plt.plot(doscar.energy,doscar.dos_tot)
plt.show()
def main(argv):
import matplotlib.pyplot as plt
doscar = open(str(argv[0]))
e_fermi,energy,n_tot,n_spin = read_doscar(doscar)
plt.plot(energy,n_tot)
if len(argv) > 1:
doscar2 = open(str(argv[1]))
e_fermi2,energy2,n_tot2,n_spin2 = read_doscar(doscar2)
plt.plot(energy2,n_tot2)
plt.show()
if __name__ == "__main__":
import sys
#test3(sys.argv[1:])
atat_test(sys.argv[1:])
|
mit
| -575,477,087,709,007,550 | 37.656331 | 88 | 0.566644 | false |
strogo/turbion
|
turbion/bits/utils/cache/utils.py
|
1
|
2422
|
from django.utils.functional import curry
from django.core.cache import cache
from django.utils.encoding import smart_str
from django.utils.itercompat import is_iterable
def to_list(arg):
"""Converts `arg` to list"""
if is_iterable(arg) and not isinstance(arg, dict):
return list(arg)
else:
return [arg]
def make_cache_key(base, suffix_list):
return ":".join(map(smart_str, [base] + suffix_list))
class CacheWrapper(object):
def __init__(self, func, trigger, suffix, base_name):
self.trigger = trigger
self.suffix = suffix and suffix or (lambda:[])
self.func = func
self.base_name = base_name
self.connect_invalidators()
def __call__(self, *args, **kwargs):
cache_key = make_cache_key(
self.base_name,
to_list(self.suffix(*args, **kwargs))
)
value = cache.get(cache_key)
if value is None:
value = self.func(*args, **kwargs)
cache.set(cache_key, value)
return value
def connect_invalidators(self):
"""Connects invalidator to all needed signals"""
defaults = {
"suffix": lambda *args, **kwargs: [],
"filter": lambda _: True,
"signal": []
}
for t in to_list(self.trigger):
trigger = defaults.copy()
trigger.update(t)
suffix_getter = trigger["suffix"]
sender = trigger["sender"]
_filter = trigger["filter"]
signals = trigger["signal"]
for signal in to_list(signals):
def make_cache_invalidator(suffix_getter):
def cache_invalidator(signal, sender, *args, **kwargs):
if "instance" in kwargs and not _filter(kwargs["instance"]):
return
cache.delete(
make_cache_key(
self.base_name,
to_list(suffix_getter(*args, **kwargs))
)
)
return cache_invalidator
signal.connect(
make_cache_invalidator(suffix_getter),
sender=sender,
weak=False
)
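
# A minimal usage sketch, not part of the original module. ``Post`` is a
# hypothetical Django model passed in by the caller and the field names are
# assumptions; the point is only to show the shape of the ``trigger`` and
# ``suffix`` arguments that CacheWrapper expects.
def _example_author_post_count(Post):
    from django.db.models.signals import post_save

    def author_post_count(author_id):
        return Post.objects.filter(author_id=author_id).count()

    return CacheWrapper(
        author_post_count,
        trigger={
            "sender": Post,
            "signal": post_save,
            # the invalidator receives the saved instance via the signal kwargs
            "suffix": lambda instance, **kwargs: [instance.author_id],
        },
        suffix=lambda author_id: [author_id],
        base_name="author_post_count",
    )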
|
bsd-3-clause
| -4,414,260,199,410,681,000 | 31.293333 | 84 | 0.490917 | false |
galeone/dynamic-training-bench
|
dytb/inputs/predefined/PASCALVOC2012Classification.py
|
1
|
10931
|
#Copyright (C) 2017 Paolo Galeone <nessuno@nerdz.eu>
#
#This Source Code Form is subject to the terms of the Mozilla Public
#License, v. 2.0. If a copy of the MPL was not distributed with this
#file, you can obtain one at http://mozilla.org/MPL/2.0/.
#Exhibit B is not attached; this software is compatible with the
#licenses expressed under Section 1.12 of the MPL v2.
"""PASCAL VOC 2012"""
import os
import sys
import tarfile
import xml.etree.ElementTree as etree
import csv
from collections import defaultdict
from six.moves import urllib
import tensorflow as tf
from ..processing import build_batch
from ..images import read_image_jpg
from ..interfaces.Input import Input
from ..interfaces.InputType import InputType
class PASCALVOC2012Classification(Input):
"""Routine for decoding the PASCAL VOC 2012 binary file format."""
def __init__(self, add_input_to_label=False):
# Global constants describing the PASCAL VOC 2012 data set.
# resize image to a fixed size
# the resize dimension is an hyperparameter
self._name = 'PASCAL-VOC-2012-Classification'
self._image_height = 150
self._image_width = 150
self._image_depth = 3
# multiple boxes enable the return of a tensor
# of boxes instead of a single box per image
self._multiple_bboxes = False
self.CLASSES = [
"aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car",
"cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike",
"person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"
]
self._bboxes = {"train": defaultdict(list), "val": defaultdict(list)}
self._tf_bboxes = {"train": None, "val": None}
self._num_classes = 20
self._num_examples_per_epoch_for_train = 13609
self._num_examples_per_epoch_for_eval = 13841
self._num_examples_per_epoch_for_test = self._num_examples_per_epoch_for_eval
self._data_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'data', 'PASCALVOC2012')
self._data_url = 'http://pjreddie.com/media/files/VOCtrainval_11-May-2012.tar'
self._maybe_download_and_extract()
self._add_input_to_label = add_input_to_label
@property
def name(self):
"""Returns the name of the input source"""
return self._name
def num_examples(self, input_type):
"""Returns the number of examples per the specified input_type
Args:
input_type: InputType enum
"""
InputType.check(input_type)
if input_type == InputType.train:
return self._num_examples_per_epoch_for_train
elif input_type == InputType.test:
return self._num_examples_per_epoch_for_test
return self._num_examples_per_epoch_for_eval
@property
def num_classes(self):
"""Returns the number of classes"""
return self._num_classes
def _read_image_and_box(self, bboxes_csv):
"""Extract the filename from the queue, read the image and
produce a single box
Returns:
image, box
"""
reader = tf.TextLineReader(skip_header_lines=True)
_, row = reader.read(bboxes_csv)
# file ,y_min, x_min, y_max, x_max, label
record_defaults = [[""], [0.], [0.], [0.], [0.], [0.]]
# eg:
# 2008_000033,0.1831831831831832,0.208,0.7717717717717718,0.952,0
filename, y_min, x_min, y_max, x_max, label = tf.decode_csv(
row, record_defaults)
image_path = os.path.join(self._data_dir, 'VOCdevkit', 'VOC2012',
'JPEGImages') + "/" + filename + ".jpg"
# image is normalized in [-1,1], convert to #_image_depth depth
image = read_image_jpg(image_path, depth=self._image_depth)
return image, tf.stack([y_min, x_min, y_max, x_max, label])
def _read(self, filename_queue):
image, bbox_and_label = self._read_image_and_box(
filename_queue) #bbox is a single box
bbox = bbox_and_label[:4]
label = tf.cast(bbox_and_label[-1], tf.int32)
image = tf.squeeze(
tf.image.crop_and_resize(
tf.expand_dims(image, axis=0),
tf.expand_dims(bbox, axis=0),
box_ind=[0],
crop_size=[self._image_height, self._image_width]),
axis=[0])
return image, label
    def inputs(self, input_type, batch_size, augmentation_fn=None):
        """Construct input for PASCALVOC2012Classification training or evaluation using the Reader ops.
Args:
input_type: InputType enum
batch_size: Number of images per batch.
Returns:
images: Images. 4D tensor of [batch_size, self._image_height, self._image_width, self._image_depth] size.
labels: tensor with batch_size labels
"""
InputType.check(input_type)
if input_type == InputType.train:
filenames = [os.path.join(self._data_dir, 'train.csv')]
num_examples_per_epoch = self._num_examples_per_epoch_for_train
else:
filenames = [os.path.join(self._data_dir, 'val.csv')]
num_examples_per_epoch = self._num_examples_per_epoch_for_eval
for name in filenames:
if not tf.gfile.Exists(name):
raise ValueError('Failed to find file: ' + name)
# Ensure that the random shuffling has good mixing properties.
min_fraction_of_examples_in_queue = 0.4
min_queue_examples = int(
num_examples_per_epoch * min_fraction_of_examples_in_queue)
with tf.variable_scope("{}_input".format(input_type)):
# Create a queue that produces the filenames to read.
filename_queue = tf.train.string_input_producer(filenames)
image, label = self._read(filename_queue)
if augmentation_fn:
image = augmentation_fn(image)
return build_batch(
image,
label if not self._add_input_to_label else [label, image],
min_queue_examples,
batch_size,
shuffle=input_type == InputType.train)
def _maybe_download_and_extract(self):
"""Download and extract the tarball"""
dest_directory = self._data_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = self._data_url.split('/')[-1]
archivepath = os.path.join(dest_directory, filename)
if not os.path.exists(archivepath):
def _progress(count, block_size, total_size):
sys.stdout.write(
'\r>> Downloading %s %.1f%%' %
(filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
archivepath, _ = urllib.request.urlretrieve(self._data_url,
archivepath, _progress)
print()
statinfo = os.stat(archivepath)
print('Successfully downloaded', filename, statinfo.st_size,
'bytes.')
tarfile.open(archivepath, 'r').extractall(dest_directory)
print('Sucessfully extracted.')
# Now self._data dir contains VOCDevkit folder
# Build train.csv and val.csv file in self._data_dir
csv_header = ["filename", "y_min", "x_min", "y_max", "x_max", "label"]
if os.path.exists(os.path.join(
self._data_dir, 'train.csv')) and os.path.exists(
os.path.join(self._data_dir, 'val.csv')):
return
base_dir = os.path.join(
self._data_dir,
'VOCdevkit',
'VOC2012',
)
for current_set in ['train', 'val']:
csv_path = os.path.join(self._data_dir,
'{}.csv'.format(current_set))
with open(csv_path, mode='w') as csv_file:
# header
writer = csv.DictWriter(csv_file, csv_header)
writer.writeheader()
for current_class in self.CLASSES:
lines = open(
os.path.join(
base_dir, 'ImageSets', 'Main', '{}_{}.txt'.format(
current_class,
current_set))).read().strip().split("\n")
for line in lines:
splitted = line.split()
if len(splitted) < 1:
print(splitted, line, current_class)
if splitted[1] == "-1":
continue
image_xml = os.path.join(base_dir, 'Annotations',
'{}.xml'.format(splitted[0]))
image_filename = splitted[0]
# parse XML
tree = etree.parse(image_xml)
root = tree.getroot()
size = root.find('size')
width = float(size.find('width').text)
height = float(size.find('height').text)
for obj in root.iter('object'):
# skip difficult & object.name not in current class
label = obj.find('name').text
if label != current_class:
continue
difficult = obj.find('difficult').text
if int(difficult) == 1:
continue
bndbox = obj.find('bndbox')
normalized_bbox = [
# y_min
float(bndbox.find('ymin').text) / height,
# x_min
float(bndbox.find('xmin').text) / width,
# y_max
float(bndbox.find('ymax').text) / height,
# x_max
float(bndbox.find('xmax').text) / width
]
label_id = self.CLASSES.index(current_class)
writer.writerow({
"filename": image_filename,
"y_min": normalized_bbox[0],
"x_min": normalized_bbox[1],
"y_max": normalized_bbox[2],
"x_max": normalized_bbox[3],
"label": label_id
})
print('{}.csv created'.format(current_set))
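
# A minimal usage sketch, not part of the original class. Note that simply
# constructing the class downloads and extracts the dataset on first use, and
# the returned tensors are meant to be evaluated inside a tf.Session with
# queue runners started, as is usual for this queue-based input pipeline.
def _example_training_batch(batch_size=32):
    """Illustrative only: build a batch of cropped training images/labels."""
    dataset = PASCALVOC2012Classification()
    images, labels = dataset.inputs(InputType.train, batch_size)
    # images: [batch_size, 150, 150, 3] floats; labels: [batch_size] int32
    return images, labels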
|
mpl-2.0
| 2,044,158,810,422,092,500 | 40.249057 | 117 | 0.519349 | false |
Ecogenomics/CheckM
|
checkm/util/img.py
|
1
|
23429
|
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
import os
import sys
import logging
from collections import defaultdict
from checkm.util.seqUtils import readFasta
from checkm.util.taxonomyUtils import ranksByLabel
class IMG(object):
genomeDir = '/srv/whitlam/bio/db/img//07042014/genomes/'
pfamExtension = '.pfam.tab.txt'
tigrExtension = '.tigrfam.tab.txt'
def __init__(self, imgMetadataFile, redundantTIGRFAMsFile):
self.logger = logging.getLogger()
self.metadataFile = imgMetadataFile
self.redundantTIGRFAMs = redundantTIGRFAMsFile
self.cachedGenomeSeqLens = None
self.cachedGenomeFamilyPositions = None
self.cachedGenomeFamilyScaffolds = None
def filterGenomeIds(self, genomeIds, metadata, fieldToFilterOn, valueToRetain):
filteredGenomeIds = set()
for genomeId in genomeIds:
if metadata[genomeId][fieldToFilterOn] == valueToRetain:
filteredGenomeIds.add(genomeId)
return filteredGenomeIds
def geneIdToScaffoldId(self, genomeId):
d = {}
for line in open(self.genomeDir + genomeId + '/' + genomeId + '.gff'):
if line[0] == '#':
continue
lineSplit = line.split('\t')
scaffoldId = lineSplit[0]
geneId = lineSplit[8]
geneId = geneId[geneId.find('=') + 1:geneId.find(';')]
d[geneId] = scaffoldId
return d
def pfamIdToGeneId(self, genomeId):
return self.clusterIdToGeneId(genomeId, self.pfamExtension)
def tigrIdToGeneId(self, genomeId):
return self.clusterIdToGeneId(genomeId, self.tigrExtension)
def clusterIdToGeneId(self, genomeId, extension):
d = {}
bHeader = True
geneAnnotationFile = os.path.join(self.genomeDir, genomeId, genomeId + extension)
for line in open(geneAnnotationFile):
if bHeader:
bHeader = False
continue
lineSplit = line.split('\t')
geneId = lineSplit[0]
clusterId = lineSplit[8]
d[clusterId] = d.get(clusterId, []) + [geneId]
return d
def genomeMetadata(self):
return self.genomeMetadataFromFile(self.metadataFile)
def genomeMetadataFromFile(self, metadataFile):
metadata = {}
bHeader = True
for line in open(metadataFile):
lineSplit = line.split('\t')
lineSplit = [x.strip() for x in lineSplit]
if bHeader:
statusIndex = lineSplit.index('Status')
scaffoldCountIndex = lineSplit.index('Scaffold Count')
gcIndex = lineSplit.index('GC Count')
genomeSizeIndex = lineSplit.index('Genome Size')
geneCountIndex = lineSplit.index('Gene Count')
codingBaseCountIndex = lineSplit.index('Coding Base Count')
bioticRelationshipsIndex = lineSplit.index('Biotic Relationships')
n50Index = lineSplit.index('N50')
domainIndex = lineSplit.index('Domain')
phylumIndex = lineSplit.index('Phylum')
classIndex = lineSplit.index('Class')
orderIndex = lineSplit.index('Order')
familyIndex = lineSplit.index('Family')
genusIndex = lineSplit.index('Genus')
speciesIndex = lineSplit.index('Species')
bHeader = False
continue
genomeId = lineSplit[0].strip()
rDomain = lineSplit[domainIndex].strip()
rPhylum = lineSplit[phylumIndex].strip()
rClass = lineSplit[classIndex].strip()
rOrder = lineSplit[orderIndex].strip()
rFamily = lineSplit[familyIndex].strip()
rGenus = lineSplit[genusIndex].strip()
rSpecies = lineSplit[speciesIndex].strip()
metadata[genomeId] = {}
metadata[genomeId]['status'] = lineSplit[statusIndex]
metadata[genomeId]['taxonomy'] = [rDomain, rPhylum, rClass, rOrder, rFamily, rGenus, rSpecies]
metadata[genomeId]['scaffold count'] = int(lineSplit[scaffoldCountIndex])
try:
metadata[genomeId]['GC Count'] = int(lineSplit[gcIndex])
metadata[genomeId]['GC %'] = float(lineSplit[gcIndex]) / int(lineSplit[genomeSizeIndex])
except:
metadata[genomeId]['GC Count'] = 'NA'
metadata[genomeId]['GC %'] = 'NA'
try:
metadata[genomeId]['genome size'] = int(lineSplit[genomeSizeIndex])
except:
metadata[genomeId]['genome size'] = 'NA'
try:
metadata[genomeId]['gene count'] = int(lineSplit[geneCountIndex])
except:
metadata[genomeId]['gene count'] = 'NA'
try:
metadata[genomeId]['coding base count'] = int(lineSplit[codingBaseCountIndex])
except:
metadata[genomeId]['coding base count'] = 'NA'
metadata[genomeId]['biotic relationships'] = lineSplit[bioticRelationshipsIndex]
metadata[genomeId]['N50'] = int(lineSplit[n50Index])
return metadata
def genomesWithMissingData(self, genomeIds):
missingPFAM = self.missingPfamData(genomeIds)
missingTIGR = self.missingTigrData(genomeIds)
return missingPFAM.union(missingTIGR)
def missingPfamData(self, genomeIds):
missing = set()
for genomeId in genomeIds:
if not os.path.exists(IMG.genomeDir + genomeId + '/' + genomeId + self.pfamExtension):
missing.add(genomeId)
# if os.path.exists(IMG.genomeDir + genomeId + '/' + genomeId + '.genes.fna'):
# print '[Warning] ' + genomeId + ' contains ORF data, but not PFAM annotations.'
return missing
def missingTigrData(self, genomeIds):
missing = set()
for genomeId in genomeIds:
if not os.path.exists(IMG.genomeDir + genomeId + '/' + genomeId + self.tigrExtension):
missing.add(genomeId)
# if os.path.exists(IMG.genomeDir + genomeId + '/' + genomeId + '.genes.fna'):
# print '[Warning] ' + genomeId + ' contains ORF data, but not TIGRFAM annotations.'
return missing
def genomeIdsByTaxonomy(self, taxonStr, metadata):
searchTaxa = taxonStr.split(';')
genomeIdsOfInterest = set()
for genomeId in metadata:
bKeep = True
for r in range(0, len(searchTaxa)):
if taxonStr == 'universal':
bKeep = True
elif taxonStr == 'prokaryotes' and (metadata[genomeId]['taxonomy'][0] == 'Bacteria' or metadata[genomeId]['taxonomy'][0] == 'Archaea'):
bKeep = True
elif searchTaxa[r].strip() == metadata[genomeId]['taxonomy'][r]:
bKeep = True
else:
bKeep = False
break
if bKeep:
genomeIdsOfInterest.add(genomeId)
return genomeIdsOfInterest
def getGenomesByClade(self, rank, clade, metadata):
rankIndex = ranksByLabel[rank]
genomeIdsOfInterest = set()
for genomeId in metadata:
if metadata[genomeId]['taxonomy'][rankIndex] == clade:
genomeIdsOfInterest.add(genomeId)
return genomeIdsOfInterest
def lineageStats(self, metadata, mostSpecificRank):
stats = {}
for r in range(0, mostSpecificRank + 1):
for _, data in metadata.items():
taxaStr = ';'.join(data['taxonomy'][0:r + 1])
stats[taxaStr] = stats.get(taxaStr, 0) + 1
return stats
def lineagesSorted(self, metadata, mostSpecificRank=6):
lineages = []
for r in range(0, mostSpecificRank + 1):
taxa = set()
for _, data in metadata.items():
if 'unclassified' not in data['taxonomy'][0:r + 1]:
taxa.add(';'.join(data['taxonomy'][0:r + 1]))
lineages += sorted(list(taxa))
return lineages
def lineagesByCriteria(self, metadata, minGenomes, mostSpecificRank):
l = []
stats = self.lineageStats(metadata, mostSpecificRank)
for lineage in self.lineagesSorted(metadata, mostSpecificRank):
if stats[lineage] > minGenomes:
l.append(lineage)
return l
def __readTable(self, table, genomeIds, extension, clusterIdIndex):
for genomeId in genomeIds:
count = {}
bHeader = True
geneIdToFamilyIds = defaultdict(set)
for line in open(os.path.join(self.genomeDir, genomeId, genomeId + extension)):
if bHeader:
bHeader = False
continue
lineSplit = line.split('\t')
geneId = lineSplit[0]
clusterId = lineSplit[clusterIdIndex]
# IMG may annotate multiple parts of a gene as coming
# from the same cluster (PFAM, TIGRFAM), but this should
# only count as 1 gene having this annotation
if clusterId not in geneIdToFamilyIds[geneId]:
geneIdToFamilyIds[geneId].add(clusterId)
count[clusterId] = count.get(clusterId, 0) + 1
for clusterId, c in count.items():
if clusterId not in table:
table[clusterId] = {}
table[clusterId][genomeId] = c
def geneCountTable(self, genomeIds):
table = {}
self.__readTable(table, genomeIds, self.pfamExtension, 8)
self.__readTable(table, genomeIds, self.tigrExtension, 6)
return table
def filterGeneCountTable(self, genomeIds, table, ubiquityThreshold=0.9, singleCopyThreshold=0.9):
idsToFilter = []
for pfamId, genomeCounts in table.items():
ubiquity = 0
singleCopy = 0
for genomeId in genomeIds:
count = genomeCounts.get(genomeId, 0)
if count > 0:
ubiquity += 1
if count == 1:
singleCopy += 1
if (float(ubiquity) / len(genomeIds) < ubiquityThreshold) or (float(singleCopy) / len(genomeIds) < singleCopyThreshold):
idsToFilter.append(pfamId)
for clusterId in idsToFilter:
table.pop(clusterId)
return table
def __genomeIdToClusterScaffold(self, genomeId):
"""Determine position of PFAM and TIGRFAM genes in genome."""
# determine mapping from gene ids to PFAM/TIGRFAM ids
familyFile = os.path.join(self.genomeDir, genomeId, genomeId)
pfamIdToGeneIds = self.familyIdToGeneId(familyFile + self.pfamExtension, 8)
tigrIdToGeneIds = self.familyIdToGeneId(familyFile + self.tigrExtension, 6)
# determine scaffold of genes from GFF file
gffFile = os.path.join(self.genomeDir, genomeId, genomeId + '.gff')
genePosition = {}
for line in open(gffFile):
if line[0] == '#':
continue
lineSplit = line.split('\t')
if len(lineSplit) != 9:
continue # line likely annotates a CRISPR
seqId = lineSplit[0]
geneId = lineSplit[8].split(';')[0]
geneId = geneId[geneId.find('=') + 1:]
genePosition[geneId] = seqId
# create gene mapping table
try:
# In theory, every PFAM or TIGRFAM gene identified should have
# an entry in the GFF file and thus a position. In practice, there
            # are a few cases where this isn't true, so only PFAMs/TIGRFAMs
# with GFF entries are considered.
familyIdToScaffoldIds = {}
for pfamId, geneIds in pfamIdToGeneIds.items():
scaffolds = []
for geneId in geneIds:
scaffold = genePosition.get(geneId, None)
if scaffold != None:
scaffolds.append(scaffold)
if scaffolds:
familyIdToScaffoldIds[pfamId] = scaffolds
for tigrId, geneIds in tigrIdToGeneIds.items():
scaffolds = []
for geneId in geneIds:
scaffold = genePosition.get(geneId, None)
if scaffold != None:
scaffolds.append(scaffold)
                if scaffolds:
familyIdToScaffoldIds[tigrId] = scaffolds
except:
print ('[BUG]: __genomeIdToClusterScaffold')
print (sys.exc_info()[0])
print (genomeId, geneId, tigrId, pfamId)
sys.exit(1)
return familyIdToScaffoldIds
def precomputeGenomeFamilyScaffolds(self, genomeIds):
"""Cache scaffold of PFAM and TIGRFAM genes in genomes."""
# This function is intended to speed up functions, such as geneDistTable(),
# that are called multiple times (typically during simulations)
self.cachedGenomeFamilyScaffolds = {}
for genomeId in genomeIds:
self.cachedGenomeFamilyScaffolds[genomeId] = self.__genomeIdToClusterScaffold(genomeId)
return self.cachedGenomeFamilyScaffolds
def familyIdToGeneId(self, filename, clusterIdIndex):
"""Determine gene ids associated with PFAMs or TIGRFAMs."""
familyIdToGeneId = defaultdict(set)
with open(filename) as f:
f.readline()
for line in f:
lineSplit = line.split('\t')
geneId = lineSplit[0]
familyId = lineSplit[clusterIdIndex]
familyIdToGeneId[familyId].update([geneId])
return familyIdToGeneId
def __genomeSeqLens(self, genomeId):
"""Determine length of contigs/scaffolds comprising genome."""
genomeFile = os.path.join(self.genomeDir, genomeId, genomeId + '.fna')
seqs = readFasta(genomeFile)
seqLens = {}
for seqId, seq in seqs.items():
seqLens[seqId] = len(seq)
return seqLens
def precomputeGenomeSeqLens(self, genomeIds):
"""Cache the length of contigs/scaffolds for all genomes."""
# This function is intended to speed up functions, such as geneDistTable(),
# that are called multiple times (typically during simulations)
self.cachedGenomeSeqLens = {}
for genomeId in genomeIds:
self.cachedGenomeSeqLens[genomeId] = self.__genomeSeqLens(genomeId)
return self.cachedGenomeSeqLens
def __genomeFamilyPositions(self, genomeId, seqLens, spacingBetweenContigs):
"""Determine position of PFAM and TIGRFAM genes in genome."""
# determine mapping from gene ids to PFAM/TIGRFAM ids
familyFile = os.path.join(self.genomeDir, genomeId, genomeId)
pfamIdToGeneIds = self.familyIdToGeneId(familyFile + self.pfamExtension, 8)
tigrIdToGeneIds = self.familyIdToGeneId(familyFile + self.tigrExtension, 6)
# determine position of genes from GFF file
gffFile = os.path.join(self.genomeDir, genomeId, genomeId + '.gff')
genePosition = {}
contigStart = 0
curSeqId = None
for line in open(gffFile):
if line[0] == '#':
continue
lineSplit = line.split('\t')
if len(lineSplit) != 9:
continue # line likely annotates a CRISPR
# check if we've moved to the next contig
if curSeqId == None:
curSeqId = lineSplit[0]
if curSeqId != lineSplit[0]:
contigStart += spacingBetweenContigs + seqLens[curSeqId]
curSeqId = lineSplit[0]
geneId = lineSplit[8].split(';')[0]
geneId = geneId[geneId.find('=') + 1:]
start = int(lineSplit[3])
end = int(lineSplit[4])
genePosition[geneId] = [contigStart + start, contigStart + end]
# create gene mapping table
try:
# In theory, every PFAM or TIGRFAM gene identified should have
# an entry in the GFF file and thus a position. In practice, there
            # are a few cases where this isn't true, so only PFAMs/TIGRFAMs
# with GFF entries are considered.
familyIdToGenomePositions = {}
for pfamId, geneIds in pfamIdToGeneIds.items():
positions = []
for geneId in geneIds:
position = genePosition.get(geneId, None)
if position != None:
positions.append(position)
if positions:
familyIdToGenomePositions[pfamId] = positions
for tigrId, geneIds in tigrIdToGeneIds.items():
positions = []
for geneId in geneIds:
position = genePosition.get(geneId, None)
if position != None:
positions.append(position)
if positions:
familyIdToGenomePositions[tigrId] = positions
except:
print ('[BUG]: __genomeFamilyPositions')
print (sys.exc_info()[0])
print (genomeId, geneId, tigrId, pfamId)
sys.exit(1)
return familyIdToGenomePositions
def precomputeGenomeFamilyPositions(self, genomeIds, spacingBetweenContigs):
"""Cache position of PFAM and TIGRFAM genes in genomes."""
# This function is intended to speed up functions, such as geneDistTable(),
# that are called multiple times (typically during simulations)
self.cachedGenomeFamilyPositions = {}
for genomeId in genomeIds:
self.cachedGenomeFamilyPositions[genomeId] = self.__genomeFamilyPositions(genomeId, self.cachedGenomeSeqLens[genomeId], spacingBetweenContigs)
def geneDistTable(self, genomeIds, markerGenes, spacingBetweenContigs=0):
"""Create table indicating position of each marker gene in a genome."""
# Note: genomes split into multiple contigs are treated as contiguous,
# with a spacing between contigs as specified
table = {}
for genomeId in genomeIds:
# read length of scaffolds/contigs in genome
if self.cachedGenomeSeqLens:
seqLens = self.cachedGenomeSeqLens[genomeId]
else:
seqLens = self.__genomeSeqLens(genomeId)
# read position of protein families on genome
if self.cachedGenomeFamilyPositions:
genomeFamilyPositions = self.cachedGenomeFamilyPositions[genomeId]
else:
genomeFamilyPositions = self.__genomeFamilyPositions(genomeId, seqLens, spacingBetweenContigs)
# create marker gene position table for genome
clusterIdToGenomePositions = {}
for markerGene in markerGenes:
positions = genomeFamilyPositions.get(markerGene, None)
if positions != None:
clusterIdToGenomePositions[markerGene] = genomeFamilyPositions[markerGene]
table[genomeId] = clusterIdToGenomePositions
return table
def identifyMitochondrialChloroplastGenes(self, genomeId):
# identify mitochondrial or chloroplast sequences
mitoChloroSeqs = set()
for line in open(self.genomeDir + genomeId + '/' + genomeId + '.fna'):
if line[0] == '>':
if 'mitochondria' in line.lower() or 'mitochondrion' in line.lower() or 'chloroplast' in line.lower():
mitoChloroSeqs.add(line[1:].split()[0])
# identify mitochondrial or chloroplast genes
mitoChloroGenes = set()
for line in open(self.genomeDir + genomeId + '/' + genomeId + '.gff'):
if line[0] == '#':
continue
lineSplit = line.split('\t')
seqId = lineSplit[0]
if seqId in mitoChloroSeqs:
desc = lineSplit[8]
geneId = desc.split(';')[0].split('=')[1]
mitoChloroGenes.add(geneId)
return mitoChloroGenes
def identifyRedundantPFAMs(self, markerGenes):
pfamIdToTigrId = defaultdict(list)
for line in open(self.redundantTIGRFAMs):
lineSplit = line.split('\t')
pfamId = lineSplit[0]
tigrId = lineSplit[1].rstrip()
pfamIdToTigrId[pfamId].append(tigrId)
pfamToRemove = set()
for markerGene in markerGenes:
if markerGene in pfamIdToTigrId:
for tigrId in pfamIdToTigrId[markerGene]:
if tigrId in markerGenes:
pfamToRemove.add(markerGene)
return pfamToRemove
def identifyRedundantTIGRFAMs(self, markerGenes):
tigrIdToPfamId = {}
for line in open(self.redundantTIGRFAMs):
lineSplit = line.split('\t')
pfamId = lineSplit[0]
tigrId = lineSplit[1].rstrip()
tigrIdToPfamId[tigrId] = tigrIdToPfamId.get(tigrId, []) + [pfamId]
tigrToRemove = set()
for markerGene in markerGenes:
if markerGene in tigrIdToPfamId:
for pfamId in tigrIdToPfamId[markerGene]:
if pfamId in markerGenes:
tigrToRemove.add(markerGene)
return tigrToRemove
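
# A minimal usage sketch, not part of the original class. The two input files
# and the taxon string are placeholders; the sketch only shows the intended
# call order for deriving ubiquitous, single-copy marker families.
def _example_marker_families(img_metadata_file, redundant_tigrfams_file):
    img = IMG(img_metadata_file, redundant_tigrfams_file)
    metadata = img.genomeMetadata()
    genome_ids = img.genomeIdsByTaxonomy('Bacteria;Proteobacteria', metadata)
    genome_ids -= img.genomesWithMissingData(genome_ids)
    table = img.geneCountTable(genome_ids)
    return img.filterGeneCountTable(genome_ids, table)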
|
gpl-3.0
| 3,703,315,242,581,544,000 | 37.777589 | 154 | 0.558539 | false |
dhrone/pydKeg
|
displays/winstar_weg.py
|
1
|
9885
|
#!/usr/bin/python
# coding: UTF-8
# Driver for Winstar WEH001602A 16x2 OLED display on the RPi
# Written by: Ron Ritchey
# Derived from Lardconcepts
# https://gist.github.com/lardconcepts/4947360
# Which was also drived from Adafruit
# http://forums.adafruit.com/viewtopic.php?f=8&t=29207&start=15#p163445
#
# Ultimately this is a minor variation of the HD44780 controller
#
# Useful references
# General overview of HD44780 style displays
# https://en.wikipedia.org/wiki/Hitachi_HD44780_LCD_controller
#
# More detail on initialization and timing
# http://web.alfredstate.edu/weimandn/lcd/lcd_initialization/lcd_initialization_index.html
#
# Documenation for the similar Winstar WS0010 board currently available at
# http://www.picaxe.com/docs/oled.pdf
from __future__ import unicode_literals
import time, math,logging
import lcd_display_driver
import fonts
import graphics as g
from PIL import Image
import logging
import display
try:
import _winstar_weg
C_LIBRARY = True
except:
logging.debug("_winstar_weg not found")
C_LIBRARY = False
try:
import RPi.GPIO as GPIO
except:
logging.debug("RPi.GPIO not installed")
class winstar_weg(lcd_display_driver.lcd_display_driver):
# commands
LCD_CLEARDISPLAY = 0x01
LCD_RETURNHOME = 0x02
LCD_ENTRYMODESET = 0x04
LCD_DISPLAYCONTROL = 0x08
LCD_CURSORSHIFT = 0x10
LCD_FUNCTIONSET = 0x20
LCD_SETCGRAMADDR = 0x40
LCD_SETDDRAMADDR = 0x80
# flags for display entry mode
LCD_ENTRYRIGHT = 0x00
LCD_ENTRYLEFT = 0x02
LCD_ENTRYSHIFTINCREMENT = 0x01
LCD_ENTRYSHIFTDECREMENT = 0x00
# flags for display on/off control
LCD_DISPLAYON = 0x04
LCD_DISPLAYOFF = 0x00
LCD_CURSORON = 0x02
LCD_CURSOROFF = 0x00
LCD_BLINKON = 0x01
LCD_BLINKOFF = 0x00
# flags for display/cursor shift
LCD_DISPLAYMOVE = 0x08
LCD_CURSORMOVE = 0x00
# flags for display/cursor shift
LCD_DISPLAYMOVE = 0x08
LCD_CURSORMOVE = 0x00
LCD_MOVERIGHT = 0x04
LCD_MOVELEFT = 0x00
# flags for function set
LCD_8BITMODE = 0x10
LCD_4BITMODE = 0x00
LCD_2LINE = 0x08
LCD_1LINE = 0x00
LCD_5x10s = 0x04
LCD_5x8DOTS = 0x00
def __init__(self, rows=16, cols=100, rs=7, e=8, datalines=[25, 24, 23, 27]):
# Default arguments are appropriate for Raspdac V3 only!!!
self.pins_db = datalines
self.pin_rs = rs
self.pin_e = e
self.rows = rows
self.cols = cols
self.fb = [[]]
self.FONTS_SUPPORTED = True
# Initialize the default font
font = fonts.bmfont.bmfont('latin1_5x8_fixed.fnt')
self.fp = font.fontpkg
# Set GPIO pins to handle communications to display
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
for pin in self.pins_db:
GPIO.setup(pin, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(self.pin_e, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(self.pin_rs, GPIO.OUT, initial=GPIO.LOW)
GPIO.output(self.pin_e, False)
# initialization sequence taken from audiophonics.fr site
# there is a good writeup on the HD44780 at Wikipedia
# https://en.wikipedia.org/wiki/Hitachi_HD44780_LCD_controller
if C_LIBRARY:
_winstar_weg.initgpio(self.pin_rs, self.pin_e, self.pins_db[0], self.pins_db[1], self.pins_db[2], self.pins_db[3])
# Assuming that the display may already be in 4 bit mode
# send four 0000 instructions to resync the display
for i in range(1,5):
self.writeonly4bits(0x00, False)
self.delayMicroseconds(1000)
# Now place in 8 bit mode so that we start from a known state
# issuing function set twice in case we are in 4 bit mode
self.writeonly4bits(0x03, False)
self.writeonly4bits(0x03, False)
self.delayMicroseconds(1000)
# placing display in 4 bit mode
self.writeonly4bits(0x02, False)
self.delayMicroseconds(1000)
# From this point forward, we need to use write4bits function which
# implements the two stage write that 4 bit mode requires
self.write4bits(0x08, False) # Turn display off
self.write4bits(0x29, False) # Function set for 4 bits, 2 lines, 5x8 font, Western European font table
self.write4bits(0x06, False) # Entry Mode set to increment and no shift
self.write4bits(0x1F, False) # Set to char mode and turn on power
self.write4bits(0x01, False) # Clear display and reset cursor
self.write4bits(0x0c, False) # Turn on display
# Set up parent class. Note. This must occur after display has been
# initialized as the parent class may attempt to load custom fonts
super(winstar_weg, self).__init__(rows,cols)
def clear(self):
# Set cursor back to 0,0
self.setCursor(0,0) # set cursor position to zero
self.fb = [[]]
# And then clear the screen
self.write4bits(self.LCD_CLEARDISPLAY) # command to clear display
self.delayMicroseconds(2000) # 2000 microsecond sleep
def setCursor(self, row, col):
if row >= self.rows or col >= self.cols:
raise IndexError
# Convert from pixels to bytes
row = int(math.ceil(row/8.0))
self.write4bits(self.LCD_SETDDRAMADDR | col)
self.write4bits(self.LCD_SETCGRAMADDR | row)
def loadcustomchars(self, char, fontdata):
# Custom characters are unnecessary on a graphical display
return
def message(self, text, row=0, col=0, varwidth=True):
''' Send string to LCD. Newline wraps to second line'''
if row >= self.rows or col >= self.cols:
raise IndexError
textwidget = display.gwidgetText(text, self.fp, {}, [], varwidth )
self.update(textwidget.image)
def update(self, image):
# Make image the same size as the display
img = image.crop( (0,0,self.cols, self.rows))
# Compute frame from image
frame = self.getframe( img, 0,0, self.cols,self.rows )
if C_LIBRARY:
_winstar_weg.updateframe(self.pin_rs, self.pin_e, self.pins_db[0], self.pins_db[1], self.pins_db[2], self.pins_db[3],frame)
else:
self.updateframe(frame)
def updateframe(self, newbuf):
rows = int(math.ceil(self.rows/8.0))
for j in range(0, rows):
self.setCursor(j*8,0)
for i in range(0, self.cols):
try:
byte = newbuf[j][i]
except IndexError:
byte = 0
self.write4bits(byte, True)
def cleanup(self):
GPIO.cleanup()
def msgtest(self, text, wait=1.5):
self.clear()
        self.message(text)
time.sleep(wait)
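
# A minimal usage sketch, not part of the driver. It assumes it is running on
# a Raspberry Pi with RPi.GPIO available and relies on the Raspdac V3 wiring
# that __init__ defaults to; any other wiring must be passed in explicitly.
def _example_hello(rows=16, cols=100):
    lcd = winstar_weg(rows, cols)
    try:
        lcd.clear()
        lcd.message("Hello\nWorld", 0, 0, True)
    finally:
        lcd.cleanup()
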
if __name__ == '__main__':
import getopt,sys,os
import graphics as g
import fonts
import moment
def processevent(events, starttime, prepost, db, dbp):
for evnt in events:
t,var,val = evnt
if time.time() - starttime >= t:
if prepost in ['pre']:
db[var] = val
elif prepost in ['post']:
dbp[var] = val
logging.basicConfig(format=u'%(asctime)s:%(levelname)s:%(message)s', handlers=[logging.StreamHandler()], level=logging.DEBUG)
try:
opts, args = getopt.getopt(sys.argv[1:],"hr:c:",["row=","col=","rs=","e=","d4=","d5=","d6=", "d7="])
except getopt.GetoptError:
print 'winstar_weg.py -r <rows> -c <cols> --rs <rs> --e <e> --d4 <d4> --d5 <d5> --d6 <d6> --d7 <d7>'
sys.exit(2)
# Set defaults
# These are for the wiring used by a Raspdac V3
rows = 16
cols = 80
rs = 7
e = 8
d4 = 25
d5 = 24
d6 = 23
d7 = 27
for opt, arg in opts:
if opt == '-h':
print 'winstar_weg.py -r <rows> -c <cols> --rs <rs> --e <e> --d4 <d4> --d5 <d5> --d6 <d6> --d7 <d7>'
sys.exit()
elif opt in ("-r", "--rows"):
rows = int(arg)
elif opt in ("-c", "--cols"):
cols = int(arg)
elif opt in ("--rs"):
rs = int(arg)
elif opt in ("--e"):
e = int(arg)
elif opt in ("--d4"):
d4 = int(arg)
elif opt in ("--d5"):
d5 = int(arg)
elif opt in ("--d6"):
d6 = int(arg)
elif opt in ("--d7"):
d7 = int(arg)
db = {
'actPlayer':'mpd',
'playlist_position':1,
'playlist_length':5,
'title':"Nicotine & Gravy",
'artist':"Beck",
'album':'Midnight Vultures',
'elapsed':0,
'length':400,
'volume':50,
'stream':'Not webradio',
'utc': moment.utcnow(),
'outside_temp_formatted':'46\xb0F',
'outside_temp_max':72,
'outside_temp_min':48,
'outside_conditions':'Windy',
'system_temp_formatted':'98\xb0C',
'state':'stop',
'system_tempc':81.0
}
dbp = {
'actPlayer':'mpd',
'playlist_position':1,
'playlist_length':5,
'title':"Nicotine & Gravy",
'artist':"Beck",
'album':'Midnight Vultures',
'elapsed':0,
'length':400,
'volume':50,
'stream':'Not webradio',
'utc': moment.utcnow(),
'outside_temp_formatted':'46\xb0F',
'outside_temp_max':72,
'outside_temp_min':48,
'outside_conditions':'Windy',
'system_temp_formatted':'98\xb0C',
'state':'stop',
'system_tempc':81.0
}
events = [
(15, 'state', 'play'),
(20, 'title', 'Mixed Bizness'),
(30, 'volume', 80),
(40, 'title', 'I Never Loved a Man (The Way I Love You)'),
(40, 'artist', 'Aretha Franklin'),
(40, 'album', 'The Queen Of Soul'),
(70, 'state', 'stop'),
(90, 'state', 'play'),
(100, 'title', 'Do Right Woman, Do Right Man'),
(120, 'volume', 100),
(140, 'state', 'play' )
]
try:
pins = [d4, d5, d6, d7]
print "Winstar OLED Display Test"
print "ROWS={0}, COLS={1}, RS={2}, E={3}, Pins={4}".format(rows,cols,rs,e,pins)
lcd = winstar_weg(rows,cols,rs,e,[d4, d5, d6, d7])
lcd.clear()
lcd.message("pydPiper\nStarting",0,0,True)
time.sleep(2)
lcd.clear()
starttime = time.time()
elapsed = int(time.time()-starttime)
timepos = time.strftime(u"%-M:%S", time.gmtime(int(elapsed))) + "/" + time.strftime(u"%-M:%S", time.gmtime(int(254)))
dc = display.display_controller((80,16))
f_path = os.path.join(os.path.dirname(__file__), 'pages_test.py')
dc.load(f_path, db,dbp )
starttime=time.time()
while True:
elapsed = int(time.time()-starttime)
db['elapsed']=elapsed
db['utc'] = moment.utcnow()
processevent(events, starttime, 'pre', db, dbp)
img = dc.next()
processevent(events, starttime, 'post', db, dbp)
lcd.update(img)
time.sleep(.1)
except KeyboardInterrupt:
pass
finally:
lcd.clear()
lcd.message("Goodbye!", 0, 0, True)
time.sleep(2)
lcd.clear()
GPIO.cleanup()
print "Winstar OLED Display Test Complete"
|
mit
| 3,913,770,316,773,542,000 | 24.675325 | 126 | 0.666566 | false |
redhat-openstack/heat
|
heat/db/sqlalchemy/migrate_repo/versions/021_resource_data.py
|
1
|
1757
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
def upgrade(migrate_engine):
meta = sqlalchemy.MetaData()
meta.bind = migrate_engine
resource_data = sqlalchemy.Table(
'resource_data', meta,
sqlalchemy.Column('id',
sqlalchemy.Integer,
primary_key=True,
nullable=False),
sqlalchemy.Column('created_at', sqlalchemy.DateTime),
sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
sqlalchemy.Column('key', sqlalchemy.String(255)),
sqlalchemy.Column('value', sqlalchemy.Text),
sqlalchemy.Column('redact', sqlalchemy.Boolean),
sqlalchemy.Column('resource_id',
sqlalchemy.String(36),
sqlalchemy.ForeignKey('resource.id'),
nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
sqlalchemy.Table('resource', meta, autoload=True)
resource_data.create()
def downgrade(migrate_engine):
meta = sqlalchemy.MetaData()
meta.bind = migrate_engine
resource_data = sqlalchemy.Table('resource_data', meta, autoload=True)
resource_data.drop()
|
apache-2.0
| 5,780,820,065,338,006,000 | 35.604167 | 78 | 0.635743 | false |
zyzyis/monetdb
|
clients/python3/monetdb/sql/monetize.py
|
1
|
1844
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright 2008-2015 MonetDB B.V.
"""
functions for converting python objects to monetdb SQL format. If you want
to add support for a specific type you should add a function as a value to
the mapping dict and the datatype as key.
"""
import datetime
import decimal
from monetdb.exceptions import ProgrammingError
def monet_none(data):
"""
returns a NULL string
"""
return "NULL"
def monet_bool(data):
"""
returns "true" or "false"
"""
return ["false", "true"][bool(data)]
def monet_escape(data):
"""
returns an escaped string
"""
data = str(data).replace("\\", "\\\\")
data = data.replace("\'", "\\\'")
return "'%s'" % str(data)
def monet_bytes(data):
"""
converts bytes to string
"""
return monet_escape(data)
mapping = (
(str, monet_escape),
(bytes, monet_bytes),
(int, str),
(complex, str),
(float, str),
(decimal.Decimal, str),
(datetime.datetime, monet_escape),
(datetime.time, monet_escape),
(datetime.date, monet_escape),
(datetime.timedelta, monet_escape),
(bool, monet_bool),
(type(None), monet_none),
)
mapping_dict = dict(mapping)
def convert(data):
"""
    Return the value converted to a MonetDB SQL literal, choosing the
    conversion function based upon the python type.
"""
if type(data) in mapping_dict:
return mapping_dict[type(data)](data)
else:
for type_, func in mapping:
if issubclass(type(data), type_):
return func(data)
#if hasattr(data, '__str__'):
# return monet_escape
raise ProgrammingError("type %s not supported as value" % type(data))
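
# A few illustrative conversions, not part of the original module; the exact
# values below are simply what the functions above produce for common inputs.
def _example_conversions():
    assert convert(None) == "NULL"
    assert convert(True) == "true"
    assert convert(42) == "42"
    assert convert("it's") == "'it\\'s'"
    assert convert(datetime.date(2015, 1, 1)) == "'2015-01-01'"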
|
mpl-2.0
| -8,862,742,702,309,967,000 | 22.05 | 74 | 0.621475 | false |
t-wissmann/qutebrowser
|
qutebrowser/browser/webengine/certificateerror.py
|
2
|
1605
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2020 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Wrapper over a QWebEngineCertificateError."""
from PyQt5.QtWebEngineWidgets import QWebEngineCertificateError
from qutebrowser.utils import usertypes, utils, debug
class CertificateErrorWrapper(usertypes.AbstractCertificateErrorWrapper):
"""A wrapper over a QWebEngineCertificateError."""
def __init__(self, error):
super().__init__(error)
self.ignore = False
def __str__(self):
return self._error.errorDescription()
def __repr__(self):
return utils.get_repr(
self, error=debug.qenum_key(QWebEngineCertificateError,
self._error.error()),
string=str(self))
def url(self):
return self._error.url()
def is_overridable(self):
return self._error.isOverridable()
|
gpl-3.0
| -561,993,302,736,005,100 | 32.4375 | 74 | 0.699688 | false |
airtonix/cmsplugin-embedded-pages
|
cmsplugin_embeddedpages/models.py
|
1
|
3826
|
import os
from django.db import models
from cms.models.pluginmodel import CMSPlugin
from .lib.choices import (
DynamicTemplateChoices,
PlaceholdersDynamicChoices,
# PageIDsDynamicChoices,
# PageAttributeDynamicChoices,
)
TEMPLATE_PATH = os.path.join("cmsplugin_embeddedpages","layouts")
GROUP_TEMPLATE_PATH = os.path.join(TEMPLATE_PATH, "groups")
PAGE_TEMPLATE_PATH = os.path.join(TEMPLATE_PATH, "pages")
#class FilterRule(models.Model):
# QUERY_ACTION_CHOICES = (
# ("filter", "Show only"),
# ("exclude", "Hide"),
# )
# OPERATION_CHOICES = (
# ("=", "Equal To"),
# ("_lt =", "Less Than"),
# ("_lte =", "Less than or Equal to"),
# ("_gt =", "Greater than"),
# ("_gte =", "Greater than or Equal to"),
# ("_contains =", "Contains"),
# ("_icontains =", "Contains (case insensitive)"),
# ("_startswith =", "Starts with"),
# ("_istartswith =", "Starts with (case insensitive)"),
# ("_isnull =", "Is Null"),
# ("_in =", "Is in the list"),
# )
# attribute_name = models.CharField("Attribute", max_length=128)
# attribute_operation = models.CharField("Operator", max_length=128,
# choices=OPERATION_CHOICES)
# attribute_value = models.CharField("Value", max_length=128,
# blank=True, null=True)
# query_action = models.CharField("Action", max_length=128,
# choices=QUERY_ACTION_CHOICES)
#class Ruleset(models.Model):
# rule = models.ForeignKey('FilterRule')
# view = models.ForeignKey('Settings')
# description = models.CharField(max_length=128)
class PagePluginSettings(CMSPlugin):
    """ Stores options for the CMS plugin that embeds content from other pages
"""
group_template = models.CharField(choices=DynamicTemplateChoices(
path=GROUP_TEMPLATE_PATH,
include='.html',
exclude='base'),
max_length=256, blank=True, null=True,
help_text="""Select a template to render this
list. Templates are stored in : {0}""".format(GROUP_TEMPLATE_PATH))
page_template = models.CharField(choices=DynamicTemplateChoices(
path=PAGE_TEMPLATE_PATH,
include='.html',
exclude='base'),
max_length=256, blank=True, null=True,
help_text="""Select a template to render this
list. Templates are stored in : {0}""".format(PAGE_TEMPLATE_PATH))
root = models.ForeignKey("cms.Page",
help_text="""Start including pages at a page which has this ID""")
include_root = models.BooleanField(default=True,
help_text="""Should the root page also be included in the output?
If the root page is also the page where this plugin is being used then
it will never be included. (to prevent recursion)""")
placeholders = models.CharField( choices = PlaceholdersDynamicChoices(),
max_length=128, blank=True, null=True,
help_text="""Only render content within placeholders of these names.""")
depth = models.PositiveIntegerField(default=0,
help_text="""How deep should menu traversal go?""")
# filters = models.ManyToManyField('FilterRule',
# through = Ruleset,
# help_text="""Page attributes to perform filters on.""")
def __unicode__(self):
output = U"[{0}] {1}".format(
self.root.id,
self.root.get_slug(),
)
if self.depth >= 0:
output = U"{0}, Traversing: {1} level".format(output, self.depth)
return output
|
bsd-2-clause
| 7,365,644,589,555,425,000 | 35.438095 | 78 | 0.607946 | false |
dvorberg/movierip
|
movierip/__init__.py
|
1
|
5281
|
#!/usr/bin/env python
# -*- coding: utf-8; mode: python; -*-
## Copyright 2010–17 by Diedrich Vorberg <diedrich@tux4web.de>
##
## All Rights Reserved
##
## For more Information on orm see the README file.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## I have added a copy of the GPL in the file LICENSE
import sys, os, os.path, optparse, lxml.etree, re, subprocess, types, urllib
from string import *
from cStringIO import StringIO
from t4.debug import log, debug
import tmdb3 as tmdb; tmdb.set_key("a888a20b801aeefde1ba41a932898d48")
options = None
def present_movie(idx, movie):
print idx,
if type(movie.releasedate) == types.UnicodeType:
year = movie.releasedate
elif not movie.releasedate:
year = "unknown"
else:
year = str(movie.releasedate.year)
print "%s (%s)" % ( movie.title, year, )
def search_tmdb_for_movie(title):
"""
Run a search query on the tmdb for `title`.
"""
global options
name, year = name_and_year(title)
if year is not None:
result = tmdb.searchMovieWithYear("%s (%i)" % (name, year))
movies = list(result)
if year is not None:
def year_match(movie):
if movie is None:
return False
else:
if not movie.releasedate:
return False
else:
y = movie.releasedate.year
return y == year or y == year - 1 or y == year + 1
movies = filter(year_match, movies)
else:
result = tmdb.searchMovie(name)
movies = list(result)
if len(movies) == 0:
raise Exception("No movie found: %s" % repr(name))
elif len(movies) > 1:
movies.sort(lambda a, b: cmp(str(a.releasedate), str(b.releasedate)))
print
for idx, movie in enumerate(movies):
present_movie(idx, movie)
print
if options is None or options.choose is None:
print "Enter index [0]:",
i = strip(raw_input())
if i == "":
idx = 0
else:
idx = int(i)
else:
idx = int(options.choose)
else:
idx = 0
return movies[idx]
tmdb_id_re = re.compile(r"\d+")
def get_tmdb_movie_for(title):
"""
Title can be either one of:
• The 'Title' (string)
• The 'Title (year)' (string)
• tmdb id (integer)
"""
if type(title) == types.IntType or tmdb_id_re.match(title) is not None:
try:
return tmdb.Movie(int(title))
except KeyError:
raise Exception("Movie with tmdb %i not found." % int(title))
else:
return search_tmdb_for_movie(title)
def info_from_tmdb_movie(movie):
if movie.tagline is None:
movie.tagline = ""
if movie.overview is None:
movie.overview = ""
if strip(movie.tagline):
description = movie.tagline + \
u" — " + movie.overview
else:
description = movie.overview
# Download artwork
if options is None or not options.art:
url = movie.poster.geturl()
rest, artfn = rsplit(url, "/", 1)
artpath = os.path.join("/tmp", artfn)
if not os.path.exists(artpath):
jpeg = urllib.urlopen(url).read()
fp = open(artpath, "w")
fp.write(jpeg)
fp.close()
options.art = artpath
info = { "song": movie.title,
"description": description, }
genres = movie.genres
if len(genres) > 0:
info["genre"] = genres[0].name
if movie.releasedate:
info["year"] = str(movie.releasedate.year)
return info
def name_and_year(filename):
"""
    Attempt to parse a filename. Return a ( name, year, ) tuple.
The year may be None, otherwise it's an integer.
"""
filename_re = re.compile(r"(.*?)\.(\d+)\.\d+p.*")
match = filename_re.match(filename)
if match is not None:
name, year = match.groups()
name = replace(name, ".", " ")
return ( name, int(year), )
filename_re = re.compile(r"(.*?)\((\d+)\).*")
match = filename_re.match(filename)
if match is not None:
name, year = match.groups()
return ( strip(name), int(year), )
else:
try:
name, ext = split(filename, ".", 1)
except ValueError:
name = filename
return ( strip(name), None, )
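
# Illustrative examples, not part of the original module: the filenames below
# are made up, but the return values are what name_and_year() produces for
# these common patterns.
def _example_name_and_year():
    assert name_and_year("The.Big.Lebowski.1998.1080p.mkv") == ("The Big Lebowski", 1998)
    assert name_and_year("Blade Runner (1982).m4v") == ("Blade Runner", 1982)
    assert name_and_year("Metropolis") == ("Metropolis", None)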
|
gpl-3.0
| 2,584,131,944,249,580,500 | 28.446927 | 77 | 0.564599 | false |
Azure/azure-storage-python
|
azure-storage-file/azure/storage/file/_download_chunking.py
|
1
|
6685
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import threading
def _download_file_chunks(file_service, share_name, directory_name, file_name,
download_size, block_size, progress, start_range, end_range,
stream, max_connections, progress_callback, validate_content,
timeout, operation_context, snapshot):
downloader_class = _ParallelFileChunkDownloader if max_connections > 1 else _SequentialFileChunkDownloader
downloader = downloader_class(
file_service,
share_name,
directory_name,
file_name,
download_size,
block_size,
progress,
start_range,
end_range,
stream,
progress_callback,
validate_content,
timeout,
operation_context,
snapshot,
)
if max_connections > 1:
import concurrent.futures
executor = concurrent.futures.ThreadPoolExecutor(max_connections)
list(executor.map(downloader.process_chunk, downloader.get_chunk_offsets()))
else:
for chunk in downloader.get_chunk_offsets():
downloader.process_chunk(chunk)
class _FileChunkDownloader(object):
def __init__(self, file_service, share_name, directory_name, file_name,
download_size, chunk_size, progress, start_range, end_range,
stream, progress_callback, validate_content, timeout, operation_context, snapshot):
# identifiers for the file
self.file_service = file_service
self.share_name = share_name
self.directory_name = directory_name
self.file_name = file_name
# information on the download range/chunk size
self.chunk_size = chunk_size
self.download_size = download_size
self.start_index = start_range
self.file_end = end_range
# the destination that we will write to
self.stream = stream
# progress related
self.progress_callback = progress_callback
self.progress_total = progress
# parameters for each get file operation
self.validate_content = validate_content
self.timeout = timeout
self.operation_context = operation_context
self.snapshot = snapshot
def get_chunk_offsets(self):
index = self.start_index
while index < self.file_end:
yield index
index += self.chunk_size
def process_chunk(self, chunk_start):
if chunk_start + self.chunk_size > self.file_end:
chunk_end = self.file_end
else:
chunk_end = chunk_start + self.chunk_size
chunk_data = self._download_chunk(chunk_start, chunk_end).content
length = chunk_end - chunk_start
if length > 0:
self._write_to_stream(chunk_data, chunk_start)
self._update_progress(length)
# should be provided by the subclass
def _update_progress(self, length):
pass
# should be provided by the subclass
def _write_to_stream(self, chunk_data, chunk_start):
pass
def _download_chunk(self, chunk_start, chunk_end):
return self.file_service._get_file(
self.share_name,
self.directory_name,
self.file_name,
start_range=chunk_start,
end_range=chunk_end - 1,
validate_content=self.validate_content,
timeout=self.timeout,
_context=self.operation_context,
snapshot=self.snapshot
)
class _ParallelFileChunkDownloader(_FileChunkDownloader):
def __init__(self, file_service, share_name, directory_name, file_name,
download_size, chunk_size, progress, start_range, end_range,
stream, progress_callback, validate_content, timeout, operation_context, snapshot):
super(_ParallelFileChunkDownloader, self).__init__(file_service, share_name, directory_name, file_name,
download_size, chunk_size, progress, start_range, end_range,
stream, progress_callback, validate_content, timeout,
operation_context, snapshot)
# for a parallel download, the stream is always seekable, so we note down the current position
# in order to seek to the right place when out-of-order chunks come in
self.stream_start = stream.tell()
# since parallel operations are going on
# it is essential to protect the writing and progress reporting operations
self.stream_lock = threading.Lock()
self.progress_lock = threading.Lock()
def _update_progress(self, length):
if self.progress_callback is not None:
with self.progress_lock:
self.progress_total += length
total_so_far = self.progress_total
self.progress_callback(total_so_far, self.download_size)
def _write_to_stream(self, chunk_data, chunk_start):
with self.stream_lock:
self.stream.seek(self.stream_start + (chunk_start - self.start_index))
self.stream.write(chunk_data)
class _SequentialFileChunkDownloader(_FileChunkDownloader):
def __init__(self, file_service, share_name, directory_name, file_name, download_size, chunk_size, progress,
start_range, end_range, stream, progress_callback, validate_content, timeout, operation_context,
snapshot):
super(_SequentialFileChunkDownloader, self).__init__(file_service, share_name, directory_name, file_name,
download_size, chunk_size, progress, start_range,
end_range, stream, progress_callback, validate_content,
timeout, operation_context, snapshot)
def _update_progress(self, length):
if self.progress_callback is not None:
self.progress_total += length
self.progress_callback(self.progress_total, self.download_size)
def _write_to_stream(self, chunk_data, chunk_start):
# chunk_start is ignored in the case of sequential download since we cannot seek the destination stream
self.stream.write(chunk_data)
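
# A small illustrative sketch, not part of the SDK: the chunking arithmetic
# used by get_chunk_offsets/process_chunk above, shown in isolation. With a
# 10-byte range and 4-byte chunks the generated (start, end) pairs are
# (0, 4), (4, 8) and (8, 10); the final chunk is clamped to the end range.
def _example_chunk_ranges(start_range=0, end_range=10, chunk_size=4):
    ranges = []
    index = start_range
    while index < end_range:
        ranges.append((index, min(index + chunk_size, end_range)))
        index += chunk_size
    return ranges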
|
mit
| 1,599,597,774,716,400,000 | 41.044025 | 119 | 0.597457 | false |
carljm/django-model-utils
|
docs/conf.py
|
1
|
8114
|
#
# django-model-utils documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 31 22:27:07 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
from pkg_resources import get_distribution
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'django-model-utils'
copyright = '2015, Carl Meyer'
parent_dir = os.path.dirname(os.path.dirname(__file__))
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
release = get_distribution('django-model-utils').version
# for example take major/minor
version = '.'.join(release.split('.')[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-model-utilsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-model-utils.tex', 'django-model-utils Documentation',
'Carl Meyer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-model-utils', 'django-model-utils Documentation',
['Carl Meyer'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-model-utils', 'django-model-utils Documentation',
'Carl Meyer', 'django-model-utils', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
bsd-3-clause
| 6,096,815,436,245,315,000 | 31.586345 | 80 | 0.707419 | false |
lino-framework/lino
|
lino/core/actions.py
|
1
|
42601
|
# -*- coding: UTF-8 -*-
# Copyright 2009-2021 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
"""This defines the :class:`Action` class and the :func:`action`
decorator, and some of the standard actions. See :ref:`dev.actions`.
"""
import logging; logger = logging.getLogger(__name__)
from django.utils.translation import gettext_lazy as _
from django.utils.translation import gettext
from django.utils.text import format_lazy
from django.utils.encoding import force_str
from django.conf import settings
from django.db import models
from django.apps import apps ; get_models = apps.get_models
from lino.core import constants
from lino.core import layouts
from lino.core import fields
from lino.core import keyboard
from lino.modlib.users.utils import get_user_profile
from lino.utils.choosers import check_for_chooser
from .permissions import Permittable
from .utils import obj2unicode
from .utils import resolve_model
from .utils import navinfo
from .utils import Parametrizable
from .utils import traverse_ddh_fklist
from .requests import InstanceAction
def discover_choosers():
logger.debug("Discovering choosers for database fields...")
# ~ logger.debug("Instantiate model reports...")
for model in get_models():
# ~ n = 0
allfields = model._meta.fields
for field in allfields:
check_for_chooser(model, field)
# ~ logger.debug("Discovered %d choosers in model %s.",n,model)
def install_layout(cls, k, layout_class, **options):
"""
- `cls` is the actor (a class object)
- `k` is one of 'detail_layout', 'insert_layout', 'params_layout', 'card_layout'
- `layout_class`
"""
# if str(cls) == 'courses.Pupils':
# print("20160329 install_layout", k, layout_class)
dl = cls.__dict__.get(k, None)
if dl is None: # and not cls._class_init_done:
dl = getattr(cls, k, None)
if dl is None:
return
if isinstance(dl, str):
if '\n' in dl or not '.' in dl:
setattr(cls, k, layout_class(dl, cls, **options))
else:
layout_class = settings.SITE.models.resolve(dl)
if layout_class is None:
raise Exception("Unresolved {} {!r} for {}".format(k, dl, cls))
setattr(cls, k, layout_class(None, cls, **options))
elif isinstance(dl, layouts.Panel):
options.update(dl.options)
setattr(cls, k, layout_class(dl.desc, cls, **options))
else:
if not isinstance(dl, layout_class):
if not isinstance(cls, type):
# cls is an action instance
cls = cls.__class__
msg = "{}.{}.{} must be a string, " \
"a Panel or an instance of {} (not {!r})"
raise Exception(msg.format(
cls.__module__, cls.__name__, k, layout_class.__name__, dl))
if dl._datasource is None:
dl.set_datasource(cls)
setattr(cls, k, dl)
elif not issubclass(cls, dl._datasource):
raise Exception(
"Cannot reuse %s instance (%s of %r) for %r" %
(dl.__class__, k, dl._datasource, cls))
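# Usage sketch (illustrative only, not part of the original module; `MyTable`
# is a hypothetical actor): during actor initialisation Lino ends up calling
# something like
#
#   install_layout(MyTable, 'detail_layout', layouts.DetailLayout)
#
# where `MyTable.detail_layout` may be a template string, a layouts.Panel or
# an already-instantiated layout object, as handled above.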
def register_params(cls):
"""`cls` is either an actor (a class object) or an action (an
instance).
"""
if cls.parameters is not None:
for k, v in cls.parameters.items():
v.set_attributes_from_name(k)
v.table = cls
# v.model = cls # 20181023 experimentally
if cls.params_layout is None:
cls.params_layout = cls._layout_class.join_str.join(
cls.parameters.keys())
install_layout(cls, 'params_layout', cls._layout_class)
# e.g. ledger.ByJournal is just a mixin but provides a default value for its children
elif cls.params_layout is not None:
raise Exception(
"{} has a params_layout but no parameters".format(
cls))
# if isinstance(cls, type) and cls.__name__.endswith("Users"):
# # if isinstance(cls, type) and cls.model is not None and cls.model.__name__ == "User":
# # if str(cls.model) != "users.User":
# # raise Exception("{} {}".format(cls, cls.model))
# print("20200825 {}.register_params {} {}".format(
# cls, cls.parameters, cls.params_layout))
def setup_params_choosers(self):
if self.parameters:
for k, fld in self.parameters.items():
if isinstance(fld, models.ForeignKey):
msg = "Invalid target %s in parameter {} of {}".format(
k, self)
fld.remote_field.model = resolve_model(fld.remote_field.model, strict=msg)
fields.set_default_verbose_name(fld)
check_for_chooser(self, fld)
def make_params_layout_handle(self):
# `self` is either an Action instance or an Actor class object
return self.params_layout.get_layout_handle(
settings.SITE.kernel.default_ui)
class Action(Parametrizable, Permittable):
"""
Abstract base class for all actions.
The first argument is the optional `label`, other arguments should
be specified as keywords and can be any of the existing class
attributes.
"""
# ~ __metaclass__ = ActionMetaClass
_layout_class = layouts.ActionParamsLayout
label = None
button_text = None
button_color = None
"""
    The color to be used on icon-less buttons for this action
(i.e. which have no :attr:`icon_name`). See also
:attr:`lino.core.site.Site.use_silk_icons`.
Not yet implemented. This is currently being ignored.
"""
debug_permissions = False
save_action_name = None
disable_primary_key = True
"""
Whether primary key fields should be disabled when using this
action. This is `True` for all actions except :class:`ShowInsert`.
"""
keep_user_values = False
"""
Whether the parameter window should keep its values between
different calls. If this is True, Lino does not fill any default
values and leaves those from a previous call.
Deprecated because it (1) is not used on any production site, (2) has a
least two side effect: the fields *never* get a default value, even not on
first execution, and you cannot explicitly specify programmatic field
values. And (3) we actually wouldn't want to specify this per action but per
field.
"""
icon_name = None
"""
The class name of an icon to be used for this action when
rendered as toolbar button. Allowed icon names are defined in
:data:`lino.core.constants.ICON_NAMES`.
"""
ui5_icon_name = None
react_icon_name = None
hidden_elements = frozenset()
combo_group = None
"""
The name of another action to which to "attach" this action.
Both actions will then be rendered as a single combobutton.
"""
parameters = None
use_param_panel = False
"""
Used internally. This is True for window actions whose window use
the parameter panel: grid and emptytable (but not showdetail)
"""
no_params_window = False
"""
Set this to `True` if your action has :attr:`parameters` but you
do *not* want it to open a window where the user can edit these
parameters before calling the action.
Setting this attribute to `True` means that the calling code must
explicitly set all parameter values. Usage example are the
:attr:`lino_xl.lib.polls.models.AnswersByResponse.answer_buttons`
and :attr:`lino_xl.lib-tickets.Ticket.quick_assign_to`
virtual fields.
"""
sort_index = 90
"""
Determines the sort order in which the actions will be presented
to the user.
List actions are negative and come first.
Predefined `sort_index` values are:
===== =================================
value action
===== =================================
-1 :class:`as_pdf <lino_xl.lib.appypod.PrintTableAction>`
10 :class:`ShowInsert`
11 :attr:`duplicate <lino.mixins.duplicable.Duplicable.duplicate>`
20 :class:`detail <ShowDetail>`
30 :class:`delete <DeleteSelected>`
31 :class:`merge <lino.core.merge.MergeAction>`
50 :class:`Print <lino.mixins.printable.BasePrintAction>`
51 :class:`Clear Cache <lino.mixins.printable.ClearCacheAction>`
52 :attr:`lino.modlib.users.UserPlan.start_plan`
53 :attr:`lino.modlib.users.UserPlan.update_plan`
60 :class:`ShowSlaveTable`
90 default for all custom row actions
100 :class:`SubmitDetail`
200 default for all workflow actions (:class:`ChangeStateAction <lino.core.workflows.ChangeStateAction>`)
===== =================================
"""
help_text = None
submit_form_data = False
"""
Should the running of the action include all known form values in
the request.
"""
auto_save = True
"""
What to do when this action is being called while the user is on a
dirty record.
- `False` means: forget any changes in current record and run the
action.
- `True` means: save any changes in current record before running
the action. `None` means: ask the user.
"""
extjs_main_panel = None
"""
Used by :mod:`lino_xl.lib.extensible` and
:mod:`lino.modlib.awesome_uploader`.
Example::
class CalendarAction(dd.Action):
extjs_main_panel = "Lino.CalendarApp().get_main_panel()"
...
"""
js_handler = None
"""
This is usually `None`. Otherwise it is the name of a Javascript
callable to be called without arguments. That callable must have
been defined in a :attr:`lino.core.plugin.Plugin.site_js_snippets`
of the plugin.
"""
action_name = None
"""
Internally used to store the name of this action within the
defining Actor's namespace.
"""
defining_actor = None
"""
The :class:`lino.core.actors.Actor` who uses this action for the
first time. This is set during :meth:`attach_to_actor`. This is
used internally e.g. by :mod:`lino.modlib.extjs` when generating
JavaScript code for certain actions.
"""
parameters = None
"""
See :attr:`lino.core.utils.Parametrizable.parameters`.
"""
key = None
"""
Not used. The keyboard hotkey to associate to this action in a
user interface.
"""
default_format = 'html'
"""
Used internally.
"""
editable = True
"""
Whether the parameter fields should be editable.
Setting this to False seems nonsense.
"""
readonly = True
opens_a_window = False
# hide_top_toolbar = False
# """
# Used internally if :attr:`opens_a_window` to say whether the
# window has a top toolbar.
#
# """
# hide_navigator = False
# """
# Used internally if :attr:`opens_a_window` to say whether the
# window has a navigator.
#
# """
show_in_plain = False
"""
Whether this action should be displayed as a button in the toolbar
of a plain html view.
"""
show_in_bbar = True
"""
Whether this action should be displayed in the toolbar.
TODO: rename this to `show_in_toolbar`.
In ExtJS this will also cause it to be in the context menu of a grid.
For example the :class:`CheckinVisitor
<lino_xl.lib.reception.CheckinVisitor>`,
:class:`ReceiveVisitor
<lino_xl.lib.reception.ReceiveVisitor>` and
:class:`CheckoutVisitor
<lino_xl.lib.reception.CheckoutVisitor>` actions have this
attribute explicitly set to `False` because otherwise they would be
visible in the toolbar.
"""
show_in_workflow = False
"""
Whether this action should be displayed in the
:attr:`workflow_buttons <lino.core.model.Model.workflow_buttons>`
column. If this is True, then Lino will automatically set
:attr:`custom_handler` to True.
"""
custom_handler = False
"""
Whether this action is implemented as Javascript function call.
This is necessary if you want your action to be callable using an
"action link" (html button).
"""
select_rows = True
"""
True if this action needs an object to act on.
Set this to `False` if this action is a list action, not a row
action.
"""
http_method = 'GET'
"""
HTTP method to use when this action is called using an AJAX call.
"""
preprocessor = 'null' # None
"""
Name of a Javascript function to be invoked on the web client when
this action is called.
"""
window_type = None
"""
On actions that opens_a_window this must be a unique one-letter
string expressing the window type.
Allowed values are:
- None : opens_a_window is False
- 't' : ShowTable
- 'd' : ShowDetail
- 'i' : ShowInsert
This can be used e.g. by a summary view to decide how to present the
summary data (usage example
:meth:`lino.modlib.uploads.AreaUploads.get_table_summary`).
"""
callable_from = "td"
"""
A string that specifies from which :attr:`window_type` this action
is callable. None means that it is only callable from code.
Default value is 'td' which means from both table and detail
(including ShowEmptyTable which is subclass of ShowDetail). But
not callable from ShowInsert.
"""
hide_virtual_fields = False
required_states = None
def __init__(self, label=None, **kwargs):
if label is not None:
self.label = label
# if self.parameters is not None and self.select_rows:
# self.show_in_bbar = False
# # see ticket #105
for k, v in kwargs.items():
if not hasattr(self, k):
raise Exception("Invalid action keyword %s" % k)
setattr(self, k, v)
if self.show_in_workflow:
self.custom_handler = True
if self.icon_name:
if self.icon_name not in constants.ICON_NAMES:
raise Exception(
"Unkonwn icon_name '{0}'".format(self.icon_name))
register_params(self)
def __get__(self, instance, owner):
"""
When a model has an action "foo", then getting an attribute
"foo" of a model instance will return an :class:`InstanceAction`.
"""
if instance is None:
return self
return InstanceAction(
self, instance.get_default_table(), instance, owner)
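    # Illustrative sketch (hypothetical names): if `MyModel` declares
    # `foo = SomeAction()`, then `obj.foo` on a model instance yields an
    # InstanceAction bound to `obj` and its default table, which can then be
    # invoked from application code.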
def get_django_form(self):
"""returns a django form object based on the params of this action"""
from django import forms
mapping = {
"PasswordField":"CharField"
}
class LinoForm(forms.Form):
pass
for name,field in self.parameters.items():
setattr(LinoForm, name, getattr(forms, mapping.get(field.__class__.__name__,field.__class__.__name__))())
return LinoForm
@classmethod
def decorate(cls, *args, **kw):
"""
Return a decorator which turns an instance method on a model or a
class method on an actor into an action of this class.
        The decorated method will be installed as the action's
:meth:`run_from_ui <Action.run_from_ui>` method.
All arguments are forwarded to :meth:`Action.__init__`.
"""
def decorator(fn):
assert not 'required' in kw
# print 20140422, fn.__name__
kw.setdefault('custom_handler', True)
a = cls(*args, **kw)
def wrapped(ar):
obj = ar.selected_rows[0] if ar.selected_rows else ar.actor.model
return fn(obj, ar)
a.run_from_ui = wrapped
return a
return decorator
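    # Usage sketch (illustrative only; `Book` and `mark_published` are
    # hypothetical): via the module-level alias ``action = Action.decorate``
    # (defined near the end of this file, usually imported as ``dd.action``),
    # a model method becomes a row action:
    #
    #   class Book(dd.Model):
    #       @dd.action(_("Publish"))
    #       def mark_published(self, ar):
    #           self.published = True
    #           self.save()
    #           ar.success(refresh=True)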
def get_required_roles(self, actor):
return actor.required_roles
def is_callable_from(self, caller):
"""
Return `True` if this action makes sense as a button from within
the specified `caller` (an action instance which must have a
:attr:`window_type`). Do not override this method on your
subclass ; rather specify :attr:`callable_from`.
"""
assert caller.window_type is not None
if self.callable_from is None:
return False
return caller.window_type in self.callable_from
# return isinstance(caller, self.callable_from)
def is_window_action(self):
"""Return `True` if this is a "window action" (i.e. which opens a GUI
        window on the client before executing).
"""
return self.opens_a_window or (
self.parameters and not self.no_params_window)
def get_status(self, ar, **kw):
if self.parameters is not None:
if self.keep_user_values:
kw.update(field_values={})
else:
defaults = kw.get('field_values', {})
pv = self.params_layout.params_store.pv2dict(
ar, ar.action_param_values, **defaults)
kw.update(field_values=pv)
return kw
def get_chooser_for_field(self, fieldname):
d = getattr(self, '_choosers_dict', {})
return d.get(fieldname, None)
def get_choices_text(self, obj, request, field):
return obj.get_choices_text(request, self, field)
def make_params_layout_handle(self):
return make_params_layout_handle(self)
def get_data_elem(self, name):
# same as in Actor but here it is an instance method
return self.defining_actor.get_data_elem(name)
def get_param_elem(self, name):
# same as in Actor but here it is an instance method
if self.parameters:
return self.parameters.get(name, None)
return None
def get_widget_options(self, name, **options):
# same as in Actor but here it is an instance method
return options
def get_label(self):
"""
Return the `label` of this action, or the `action_name` if the
action has no explicit label.
"""
return self.label or self.action_name
def get_button_label(self, actor):
if actor is None or actor.default_action is None:
return self.label
if self is actor.default_action.action:
return actor.label
# return actor.get_actor_label() # 20200307
else:
return self.button_text or self.label
# since 20140923 return u"%s %s" % (self.label, actor.label)
def full_name(self, actor):
if self.action_name is None:
raise Exception("Tried to full_name() on %r" % self)
# ~ return repr(self)
if self.parameters and not self.no_params_window:
return self.defining_actor.actor_id + '.' + self.action_name
return str(actor) + '.' + self.action_name
def get_action_title(self, ar):
return ar.get_title()
def __repr__(self):
if self.label is None:
name = self.action_name
else:
label_repr = repr(str(self.label))
name = "{} ({})".format(self.action_name, label_repr)
# if self.button_text:
# name = repr(str(self.button_text)) + " " + name
return "<{}.{} {}>".format(
self.__class__.__module__,
self.__class__.__name__,
name)
def __str__(self):
# return force_str(self.label)
# return str(self.get_label())
return str(self.get_label())
def unused__str__(self):
raise Exception("20121003 Must use full_name(actor)")
if self.defining_actor is None:
return repr(self)
if self.action_name is None:
return repr(self)
return str(self.defining_actor) + ':' + self.action_name
# ~ def set_permissions(self,*args,**kw)
# ~ self.permission = perms.factory(*args,**kw)
def attach_to_workflow(self, wf, name):
if self.action_name is not None:
assert self.action_name == name
self.action_name = name
self.defining_actor = wf
setup_params_choosers(self)
def attach_to_actor(self, owner, name):
"""
Called once per actor and per action on startup before a
:class:`BoundAction` instance is created. If this returns
False, then the action won't be attached to the given actor.
The owner is the actor which "defines" the action, i.e. uses
that instance for the first time. Subclasses of the owner may
re-use the same instance without becoming the owner.
"""
# if not actor.editable and not self.readonly:
# return False
if self.defining_actor is not None:
# already defined by another actor
return True
self.defining_actor = owner
# if self.label is None:
# self.label = name
if self.action_name is not None:
return True
# if name == self.action_name:
# return True
# raise Exception(
# "tried to attach named action %s.%s as %s" %
# (actor, self.action_name, name))
self.action_name = name
setup_params_choosers(self)
# setup_params_choosers(self.__class__)
return True
def get_action_permission(self, ar, obj, state):
"""Return (True or False) whether the given :class:`ActionRequest
<lino.core.requests.BaseRequest>` `ar` should get permission
to run on the given Model instance `obj` (which is in the
given `state`).
Derived Action classes may override this to add vetos.
E.g. the MoveUp action of a Sequenced is not available on the
first row of given `ar`.
This should be used only for light-weight tests. If this
requires a database lookup, consider disabling the action in
:meth:`disabled_fields
<lino.core.model.Model.disabled_fields>` where you can disable
multiple actions and fields at once.
"""
return True
def get_view_permission(self, user_type):
"""
Return True if this action is visible for users of given user_type.
"""
return True
def run_from_ui(self, ar, **kwargs):
"""
Execute the action. `ar` is an :class:`ActionRequest
<lino.core.requests.BaseRequest>` object representing the
context in which the action is running.
"""
raise NotImplementedError(
"%s has no run_from_ui() method" % self.__class__)
def run_from_code(self, ar=None, *args, **kwargs):
"""
Probably to be deprecated.
Execute the action. The default calls :meth:`run_from_ui`. You
may override this to define special behaviour
"""
self.run_from_ui(ar, *args, **kwargs)
def run_from_session(self, ses, *args, **kw): # 20130820
if len(args):
obj = args[0]
else:
obj = None
ia = InstanceAction(self, self.defining_actor, obj, None)
return ia.run_from_session(ses, **kw)
def action_param_defaults(self, ar, obj, **kw):
"""Same as :meth:`lino.core.actors.Actor.param_defaults`, except that
        on an action it is an instance method.
Note that this method is not called for actions which are rendered
in a toolbar (:ticket:`1336`).
Usage examples:
:class:`lino.modlib.users.actions.SendWelcomeMail`
"""
for k, pf in list(self.parameters.items()):
# print 20151203, pf.name, repr(pf.rel.to)
kw[k] = pf.get_default()
return kw
def setup_action_request(self, actor, ar):
pass
def get_layout_aliases(self):
"""
Yield a series of (ALIAS, repl) tuples that cause a name ALIAS in a
layout based on this action to be replaced by its replacement `repl`.
"""
return []
class TableAction(Action):
def get_action_title(self, ar):
return ar.get_title()
# class RedirectAction(Action):
# def get_target_url(self, elem):
# raise NotImplementedError
class ShowTable(TableAction):
use_param_panel = True
show_in_workflow = False
opens_a_window = True
window_type = 't'
action_name = 'grid'
select_rows = False
callable_from = None
def get_label(self):
return self.label or self.defining_actor.label
def get_window_layout(self, actor):
# ~ return self.actor.list_layout
return None
def get_window_size(self, actor):
return actor.window_size
class ShowDetail(Action):
help_text = _("Open a detail window on this record.")
action_name = 'detail'
label = _("Detail")
icon_name = 'application_form'
ui5_icon_name = "sap-icon://detail-view"
opens_a_window = True
window_type = 'd'
show_in_workflow = False
save_action_name = 'submit_detail'
callable_from = 't'
sort_index = 20
def __init__(self, dl, label=None, **kwargs):
self.owner = dl
super(ShowDetail, self).__init__(label, **kwargs)
def get_required_roles(self, actor):
if self.owner.required_roles is None:
return actor.required_roles
return self.owner.required_roles
def get_window_layout(self, actor):
return actor.detail_layout
def get_window_size(self, actor):
wl = self.get_window_layout(actor)
return wl.window_size
class ShowEmptyTable(ShowDetail):
"""
The default action for :class:`lino.utils.report.EmptyTable`.
"""
use_param_panel = True
action_name = 'show'
default_format = 'html'
icon_name = None
callable_from = 't'
# def attach_to_actor(self, actor, name):
# self.label = actor.label
# return super(ShowEmptyTable, self).attach_to_actor(actor, name)
def get_label(self):
return self.label or self.defining_actor.label
def as_bootstrap_html(self, ar):
return super(ShowEmptyTable, self).as_bootstrap_html(ar, '-99998')
class ShowInsert(TableAction):
save_action_name = 'submit_insert'
show_in_plain = True
disable_primary_key = False
label = _("New")
if True: # settings.SITE.use_silk_icons:
icon_name = 'add' # if action rendered as toolbar button
else:
# button_text = u"❏" # 274F Lower right drop-shadowed white square
# button_text = u"⊞" # 229e SQUARED PLUS
button_text = "⊕" # 2295 circled plus
ui5_icon_name = "sap-icon://add"
help_text = _("Insert a new record")
show_in_workflow = False
opens_a_window = True
window_type = 'i'
# hide_navigator = True
sort_index = 10
# hide_top_toolbar = True
# required_roles = set([SiteUser])
action_name = 'insert'
key = keyboard.INSERT # (ctrl=True)
hide_virtual_fields = True
# readonly = False
select_rows = False
http_method = "POST"
def attach_to_actor(self, owner, name):
if owner.model is not None:
self.help_text = format_lazy(
_("Open a dialog window to insert a new {}."), owner.model._meta.verbose_name)
return super(ShowInsert, self).attach_to_actor(owner, name)
def get_action_title(self, ar):
# return _("Insert into %s") % force_str(ar.get_title())
if ar.actor.model is None:
return _("Insert into %s") % force_str(ar.get_title())
return format_lazy(_("New {}"), ar.actor.model._meta.verbose_name)
def get_window_layout(self, actor):
return actor.insert_layout or actor.detail_layout
def get_window_size(self, actor):
wl = self.get_window_layout(actor)
return wl.window_size
def get_view_permission(self, user_type):
# the action is readonly because it doesn't write to the
# current object, but since it does modify the database we
# want to hide it for readonly users.
if user_type and user_type.readonly:
return False
return super(ShowInsert, self).get_view_permission(user_type)
def create_instance(self, ar):
"""
Create a temporary instance that will not be saved, used only to
build the button.
"""
return ar.create_instance()
def get_status(self, ar, **kw):
kw = super(ShowInsert, self).get_status(ar, **kw)
if 'record_id' in kw:
return kw
if 'data_record' in kw:
return kw
# raise Exception("20150218 %s" % self)
elem = self.create_instance(ar)
rec = ar.elem2rec_insert(ar.ah, elem)
kw.update(data_record=rec)
return kw
# class UpdateRowAction(Action):
# show_in_workflow = False
# readonly = False
# # required_roles = set([SiteUser])
# this is a first attempt to solve the "cannot use active fields in
# insert window" problem. not yet ready for use. the idea is that
# active fields should not send a real "save" request (either POST or
# PUT) in the background but a "validate_form" request which creates a
# dummy instance from form content, calls it's full_clean() method to
# have other fields filled in, and then return the modified form
# content. Fails because the Record.phantom in ExtJS then still gets
# lost.
class ValidateForm(Action):
# called by active_fields
show_in_workflow = False
action_name = 'validate'
readonly = False
auto_save = False
callable_from = None
def run_from_ui(self, ar, **kwargs):
elem = ar.create_instance_from_request(**kwargs)
ar.ah.store.form2obj(ar, ar.rqdata, elem, False)
elem.full_clean()
ar.success()
# ar.set_response(rows=[ar.ah.store.row2list(ar, elem)])
ar.goto_instance(elem)
class SaveGridCell(Action):
"""
Called when user edited a cell of a non-phantom record in a grid.
Installed as `update_action` on every :class:`Actor`.
"""
sort_index = 10
show_in_workflow = False
action_name = 'grid_put'
http_method = "PUT"
readonly = False
auto_save = False
callable_from = None
def run_from_ui(self, ar, **kw):
# logger.info("20140423 SubmitDetail")
elem = ar.selected_rows[0]
elem.save_existing_instance(ar)
ar.set_response(rows=[ar.ah.store.row2list(ar, elem)])
# We also need *either* `rows` (when this was called from a
# Grid) *or* `goto_instance` (when this was called from a
# form).
class SubmitDetail(SaveGridCell):
"""Save changes in the detail form.
This is rendered as the "Save" button of a :term:`detail window`.
Installed as `submit_detail` on every actor.
"""
sort_index = 100
icon_name = 'disk'
help_text = _("Save changes in this form")
label = _("Save")
action_name = ShowDetail.save_action_name
submit_form_data = True
callable_from = 'd'
def run_from_ui(self, ar, **kw):
# logger.info("20210213a SubmitDetail")
for elem in ar.selected_rows:
# logger.info("20210213b SubmitDetail %s", elem)
elem.save_existing_instance(ar)
if not settings.SITE.is_installed("react"):
# No point in clos
if ar.actor.stay_in_grid:
ar.close_window()
else:
ar.goto_instance(elem)
class CreateRow(Action):
"""
Called when user edited a cell of a phantom record in a grid.
"""
sort_index = 10
auto_save = False
show_in_workflow = False
readonly = False
callable_from = None
http_method = "POST"
# select_rows = False
# submit_form_data = True
def run_from_ui(self, ar, **kwargs):
elem = ar.create_instance_from_request(**kwargs)
self.save_new_instance(ar, elem)
def save_new_instance(self, ar, elem):
elem.save_new_instance(ar)
ar.success(_("%s has been created.") % obj2unicode(elem))
# print(19062017, "Ticket 1910")
if ar.actor.handle_uploaded_files is None:
# The `rows` can contain complex strings which cause
# decoding problems on the client when responding to a
# file upload
ar.set_response(rows=[ar.ah.store.row2list(ar, elem)])
ar.set_response(navinfo=navinfo(ar.data_iterator, elem))
# if ar.actor.stay_in_grid and ar.requesting_panel:
if ar.actor.stay_in_grid:
# do not open a detail window on the new instance
return
ar.goto_instance(elem)
# No need to ask refresh_all since closing the window will
# automatically refresh the underlying window.
def save_new_instances(self, ar, elems):
"""Currently only used for file uploads."""
for e in elems:
e.save_new_instance(ar)
ar.success(
_("%s files have been uploaded: %s") % (len(elems), "\n".join([obj2unicode(elem) for elem in elems])))
# print(19062017, "Ticket 1910")
if ar.actor.handle_uploaded_files is None:
            ar.set_response(rows=[ar.ah.store.row2list(ar, elems[0])])
            ar.set_response(navinfo=navinfo(ar.data_iterator, elems[0]))
else:
# Must set text/html for file uploads, otherwise the
# browser adds a <PRE></PRE> tag around the AJAX response.
ar.set_content_type('text/html')
# if ar.actor.stay_in_grid and ar.requesting_panel:
if ar.actor.stay_in_grid:
# do not open a detail window on the new instance
return
        ar.goto_instance(elems[0])
# No need to ask refresh_all since closing the window will
# automatically refresh the underlying window.
class SubmitInsert(CreateRow):
label = _("Create")
action_name = None # 'post'
help_text = _("Create the record and open a detail window on it")
http_method = "POST"
callable_from = 'i'
def run_from_ui(self, ar, **kwargs):
# must set requesting_panel to None, otherwise javascript
# button actions would try to refer the requesting panel which
# is going to be closed (this disturbs at least in ticket
# #219)
ar.requesting_panel = None
if ar.actor.handle_uploaded_files is not None:
# Must set text/html for file uploads, otherwise the
# browser adds a <PRE></PRE> tag around the AJAX response.
# 20210217 And this is true also in case of a ValidationError
ar.set_content_type('text/html')
# print("20201230 SubmitInsert.run_from_ui", ar)
if ar.actor.handle_uploaded_files is not None and len(ar.request.FILES.getlist("file")) > 1:
# Multiple uploads possible, note plural method names.
elems = ar.create_instances_from_request(**kwargs)
self.save_new_instances(ar, elems)
ar.set_response(close_window=True)
else:
elem = ar.create_instance_from_request(**kwargs)
self.save_new_instance(ar, elem)
ar.set_response(close_window=True)
# if settings.SITE.is_installed("react"):
# ar.goto_instance(elem)
# ar.set_response(
# eval_js=ar.renderer.obj2url(ar, elem).replace('javascript:', '', 1)
# )
# class SubmitInsertAndStay(SubmitInsert):
# sort_index = 11
# switch_to_detail = False
# action_name = 'poststay'
# label = _("Create without detail")
# help_text = _("Don't open a detail window on the new record")
class ExplicitRefresh(Action): # experimental 20170929
label = _("Go")
show_in_bbar = False
# js_handler = 'function(panel) {panel.refresh()}'
js_handler = 'function(btn, evt) {console.log("20170928", this); this.refresh()}'
# def run_from_ui(self, ar, **kw):
# ar.set_response(refresh_all=True)
class ShowSlaveTable(Action):
"""
An action which opens a window showing another table (to be
specified when instantiating the action).
"""
TABLE2ACTION_ATTRS = ('help_text', 'icon_name', 'react_icon_name', 'label',
'sort_index', 'required_roles', 'button_text')
show_in_bbar = True
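    # Usage sketch (illustrative only; names are hypothetical): a model or actor
    # can expose another table as a window-opening button, e.g.
    #
    #   class Partner(dd.Model):
    #       show_invoices = ShowSlaveTable('myapp.InvoicesByPartner')
    #
    # Unless given explicitly, label, icon and sort order are copied from the
    # slave table via TABLE2ACTION_ATTRS in attach_to_actor().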
def __init__(self, slave_table, **kw):
self.slave_table = slave_table
self.explicit_attribs = set(kw.keys())
super(ShowSlaveTable, self).__init__(**kw)
@classmethod
def get_actor_label(self):
return self.get_label() or self.slave_table.label
def attach_to_actor(self, actor, name):
if isinstance(self.slave_table, str):
T = settings.SITE.models.resolve(self.slave_table)
if T is None:
msg = "Invalid action {} on actor {!r}: " \
"no table named {}".format(
name, actor, self.slave_table)
raise Exception(msg)
self.slave_table = T
for k in self.TABLE2ACTION_ATTRS:
if k not in self.explicit_attribs:
attr = getattr(self.slave_table, k, None)
setattr(self, k, attr)
return super(ShowSlaveTable, self).attach_to_actor(actor, name)
def run_from_ui(self, ar, **kw):
obj = ar.selected_rows[0]
sar = ar.spawn(self.slave_table, master_instance=obj)
js = ar.renderer.request_handler(sar)
ar.set_response(eval_js=js)
class MultipleRowAction(Action):
"""Base class for actions that update something on every selected row.
"""
custom_handler = True
def run_on_row(self, obj, ar):
"""This is being called on every selected row.
"""
raise NotImplementedError()
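    # Subclassing sketch (hypothetical, not from the original source):
    #
    #   class MarkChecked(MultipleRowAction):
    #       label = _("Mark checked")
    #
    #       def run_on_row(self, obj, ar):
    #           obj.checked = True
    #           obj.save()
    #           return 1  # counted in the "rows have been updated" message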
def run_from_ui(self, ar, **kw):
ar.success(**kw)
n = 0
for obj in ar.selected_rows:
if not ar.response.get('success'):
ar.info("Aborting remaining rows")
break
ar.info("%s for %s...", str(self.label), str(obj))
n += self.run_on_row(obj, ar)
ar.set_response(refresh_all=True)
msg = _("%d row(s) have been updated.") % n
ar.info(msg)
# ~ ar.success(msg,**kw)
class DeleteSelected(MultipleRowAction):
"""Delete the selected row(s).
This action is automatically installed on every editable actor.
"""
action_name = 'delete_selected' # because...
if True: # settings.SITE.use_silk_icons:
icon_name = 'delete'
else:
button_text = "⊖" # 2296 CIRCLED MINUS
# button_text = u"⊟" # 229F SQUARED MINUS
ui5_icon_name = 'sap-icon://less'
help_text = _("Delete this record")
auto_save = False
sort_index = 30
readonly = False
show_in_workflow = False
# required_roles = set([SiteUser])
# ~ callable_from = (ShowTable,ShowDetail)
# ~ needs_selection = True
label = _("Delete")
# ~ url_action_name = 'delete'
key = keyboard.DELETE # (ctrl=True)
# ~ client_side = True
def run_from_ui(self, ar, **kw):
objects = []
for obj in ar.selected_rows:
objects.append(str(obj))
msg = ar.actor.disable_delete(obj, ar)
if msg is not None:
ar.error(None, msg, alert=True)
return
# build a list of volatile related objects that will be deleted together
# with this one
cascaded_objects = {}
kernel = settings.SITE.kernel
for obj in ar.selected_rows:
# print(20201229, "selected:", obj)
for m, fk in traverse_ddh_fklist(obj.__class__):
if fk.name in m.allow_cascaded_delete:
qs = m.objects.filter(**{fk.name:obj})
n = qs.count()
if n:
# print(20201229, n, fk, m, qs)
if m in cascaded_objects:
cascaded_objects[m] += n
else:
cascaded_objects[m] = n
# print "20141208 generic related objects for %s:" % obj
for gfk, fk_field, qs in kernel.get_generic_related(obj):
if gfk.name in qs.model.allow_cascaded_delete:
n = qs.count()
if n:
cascaded_objects[qs.model] = n
def ok(ar2):
super(DeleteSelected, self).run_from_ui(ar, **kw)
# refresh_all must be True e.g. for when user deletes an item of a
# bank statement
ar2.success(record_deleted=True, refresh_all=True)
# hack required for extjs:
if ar2.actor.detail_action:
ar2.set_response(
detail_handler_name=ar2.actor.detail_action.full_name())
d = dict(num=len(objects), targets=', '.join(objects))
if len(objects) == 1:
d.update(type=ar.actor.model._meta.verbose_name)
else:
d.update(type=ar.actor.model._meta.verbose_name_plural)
if len(objects) > 10:
objects = objects[:9] + ["..."]
msg = gettext("You are about to delete %(num)d %(type)s\n(%(targets)s)") % d
if len(cascaded_objects):
lst = ["{} {}".format(n, m._meta.verbose_name if n == 1 else m._meta.verbose_name_plural)
for m, n in cascaded_objects.items()]
msg += "\n" + gettext("as well as all related volatile records ({})").format(
", ".join(lst))
ar.confirm(ok, "{}. {}".format(msg, gettext("Are you sure?")),
uid="deleting %(num)d %(type)s pks=" % d + "".join([str(t.pk) for t in ar.selected_rows]))
def run_on_row(self, obj, ar):
obj.delete_instance(ar)
return 1
action = Action.decorate
def get_view_permission(e):
if isinstance(e, Permittable) and not e.get_view_permission(
get_user_profile()):
return False
# e.g. pcsw.ClientDetail has a tab "Other", visible only to system
# admins but the "Other" contains a GridElement RolesByPerson
# which is not per se reserved for system admins. js of normal
# users should not try to call on_master_changed() on it
parent = e.parent
while parent is not None:
if isinstance(parent, Permittable) and not parent.get_view_permission(
get_user_profile()):
return False # bug 3 (bcss_summary) blog/2012/0927
parent = parent.parent
return True
|
bsd-2-clause
| -562,974,953,244,720,830 | 31.863426 | 117 | 0.600808 | false |
MikeLx/open-source-search-engine
|
script/inject/inject.py
|
1
|
1956
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
import json
import re
import subprocess
import multiprocessing
#Generate environment with:
#pex -r requests -r multiprocessing -e inject:main -o warc-inject -s '.' --no-wheel
#pex -r requests -r multiprocessing -o warc-inject
def injectItem(item):
metadata = subprocess.Popen(['./ia','metadata', item], stdout=subprocess.PIPE).communicate()[0]
#print 'item metadata is ', metadata, 'item is ', item
md = json.loads(metadata)
for ff in md['files']:
if not ff['name'].endswith('arc.gz'): continue
itemMetadata = {'mtime':ff['mtime']}
itemMetadata.update(md['metadata'])
postVars = {'url':'http://archive.org/download/%s/%s' %(item,ff['name']),
'metadata':json.dumps(itemMetadata),
'c':'ait'}
print "sending", postVars,' to gb'
rp = requests.post("http://localhost:8000/admin/inject", postVars)
        print postVars['url'], rp.status_code
def getPage(page):
#r = requests.get('https://archive.org/advancedsearch.php?q=collection%3Aarchiveitdigitalcollection&fl%5B%5D=identifier&rows=1&page={0}&output=json&save=yes'.format(page))
r = requests.get('https://archive.org/advancedsearch.php?q=collection%3Aarchiveitdigitalcollection&fl%5B%5D=identifier&rows=1000&page={0}&output=json&save=yes'.format(page))
if r.status_code != 200:
return 0
contents = r.content
jsonContents = json.loads(contents)
items = [x['identifier'] for x in jsonContents['response']['docs']]
if len(items) == 0:
return 0
print 'loading %s items, %s - %s' % (len(items), items[0], items[-1])
for item in items:
injectItem(item)
return len(items)
def main():
#getPage(4)
from multiprocessing.pool import ThreadPool
pool = ThreadPool(processes=5)
print pool.map(getPage, xrange(1,1200))
if __name__ == '__main__':
main()
|
apache-2.0
| 7,445,202,696,927,137,000 | 30.548387 | 177 | 0.642127 | false |
kdheepak89/pelican-alias
|
pelican_alias.py
|
1
|
1886
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os.path
import logging
from pelican import signals
logger = logging.getLogger(__name__)
class AliasGenerator(object):
TEMPLATE = """<!DOCTYPE html><html><head><meta charset="utf-8" />
<meta http-equiv="refresh" content="0;url=/{destination_path}" />
</head></html>"""
def __init__(self, context, settings, path, theme, output_path, *args):
self.output_path = output_path
self.context = context
self.alias_delimiter = settings.get('ALIAS_DELIMITER', ',')
def create_alias(self, page, alias):
# If path starts with a /, remove it
if alias[0] == '/':
relative_alias = alias[1:]
else:
relative_alias = alias
path = os.path.join(self.output_path, relative_alias)
directory, filename = os.path.split(path)
try:
os.makedirs(directory)
except OSError:
pass
if filename == '':
path = os.path.join(path, 'index.html')
logger.info('[alias] Writing to alias file %s' % path)
with open(path, 'w') as fd:
fd.write(self.TEMPLATE.format(destination_path=page.url))
def generate_output(self, writer):
pages = (
self.context['pages'] + self.context['articles'] +
self.context.get('hidden_pages', []))
for page in pages:
aliases = page.metadata.get('alias', [])
if type(aliases) != list:
aliases = aliases.split(self.alias_delimiter)
for alias in aliases:
alias = alias.strip()
logger.info('[alias] Processing alias %s' % alias)
self.create_alias(page, alias)
def get_generators(generators):
return AliasGenerator
def register():
signals.get_generators.connect(get_generators)
|
mit
| 8,837,711,083,222,607,000 | 28.46875 | 75 | 0.582715 | false |
mozilla/pulseguardian
|
pulseguardian/management.py
|
1
|
3411
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Wrapper functions around the RabbitMQ management plugin's REST API."""
import json
from urllib.parse import quote
import requests
from pulseguardian import config
class PulseManagementException(Exception):
pass
def _api_request(path, method='GET', data=None):
if not config.rabbit_management_url:
raise PulseManagementException("No RabbitMQ management URL "
"configured.")
session = requests.Session()
url = '{0}{1}'.format(config.rabbit_management_url, path)
request = requests.Request(method, url,
auth=(config.rabbit_user,
config.rabbit_password),
data=json.dumps(data)).prepare()
request.headers['Content-type'] = 'application/json'
response = session.send(request)
session.close()
if response is None or not response.content:
return None
try:
return response.json()
except ValueError:
raise PulseManagementException(
"Error when calling '{0} {1}' with data={2}. "
"Received: {3}".format(method, path, data, response.content))
# Queues
def queues(vhost=None):
if vhost:
vhost = quote(vhost, '')
return _api_request('queues/{0}'.format(vhost))
else:
return _api_request('queues')
def queue(vhost, queue):
vhost = quote(vhost, '')
queue = quote(queue, '')
return _api_request('queues/{0}/{1}'.format(vhost, queue))
def queue_bindings(vhost, queue):
vhost = quote(vhost, '')
queue = quote(queue, '')
bindings = _api_request('queues/{0}/{1}/bindings'.format(vhost, queue))
return [b for b in bindings if b["source"]]
def delete_queue(vhost, queue):
vhost = quote(vhost, '')
queue = quote(queue, '')
_api_request('queues/{0}/{1}'.format(vhost, queue),
method='DELETE')
def delete_all_queues():
for queue_data in queues():
delete_queue(queue_data['vhost'], queue_data['name'])
def bindings(vhost):
"""All bindings for all queues"""
if vhost:
vhost = quote(vhost, '')
bindings = _api_request('bindings/{0}'.format(vhost))
else:
bindings = _api_request('bindings')
return [b for b in bindings if b["source"]]
# Users
def user(username):
username = quote(username, '')
return _api_request('users/{0}'.format(username))
def create_user(username, password, tags=''):
username = quote(username, '')
data = dict(password=password, tags=tags)
_api_request('users/{0}'.format(username), method='PUT',
data=data)
def delete_user(username):
username = quote(username, '')
_api_request('users/{0}'.format(username), method='DELETE')
# Permissions
def set_permission(username, vhost, configure='', write='', read=''):
username = quote(username, '')
vhost = quote(vhost, '')
data = dict(configure=configure, write=write, read=read)
_api_request('permissions/{0}/{1}'.format(
vhost, username), method='PUT', data=data)
# Channels
def channel(channel):
channel = quote(channel, '')
return _api_request('channels/{0}'.format(channel))
|
mpl-2.0
| 2,166,029,995,657,118,200 | 26.731707 | 75 | 0.617121 | false |
greyshell/greyEnum
|
core_concepts/demo_unittest/test_blance_bracket.py
|
1
|
1123
|
#!/usr/bin/env python3
# author: greyshell
# how to run: python -m unittest test_blance_bracket.TestSolution
import unittest
from blance_bracket import solution
class TestSolution(unittest.TestCase):
def test_solution_case_1(self):
self.assertEqual(solution(['(', '(']), False)
def test_solution_case_2(self):
self.assertEqual(solution(['(', ')']), True)
def test_solution_case_3(self):
self.assertEqual(solution([')', '(']), False)
def test_solution_case_4(self):
self.assertEqual(solution(['(', '(', ')', ')']), True)
def test_solution_case_5(self):
self.assertEqual(solution([')', ')']), False)
def test_solution_case_6(self):
self.assertEqual(solution(['(', '(', ')', '(', ')']), False)
def test_solution_case_7(self):
self.assertEqual(solution(['(', ')', '(']), False)
def test_solution_case_8(self):
self.assertEqual(solution(['(', ')', ')']), False)
def test_solution_case_9(self):
self.assertEqual(solution(['(', '(', ')', '(', ')', ')']), True)
if __name__ == '__main__':
unittest.main()
|
mit
| 7,019,673,380,520,369,000 | 27.075 | 72 | 0.577026 | false |
cmorgan/toyplot
|
docs/conf.py
|
1
|
12462
|
# Copyright 2014, Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain
# rights in this software.
# -*- coding: utf-8 -*-
#
# toyplot documentation build configuration file, created by
# sphinx-quickstart on Fri Apr 18 18:22:53 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# Provide stubs for external dependencies, so we can generate our reference
# documentation without having to install them.
class module_proxy(object):
__all__ = []
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return module_proxy()
@classmethod
def __getattr__(cls, name):
if name in ("__file__", "__path__"):
return "/dev/null"
elif name[0] == name[0].upper():
proxy_type = type(name, (), {})
proxy_type.__module__ = __name__
return proxy_type
else:
return module_proxy()
for module_name in [
"cairo",
"numpy",
"numpy.ma",
"numpy.testing",
"pango",
"pangocairo",
"PyQt5",
"PyQt5.QtCore",
"PyQt5.QtGui",
"PyQt5.QtPrintSupport",
"PyQt5.QtWebKitWidgets",
"PyQt5.QtWidgets"]:
sys.modules[module_name] = module_proxy()
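# With these stubs registered, statements such as ``import numpy`` inside
# toyplot resolve to module_proxy instances, so Sphinx autodoc can import the
# package for the documentation build even when the real dependencies are not
# installed.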
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"sphinx.ext.mathjax",
"sphinxcontrib.napoleon",
]
napoleon_use_param = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Toyplot'
copyright = u"""2014, Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain
rights in this software"""
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import toyplot
version = toyplot.__version__
# The full version, including alpha/beta/rc tags.
release = toyplot.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# on_rtd is whether we are on readthedocs.org, this line of code grabbed
# from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to
# specify it
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['css']
html_style = "toyplot.css"
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'toyplotdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'toyplot.tex', u'Toyplot Documentation',
u'Sandia National Laboratories', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'toyplot', u'Toyplot Documentation',
[u'Sandia National Laboratories'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index',
'toyplot',
u'Toyplot Documentation',
u'Sandia National Laboratories',
'toyplot',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'toyplot'
epub_author = u'Sandia National Laboratories'
epub_publisher = u'Sandia National Laboratories'
epub_copyright = u'Copyright 2014 Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain rights in this software.'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'toyplot'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
|
bsd-3-clause
| -7,070,915,137,279,565,000 | 30.233083 | 186 | 0.695153 | false |
DouglasLeeTucker/DECam_PGCM
|
bin/rawdata_se_objects_split.py
|
1
|
5996
|
#!/usr/bin/env python
"""
rawdata_se_objects_split.py
Example:
rawdata_se_objects_split.py --help
rawdata_se_objects_split.py --inputFileListFile inputfilelist.csv
--outputFileListFile outputfilelist.csv
--verbose 2
"""
##################################
def main():
import os
import argparse
import time
"""Create command line arguments"""
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--inputFileListFile', help='name of CSV file containing list of input files', default='inputfilelist.csv')
parser.add_argument('--outputFileListFile', help='name of CSV file containing list of filter bands and output files', default='outputfilelist.csv')
parser.add_argument('--verbose', help='verbosity level of output to screen (0,1,2,...)', default=0, type=int)
args = parser.parse_args()
if args.verbose > 0: print args
# Execute method...
status = rawdata_se_objects_split(args)
##################################
# rawdata_se_objects_split
#
# Based on sepBands from y2a1_tertiaries.py
#
def rawdata_se_objects_split(args):
import os
import datetime
import numpy as np
import pandas as pd
from astropy.table import Table
if args.verbose>0:
print
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print 'rawdata_se_objects_split'
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print
# Read in inputFileListFile...
inputFileListFile = args.inputFileListFile
if os.path.isfile(inputFileListFile)==False:
print """Input filelist file %s does not exist...""" % (inputFileListFile)
print """Exiting rawdata_se_objects_split method with return code 1"""
return 1
inputFileListDF = pd.read_csv(inputFileListFile,
header=None,
names=['FILENAME'],
comment='#')
inputFileListDF['FILENAME'] = inputFileListDF['FILENAME'].str.strip()
inputFileListSeries = inputFileListDF['FILENAME']
inputFileList = inputFileListSeries.values.tolist()
if args.verbose>1:
print 'Input file list:'
print inputFileList
# Read in outputFileListFile and convert it to a python
# dictionary, ensuring there are no extraneous white spaces
# in the file names listed...
outputFileListFile = args.outputFileListFile
if os.path.isfile(outputFileListFile)==False:
print """Output filelist file %s does not exist...""" % (outputFileListFile)
print """Exiting rawdata_se_objects_split method with return code 1"""
return 1
outputFileListDF = pd.read_csv(outputFileListFile,
header=None,
names=['BAND','FILENAME'],
index_col='BAND',
comment='#')
outputFileListDF['FILENAME'] = outputFileListDF['FILENAME'].str.strip()
outputFileListSeries = outputFileListDF['FILENAME']
outputFileListDict = outputFileListSeries.to_dict()
# Also, grab the band list from the outputFileListFile series...
bandList = outputFileListSeries.index.values.tolist()
if args.verbose>1:
print 'Output file list dictionary:'
print outputFileListDict
print 'Band list:'
print bandList
# Loop through inputFileList...
firstFile=True
for inputFile in inputFileList:
if args.verbose > 1:
print """Working on input file %s...""" % inputFile
print datetime.datetime.now()
# Read in file...
t = Table.read(inputFile)
# Convert astropy Table to pandas dataframe...
df = t.to_pandas()
# Verify that 'BAND' is a column in the dataframe;
# otherwise, skip...
if 'BAND' not in df.columns:
print """Could not find 'BAND' in header of %s... Skipping""" \
% (inputFile)
del df
continue
# Verify that 'FILENAME' is a column in the dataframe;
# otherwise, skip...
if 'FILENAME' not in df.columns:
print """Could not find 'FILENAME' in header of %s... Skipping""" \
% (inputFile)
del df
continue
# Trim leading and trailing white space from the FILENAME column...
df['FILENAME'] = df['FILENAME'].str.strip()
# Trim leading and trailing white space from the BAND column...
df['BAND'] = df['BAND'].str.strip()
# If this is the first (valid) file, create initial
# output files (empty except for the CSV header)...
if firstFile is True:
for band in bandList:
outputFile = outputFileListDict[band]
# Create a mask with all entries equal to False...
mask = pd.Series(np.zeros(df.BAND.size, dtype=bool))
df[mask].to_csv(outputFile,index=False)
firstFile = False
# Loop through band list, appending the rows from
# each band to the appropriate output file...
for band in bandList:
outputFile = outputFileListDict[band]
mask = (df.BAND == band)
# Faster if we move the "open" to outside the loop?:
with open(outputFile, 'a') as f:
df[mask].to_csv(f, index=False, header=False)
f.close()
# Clean up some space before moving to next file...
del df
#del t
if args.verbose > 1:
print datetime.datetime.now()
if args.verbose>0: print
return 0
##################################
if __name__ == "__main__":
main()
##################################
|
gpl-3.0
| 670,206,658,665,338,400 | 32.497207 | 151 | 0.564376 | false |
mablae/weblate
|
weblate/accounts/views.py
|
1
|
14376
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.shortcuts import render, get_object_or_404, redirect
from django.views.decorators.cache import cache_page
from django.http import HttpResponse
from django.contrib.auth import logout
from django.conf import settings
from django.contrib import messages
from django.utils.translation import ugettext as _
from django.contrib.auth.decorators import login_required
from django.core.mail.message import EmailMultiAlternatives
from django.utils import translation
from django.contrib.auth.models import User
from django.contrib.auth import views as auth_views
from django.views.generic import TemplateView
try:
# Not supported in Django 1.6
# pylint: disable=E0611
from django.contrib.auth import update_session_auth_hash
except ImportError:
# Fallback for Django 1.6
update_session_auth_hash = None
from urllib import urlencode
from weblate.accounts.forms import (
RegistrationForm, PasswordForm, PasswordChangeForm, EmailForm, ResetForm,
LoginForm, HostingForm, CaptchaRegistrationForm
)
from social.backends.utils import load_backends
from social.apps.django_app.utils import BACKENDS
from social.apps.django_app.views import complete
import weblate
from weblate.accounts.avatar import get_avatar_image, get_fallback_avatar_url
from weblate.accounts.models import set_lang, remove_user, Profile
from weblate.trans.models import Change, Project, SubProject
from weblate.accounts.forms import (
ProfileForm, SubscriptionForm, UserForm, ContactForm,
SubscriptionSettingsForm
)
from weblate import appsettings
CONTACT_TEMPLATE = '''
Message from %(name)s <%(email)s>:
%(message)s
'''
HOSTING_TEMPLATE = '''
%(name)s <%(email)s> wants to host %(project)s
Project: %(project)s
Website: %(url)s
Repository: %(repo)s
Filemask: %(mask)s
Additional message:
%(message)s
'''
class RegistrationTemplateView(TemplateView):
'''
Class for rendering registration pages.
'''
def get_context_data(self, **kwargs):
'''
Creates context for rendering page.
'''
context = super(RegistrationTemplateView, self).get_context_data(
**kwargs
)
context['title'] = _('User registration')
return context
def mail_admins_contact(request, subject, message, context, sender):
'''
Sends a message to the admins, as defined by the ADMINS setting.
'''
weblate.logger.info(
        'contact from %s: %s',
sender,
subject,
)
if not settings.ADMINS:
messages.error(
request,
_('Message could not be sent to administrator!')
)
weblate.logger.error(
'ADMINS not configured, can not send message!'
)
return
mail = EmailMultiAlternatives(
u'%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject % context),
message % context,
to=[a[1] for a in settings.ADMINS],
headers={'Reply-To': sender},
)
mail.send(fail_silently=False)
messages.success(
request,
_('Message has been sent to administrator.')
)
def deny_demo(request):
"""
Denies editing of demo account on demo server.
"""
messages.warning(
request,
_('You can not change demo account on the demo server.')
)
return redirect('profile')
@login_required
def user_profile(request):
profile = request.user.profile
form_classes = [
ProfileForm,
SubscriptionForm,
SubscriptionSettingsForm,
]
if request.method == 'POST':
# Parse POST params
forms = [form(request.POST, instance=profile) for form in form_classes]
forms.append(UserForm(request.POST, instance=request.user))
if appsettings.DEMO_SERVER and request.user.username == 'demo':
return deny_demo(request)
if min([form.is_valid() for form in forms]):
# Save changes
for form in forms:
form.save()
# Change language
set_lang(request, request.user.profile)
# Redirect after saving (and possibly changing language)
response = redirect('profile')
# Set language cookie and activate new language (for message below)
lang_code = profile.language
response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code)
translation.activate(lang_code)
messages.success(request, _('Your profile has been updated.'))
return response
else:
forms = [form(instance=profile) for form in form_classes]
forms.append(UserForm(instance=request.user))
social = request.user.social_auth.all()
social_names = [assoc.provider for assoc in social]
all_backends = set(load_backends(BACKENDS).keys())
new_backends = [
x for x in all_backends
if x == 'email' or x not in social_names
]
license_projects = SubProject.objects.filter(
project__in=Project.objects.all_acl(request.user)
).exclude(
license=''
)
response = render(
request,
'accounts/profile.html',
{
'form': forms[0],
'subscriptionform': forms[1],
'subscriptionsettingsform': forms[2],
'userform': forms[3],
'profile': profile,
'title': _('User profile'),
'licenses': license_projects,
'associated': social,
'new_backends': new_backends,
}
)
response.set_cookie(
settings.LANGUAGE_COOKIE_NAME,
profile.language
)
return response
@login_required
def user_remove(request):
if appsettings.DEMO_SERVER and request.user.username == 'demo':
return deny_demo(request)
if request.method == 'POST':
remove_user(request.user)
logout(request)
messages.success(
request,
_('Your account has been removed.')
)
return redirect('home')
return render(
request,
'accounts/removal.html',
)
def get_initial_contact(request):
'''
Fills in initial contact form fields from request.
'''
initial = {}
if request.user.is_authenticated():
initial['name'] = request.user.first_name
initial['email'] = request.user.email
return initial
def contact(request):
if request.method == 'POST':
form = ContactForm(request.POST)
if form.is_valid():
mail_admins_contact(
request,
'%(subject)s',
CONTACT_TEMPLATE,
form.cleaned_data,
form.cleaned_data['email'],
)
return redirect('home')
else:
initial = get_initial_contact(request)
if 'subject' in request.GET:
initial['subject'] = request.GET['subject']
form = ContactForm(initial=initial)
return render(
request,
'accounts/contact.html',
{
'form': form,
'title': _('Contact'),
}
)
def hosting(request):
'''
Form for hosting request.
'''
if not appsettings.OFFER_HOSTING:
return redirect('home')
if request.method == 'POST':
form = HostingForm(request.POST)
if form.is_valid():
mail_admins_contact(
request,
'Hosting request for %(project)s',
HOSTING_TEMPLATE,
form.cleaned_data,
form.cleaned_data['email'],
)
return redirect('home')
else:
initial = get_initial_contact(request)
form = HostingForm(initial=initial)
return render(
request,
'accounts/hosting.html',
{
'form': form,
'title': _('Hosting'),
}
)
def user_page(request, user):
'''
User details page.
'''
user = get_object_or_404(User, username=user)
profile = get_object_or_404(Profile, user=user)
# Filter all user activity
all_changes = Change.objects.last_changes(request.user).filter(
user=user,
)
# Last user activity
last_changes = all_changes[:10]
# Filter where project is active
user_projects_ids = set(all_changes.values_list(
'translation__subproject__project', flat=True
))
user_projects = Project.objects.filter(id__in=user_projects_ids)
return render(
request,
'accounts/user.html',
{
'page_profile': profile,
'page_user': user,
'last_changes': last_changes,
'last_changes_url': urlencode(
{'user': user.username.encode('utf-8')}
),
'user_projects': user_projects,
}
)
@cache_page(3600 * 24)
def user_avatar(request, user, size):
'''
User avatar page.
'''
user = get_object_or_404(User, username=user)
if user.email == 'noreply@weblate.org':
return redirect(get_fallback_avatar_url(size))
return HttpResponse(
content_type='image/png',
content=get_avatar_image(user, size)
)
def weblate_login(request):
'''
Login handler, just wrapper around login.
'''
# Redirect logged in users to profile
if request.user.is_authenticated():
return redirect('profile')
return auth_views.login(
request,
template_name='accounts/login.html',
authentication_form=LoginForm,
extra_context={
'login_backends': [
x for x in load_backends(BACKENDS).keys() if x != 'email'
],
'title': _('Login'),
}
)
@login_required
def weblate_logout(request):
'''
Logout handler, just wrapper around standard logout.
'''
messages.info(request, _('Thanks for using Weblate!'))
return auth_views.logout(
request,
next_page=settings.LOGIN_URL,
)
def register(request):
'''
Registration form.
'''
if appsettings.REGISTRATION_CAPTCHA:
form_class = CaptchaRegistrationForm
else:
form_class = RegistrationForm
if request.method == 'POST':
form = form_class(request.POST)
if form.is_valid() and appsettings.REGISTRATION_OPEN:
return complete(request, 'email')
else:
form = form_class()
backends = set(load_backends(BACKENDS).keys())
return render(
request,
'accounts/register.html',
{
'registration_email': 'email' in backends,
'registration_backends': backends - set(['email']),
'title': _('User registration'),
'form': form,
}
)
@login_required
def email_login(request):
'''
Connect email.
'''
if request.method == 'POST':
form = EmailForm(request.POST)
if form.is_valid():
return complete(request, 'email')
else:
form = EmailForm()
return render(
request,
'accounts/email.html',
{
'title': _('Register email'),
'form': form,
}
)
@login_required
def password(request):
'''
Password change / set form.
'''
if appsettings.DEMO_SERVER and request.user.username == 'demo':
return deny_demo(request)
do_change = False
if not request.user.has_usable_password():
do_change = True
change_form = None
elif request.method == 'POST':
change_form = PasswordChangeForm(request.POST)
if change_form.is_valid():
cur_password = change_form.cleaned_data['password']
do_change = request.user.check_password(cur_password)
if not do_change:
messages.error(
request,
_('You have entered an invalid password.')
)
else:
change_form = PasswordChangeForm()
if request.method == 'POST':
form = PasswordForm(request.POST)
if form.is_valid() and do_change:
# Clear flag forcing user to set password
if 'show_set_password' in request.session:
del request.session['show_set_password']
request.user.set_password(
form.cleaned_data['password1']
)
request.user.save()
# Update session hash for Django 1.7
if update_session_auth_hash:
update_session_auth_hash(request, request.user)
messages.success(
request,
_('Your password has been changed.')
)
return redirect('profile')
else:
form = PasswordForm()
return render(
request,
'accounts/password.html',
{
'title': _('Change password'),
'change_form': change_form,
'form': form,
}
)
def reset_password(request):
'''
Password reset handling.
'''
if request.method == 'POST':
form = ResetForm(request.POST)
if form.is_valid():
user = form.cleaned_data['email']
user.set_unusable_password()
user.save()
if not request.session.session_key:
request.session.create()
request.session['password_reset'] = True
return complete(request, 'email')
else:
form = ResetForm()
return render(
request,
'accounts/reset.html',
{
'title': _('Password reset'),
'form': form,
}
)
|
gpl-3.0
| 3,484,964,092,832,657,000 | 25.715613 | 79 | 0.593683 | false |
krnflake/python-hubicclient
|
hubicclient/__init__.py
|
1
|
1073
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Rackspace
# flake8: noqa
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OpenStack Swift Python client binding.
"""
from .client import *
# At setup.py time, we haven't installed anything yet, so there
# is nothing that is able to set this version property. Squelching
# that exception here should be fine- if there are problems with
# pkg_resources in a real install, that will manifest itself as
# an error still
try:
from hubicclient import version
__version__ = version.version_string
except Exception:
pass
|
apache-2.0
| -9,025,697,195,523,056,000 | 32.53125 | 69 | 0.744641 | false |
arpitmathur/Airline
|
src/Test_graph.py
|
1
|
3161
|
__author__ = 'Arpit'
import unittest
import json
from Airline.src.Graph import Graph
from Airline.src.UIConsole import UIConsole
class Test_graph(unittest.TestCase):
#This function is run before every test case is executed
#def setUp(self):
#The parser function
def parse_file(self, json_file):
with open(json_file) as map_data:
data = json.load(map_data)
return data
#Testing creating a vertex. Each metro in the JSON file should get created as a vertex of the graph
    def test_create_vertices(self):
        data = self.parse_file('map_data.json')
        graph = Graph()
graph.create_vertices(data)
print(graph.cityCodeList)
list = ['SCL', 'LIM', 'MEX', 'BOG', 'BUE', 'SAO', 'LOS', 'FIH', 'JNB', 'KRT', 'CAI', 'ALG', 'MAD', 'LON', 'PAR', 'MIL', 'ESS', 'LED', 'MOW', 'IST', 'BGW', 'THR', 'RUH', 'KHI', 'DEL', 'BOM', 'MAA', 'CCU', 'BKK', 'HKG', 'SHA', 'PEK', 'ICN', 'TYO', 'OSA', 'TPE', 'MNL', 'SGN', 'JKT', 'SYD', 'LAX', 'SFO', 'CHI', 'ATL', 'MIA', 'WAS', 'NYC', 'YYZ', 'CMI']
self.assertEquals(list, graph.cityCodeList)
#Testing whether the longest flight distance is the same as 12051
def test_longest_flight(self):
data = self.parse_file('map_data.json')
graph = Graph()
graph.create_vertices(data)
graph.create_edges(data)
self.assertEquals(12051, graph.longest_flight(data))
#Testing whether the shortest flight duration is the same as 334
def test_shortest_flight(self):
data = self.parse_file('map_data.json')
graph = Graph()
graph.create_vertices(data)
graph.create_edges(data)
self.assertEquals(334, graph.shortest_flight(data))
#Testing whether the Average distance covered by all CSAir flights is 2300.276595744681
def test_average_distance(self):
data = self.parse_file('map_data.json')
gObj = Graph()
gObj.create_vertices(data)
gObj.create_edges(data)
self.assertEquals(2300.276595744681, gObj.average_distance(data))
#Testing whether the biggest city population is equal to 34000000
def test_biggest_city(self):
data = self.parse_file('map_data.json')
graph = Graph()
graph.create_vertices(data)
graph.create_edges(data)
self.assertEquals(34000000, graph.biggest_city(data))
#Testing whether the smallest city population is equal to 589900
def test_smallest_city(self):
data = self.parse_file('map_data.json')
graph = Graph()
graph.create_vertices(data)
graph.create_edges(data)
self.assertEquals(589900, graph.smallest_city(data))
#The average city size is equal to 11796143.75
def test_average_city_size(self):
data = self.parse_file('map_data.json')
graph = Graph()
graph.create_vertices(data)
graph.create_edges(data)
        # assumes the Graph class provides an average_city_size() helper matching the comment above
        self.assertEquals(11796143.75, graph.average_city_size(data))
#def test_csAir_continents(self):
# self.fail()
#def test_find_city(self):
# self.fail()
if __name__ == '__main__':
unittest.main()
|
mit
| -7,213,381,536,477,382,000 | 36.642857 | 358 | 0.627649 | false |
xinghai-sun/models
|
deep_speech_2/data_utils/speech.py
|
1
|
5631
|
"""Contains the speech segment class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from data_utils.audio import AudioSegment
class SpeechSegment(AudioSegment):
"""Speech segment abstraction, a subclass of AudioSegment,
with an additional transcript.
:param samples: Audio samples [num_samples x num_channels].
:type samples: ndarray.float32
:param sample_rate: Audio sample rate.
:type sample_rate: int
:param transcript: Transcript text for the speech.
    :type transcript: basestring
:raises TypeError: If the sample data type is not float or int.
"""
def __init__(self, samples, sample_rate, transcript):
AudioSegment.__init__(self, samples, sample_rate)
self._transcript = transcript
def __eq__(self, other):
"""Return whether two objects are equal.
"""
if not AudioSegment.__eq__(self, other):
return False
if self._transcript != other._transcript:
return False
return True
def __ne__(self, other):
"""Return whether two objects are unequal."""
return not self.__eq__(other)
@classmethod
def from_file(cls, filepath, transcript):
"""Create speech segment from audio file and corresponding transcript.
:param filepath: Filepath or file object to audio file.
:type filepath: basestring|file
:param transcript: Transcript text for the speech.
        :type transcript: basestring
:return: Audio segment instance.
:rtype: AudioSegment
"""
audio = AudioSegment.from_file(filepath)
return cls(audio.samples, audio.sample_rate, transcript)
@classmethod
def from_bytes(cls, bytes, transcript):
"""Create speech segment from a byte string and corresponding
transcript.
:param bytes: Byte string containing audio samples.
:type bytes: str
:param transcript: Transcript text for the speech.
        :type transcript: basestring
:return: Audio segment instance.
:rtype: AudioSegment
"""
audio = AudioSegment.from_bytes(bytes)
return cls(audio.samples, audio.sample_rate, transcript)
@classmethod
def concatenate(cls, *segments):
"""Concatenate an arbitrary number of speech segments together, both
audio and transcript will be concatenated.
:param *segments: Input speech segments to be concatenated.
:type *segments: tuple of SpeechSegment
:return: Speech segment instance.
:rtype: SpeechSegment
:raises ValueError: If the number of segments is zero, or if the
sample_rate of any two segments does not match.
:raises TypeError: If any segment is not SpeechSegment instance.
"""
if len(segments) == 0:
raise ValueError("No speech segments are given to concatenate.")
sample_rate = segments[0]._sample_rate
transcripts = ""
for seg in segments:
if sample_rate != seg._sample_rate:
raise ValueError("Can't concatenate segments with "
"different sample rates")
if type(seg) is not cls:
raise TypeError("Only speech segments of the same type "
"instance can be concatenated.")
transcripts += seg._transcript
samples = np.concatenate([seg.samples for seg in segments])
return cls(samples, sample_rate, transcripts)
@classmethod
def slice_from_file(cls, filepath, transcript, start=None, end=None):
"""Loads a small section of an speech without having to load
the entire file into the memory which can be incredibly wasteful.
:param filepath: Filepath or file object to audio file.
:type filepath: basestring|file
:param start: Start time in seconds. If start is negative, it wraps
around from the end. If not provided, this function
reads from the very beginning.
:type start: float
:param end: End time in seconds. If end is negative, it wraps around
                    from the end. If not provided, the default behavior is
to read to the end of the file.
:type end: float
        :param transcript: Transcript text for the speech. If not provided,
                           the default is an empty string.
        :type transcript: basestring
:return: SpeechSegment instance of the specified slice of the input
speech file.
:rtype: SpeechSegment
"""
audio = AudioSegment.slice_from_file(filepath, start, end)
return cls(audio.samples, audio.sample_rate, transcript)
@classmethod
def make_silence(cls, duration, sample_rate):
"""Creates a silent speech segment of the given duration and
sample rate, transcript will be an empty string.
:param duration: Length of silence in seconds.
:type duration: float
:param sample_rate: Sample rate.
:type sample_rate: float
:return: Silence of the given duration.
:rtype: SpeechSegment
"""
audio = AudioSegment.make_silence(duration, sample_rate)
return cls(audio.samples, audio.sample_rate, "")
@property
def transcript(self):
"""Return the transcript text.
:return: Transcript text for the speech.
:rtype: basestring
"""
return self._transcript
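# --- Hedged usage sketch (not part of the original module) -------------------
# Built from in-memory samples so the example has no file dependency; the
# sample rate, durations and transcripts are arbitrary illustration values,
# and it assumes AudioSegment accepts a float32 ndarray as documented above.
if __name__ == "__main__":
    _sr = 16000
    _samples = np.zeros(_sr, dtype='float32')   # one second of silent samples
    _a = SpeechSegment(_samples, _sr, "hello ")
    _b = SpeechSegment(_samples, _sr, "world")
    _ab = SpeechSegment.concatenate(_a, _b)     # audio and transcripts joined
    print("transcript: %r" % _ab.transcript)    # -> 'hello world'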
|
apache-2.0
| -7,060,951,834,344,462,000 | 38.377622 | 78 | 0.626887 | false |
showerst/openstates
|
openstates/vt/__init__.py
|
1
|
2574
|
from billy.utils.fulltext import pdfdata_to_text, text_after_line_numbers
from .bills import VTBillScraper
from .legislators import VTLegislatorScraper
from .committees import VTCommitteeScraper
from .events import VTEventScraper
metadata = dict(
name='Vermont',
abbreviation='vt',
capitol_timezone='America/New_York',
legislature_name='Vermont General Assembly',
legislature_url='http://legislature.vermont.gov/',
chambers = {
'upper': {'name': 'Senate', 'title': 'Senator', 'term': 2},
'lower': {'name': 'House', 'title': 'Representative', 'term': 2},
},
terms=[{'name': '2009-2010',
'start_year': 2009,
'end_year': 2010,
'sessions': ['2009-2010']},
{'name': '2011-2012',
'start_year': 2011,
'end_year': 2012,
'sessions': ['2011-2012']},
{'name': '2013-2014',
'start_year': 2013,
'end_year': 2014,
'sessions': ['2013-2014']},
{'name': '2015-2016',
'start_year': 2015,
'end_year': 2016,
'sessions': ['2015-2016']},
],
session_details={'2009-2010': {'type': 'primary',
'display_name': '2009-2010 Regular Session',
'_scraped_name': '2009-2010 Session',
},
'2011-2012': {'type': 'primary',
'display_name': '2011-2012 Regular Session',
'_scraped_name': '2011-2012 Session',
},
'2013-2014': {'type': 'primary',
'display_name': '2013-2014 Regular Session',
'_scraped_name': '2013-2014 Session',
},
'2015-2016': {'type': 'primary',
'display_name': '2015-2016 Regular Session',
'_scraped_name': '2015-2016 Session',
},
},
feature_flags=['influenceexplorer'],
_ignored_scraped_sessions= ['2009 Special Session']
)
def session_list():
from billy.scrape.utils import url_xpath
return url_xpath(
'http://legislature.vermont.gov/bill/search/2016',
'//fieldset/div[@id="selected_session"]/div/select/option/text()')
def extract_text(doc, data):
return text_after_line_numbers(pdfdata_to_text(data))
|
gpl-3.0
| 2,855,948,928,439,591,400 | 39.857143 | 79 | 0.480575 | false |
BrianHicks/perch
|
docs/conf.py
|
1
|
8318
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import perch
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Perch'
copyright = u'2013, Brian Hicks'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = perch.__version__
# The full version, including alpha/beta/rc tags.
release = perch.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'perchdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'perch.tex', u'Perch Documentation',
u'Brian Hicks', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'perch', u'Perch Documentation',
[u'Brian Hicks'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'perch', u'Perch Documentation',
u'Brian Hicks', 'perch', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
bsd-3-clause
| 7,839,504,932,566,316,000 | 30.996154 | 80 | 0.706179 | false |
yfpeng/dcache
|
setup.py
|
1
|
1810
|
# Always prefer setuptools over distutils
# To use a consistent encoding
from codecs import open
from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='dcache',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.1.dev9',
description='Build a cache of files in the folder tree for efficiency and emergency purposes',
long_description=long_description,
# The project's main homepage.
url='https://github.com/yfpeng/dcache',
# Author details
author='Yifan Peng',
author_email='yifan.peng@nih.gov',
license='BSD 3-Clause License',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
# Specify the Python versions you support here.
'Programming Language :: Python',
'Topic :: Software Development',
],
keywords='cache folder',
packages=find_packages(exclude=["tests.*", "tests"]),
install_requires=[
'docutils==0.13.1',
'future==0.16.0',
'docopt'],
)
|
bsd-3-clause
| 1,978,977,201,031,246,300 | 29.166667 | 98 | 0.655249 | false |
tsl143/addons-server
|
src/olympia/landfill/tests/test_categories.py
|
1
|
1384
|
# -*- coding: utf-8 -*-
from olympia.amo.tests import TestCase
from olympia.addons.models import Category
from olympia.constants.applications import APPS
from olympia.constants.base import ADDON_EXTENSION, ADDON_PERSONA
from olympia.constants.categories import CATEGORIES
from olympia.landfill.categories import generate_categories
class CategoriesTests(TestCase):
def test_categories_themes_generation(self):
data = generate_categories(APPS['firefox'], ADDON_PERSONA)
assert len(data) == Category.objects.all().count()
assert len(data) == 15
def test_categories_addons_generation(self):
data = generate_categories(APPS['android'], ADDON_EXTENSION)
assert len(data) == Category.objects.all().count()
assert len(data) == 11
category = Category.objects.get(
id=CATEGORIES[APPS['android'].id][ADDON_EXTENSION]['shopping'].id)
assert unicode(category.name) == u'Shopping'
# Re-generating should not create any more.
data = generate_categories(APPS['android'], ADDON_EXTENSION)
assert len(data) == Category.objects.all().count()
assert len(data) == 11
# Name should still be the same.
category = Category.objects.get(
id=CATEGORIES[APPS['android'].id][ADDON_EXTENSION]['shopping'].id)
assert unicode(category.name) == u'Shopping'
|
bsd-3-clause
| -1,616,217,810,804,299,800 | 39.705882 | 78 | 0.684249 | false |
zqqf16/clipboard
|
clipboard/handler.py
|
1
|
1304
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tornado.web
from model import Entry
from datetime import datetime
import json
class IndexHandler(tornado.web.RequestHandler):
def get(self):
entries = [e for e in Entry.select().order_by(Entry.date.desc())]
self.render('clipboard.html', entries=entries)
def post(self):
content = self.get_argument('content', None)
date = datetime.now()
e = Entry.create(content=content, date=date)
e.save()
self.redirect('/')
#REST api
class MainHandler(tornado.web.RequestHandler):
def get(self):
entries = [e.to_dict() for e in Entry.select().order_by(Entry.date.desc())]
self.write(json.dumps(entries))
def post(self):
content = self.get_argument('content', None)
date = datetime.now()
e = Entry.create(content=content, date=date)
e.save()
self.write({'status':'success'})
class SingleHandler(tornado.web.RequestHandler):
def get(self, id):
e = Entry.get(Entry.id == id)
self.write(e.to_dict())
def delete(self, id):
e = Entry.get(Entry.id == id)
if not e:
self.write({'status':'error'})
return
e.delete_instance()
self.write({'status':'success'})
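# Hedged wiring sketch (not from the original repo): one plausible way to mount
# the handlers above in a tornado Application. The URL patterns, template_path
# and port are assumptions made for illustration only.
if __name__ == "__main__":
    import os
    import tornado.ioloop
    application = tornado.web.Application(
        [
            (r"/", IndexHandler),                   # HTML clipboard page
            (r"/api/entries", MainHandler),         # REST: list / create entries
            (r"/api/entries/(\d+)", SingleHandler), # REST: fetch / delete one entry
        ],
        template_path=os.path.join(os.path.dirname(__file__), 'templates'),
    )
    application.listen(8888)                        # assumed port
    tornado.ioloop.IOLoop.instance().start()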
|
mit
| -7,280,940,398,504,245,000 | 27.347826 | 83 | 0.60046 | false |
yeeking/snakestep
|
scripts/clock.py
|
1
|
2213
|
import time
import thread
import threading
# really basic implementation of a busy waiting clock
class Clock():
def __init__(self):
self.bpm(120)
# listeners stores things that listen for clock ticks
self._listeners = []
# when did we last tick?
self._last_tick = time.time()
self._running = False
def bpm(self, bpm):
# 4 ticks a beat
self._tick_length = 60.0/bpm / 4 * 1000.0
def start(self):
#print "Clock::start"
#thread.start_new_thread(self._run, ())
self._run()
def stop(self):
# print "Clock::stop"
self._running = False
# add something that wants to know when the clock ticks
def add_listener(self, listener):
self._listeners.append(listener)
def _run(self):
self._running = True
waited = self._tick_length
while self._running:
now = time.time()
self._tick()
# how long did it take to tick?
lostms = (time.time() - now) * 0.001
#if lostms > self._tick_length:
# print "Clock::tick took too long!"
#else:
# self._busy_sleep(self._tick_length - lostms)
self._busy_sleep(self._tick_length - lostms)
return
def _tick(self):
this_tick = time.time()
# print "Clock::tick diff: "+str(this_tick - self._last_tick)
self._last_tick = this_tick
for listener in self._listeners:
# print "CLock::ticking a listener"
listener.tick()
def _busy_sleep(self, timems):
timenow = time.time()
#timed = timems / 1000.0
timestop = time.time() + (timems * 0.001)
timewait = (timems - 30) * 0.001
#timewaited = 0
if timewait > 0:
time.sleep(timewait)
while True:
time.sleep(0)
if time.time() >= timestop:
break;
#timewaited = time.time() - timenow
# print "waited "+str(timewaited)
#return timewaited
#clock = Clock()
#thread.start_new_thread(clock.start(), ())
#while 1:
# pass
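# Hedged usage sketch (illustration only): add_listener() accepts any object
# exposing a tick() method; the Printer class and the bpm value are assumptions.
#
# class Printer():
#     def tick(self):
#         print "tick"            # fires 4 times per beat
#
# clock = Clock()
# clock.bpm(100)                  # 100 bpm -> 60.0/100/4*1000 = 150 ms per tick
# clock.add_listener(Printer())
# clock.start()                   # blocks; call clock.stop() from another thread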
|
mit
| 5,506,961,174,629,579,000 | 28.905405 | 68 | 0.525983 | false |
wavewave/madgraph-auto-model
|
modelrepo/heftNLO/write_param_card.py
|
1
|
6811
|
__date__ = "02 Aug 2012"
__author__ = 'olivier.mattelaer@uclouvain.be'
from function_library import *
class ParamCardWriter(object):
header = \
"""######################################################################\n""" + \
"""## PARAM_CARD AUTOMATICALY GENERATED BY THE UFO #####################\n""" + \
"""######################################################################\n"""
def __init__(self, filename, list_of_parameters=None, generic=False):
"""write a valid param_card.dat"""
if not list_of_parameters:
from parameters import all_parameters
list_of_parameters = [param for param in all_parameters if \
param.nature=='external']
self.generic_output = generic
if generic:
self.define_not_dep_param(list_of_parameters)
self.fsock = open(filename, 'w')
self.fsock.write(self.header)
self.write_card(list_of_parameters)
self.fsock.close()
def define_not_dep_param(self, list_of_parameters):
"""define self.dep_mass and self.dep_width in case that they are
requested in the param_card.dat"""
from particles import all_particles
self.dep_mass = [(part, part.mass) for part in all_particles \
if part.pdg_code > 0 and \
part.mass not in list_of_parameters]
self.dep_width = [(part, part.width) for part in all_particles\
if part.pdg_code > 0 and \
part.width not in list_of_parameters]
@staticmethod
def order_param(obj1, obj2):
""" order parameter of a given block """
maxlen = min([len(obj1.lhacode), len(obj2.lhacode)])
for i in range(maxlen):
if obj1.lhacode[i] < obj2.lhacode[i]:
return -1
elif obj1.lhacode[i] == obj2.lhacode[i]:
return 0
else:
return 1
        # codes are identical up to the shorter length; fall back to comparing lengths
if len(obj1.lhacode) > len(obj2.lhacode):
return 1
elif len(obj1.lhacode) == len(obj2.lhacode):
return 0
else:
return -1
def write_card(self, all_ext_param):
""" """
# list all lhablock
all_lhablock = set([param.lhablock for param in all_ext_param])
        # order lhablock alphabetically
all_lhablock = list(all_lhablock)
all_lhablock.sort()
# put at the beginning SMINPUT + MASS + DECAY
for name in ['DECAY', 'MASS','SMINPUTS']:
if name in all_lhablock:
all_lhablock.remove(name)
all_lhablock.insert(0, name)
for lhablock in all_lhablock:
self.write_block(lhablock)
need_writing = [ param for param in all_ext_param if \
param.lhablock == lhablock]
need_writing.sort(self.order_param)
[self.write_param(param, lhablock) for param in need_writing]
if self.generic_output:
if lhablock in ['MASS', 'DECAY']:
self.write_dep_param_block(lhablock)
if self.generic_output:
self.write_qnumber()
def write_block(self, name):
""" write a comment for a block"""
self.fsock.writelines(
"""\n###################################""" + \
"""\n## INFORMATION FOR %s""" % name.upper() +\
"""\n###################################\n"""
)
if name!='DECAY':
self.fsock.write("""Block %s \n""" % name)
def write_param(self, param, lhablock):
lhacode=' '.join(['%3s' % key for key in param.lhacode])
if lhablock != 'DECAY':
text = """ %s %e # %s \n""" % (lhacode, complex(param.value).real, param.name )
else:
text = '''DECAY %s %e \n''' % (lhacode, complex(param.value).real)
self.fsock.write(text)
def write_dep_param_block(self, lhablock):
import cmath
from parameters import all_parameters
for parameter in all_parameters:
exec("%s = %s" % (parameter.name, parameter.value))
text = "## Not dependent paramater.\n"
text += "## Those values should be edited following analytical the \n"
text += "## analytical expression. Some generator could simply ignore \n"
text += "## those values and use the analytical expression\n"
if lhablock == 'MASS':
data = self.dep_mass
prefix = " "
else:
data = self.dep_width
prefix = "DECAY "
for part, param in data:
if isinstance(param.value, str):
value = complex(eval(param.value)).real
else:
value = param.value
text += """%s %s %f # %s : %s \n""" %(prefix, part.pdg_code,
value, part.name, param.value)
self.fsock.write(text)
sm_pdg = [1,2,3,4,5,6,11,12,13,13,14,15,16,21,22,23,24,25]
data="""Block QNUMBERS %(pdg)d # %(name)s
1 %(charge)d # 3 times electric charge
2 %(spin)d # number of spin states (2S+1)
3 %(color)d # colour rep (1: singlet, 3: triplet, 8: octet)
4 %(antipart)d # Particle/Antiparticle distinction (0=own anti)\n"""
def write_qnumber(self):
""" write qnumber """
from particles import all_particles
import particles
print particles.__file__
text="""#===========================================================\n"""
text += """# QUANTUM NUMBERS OF NEW STATE(S) (NON SM PDG CODE)\n"""
text += """#===========================================================\n\n"""
for part in all_particles:
if part.pdg_code in self.sm_pdg or part.pdg_code < 0:
continue
text += self.data % {'pdg': part.pdg_code,
'name': part.name,
'charge': 3 * part.charge,
'spin': part.spin,
'color': part.color,
'antipart': part.name != part.antiname and 1 or 0}
self.fsock.write(text)
if '__main__' == __name__:
ParamCardWriter('./param_card.dat', generic=True)
print 'write ./param_card.dat'
|
bsd-2-clause
| 6,206,704,422,216,953,000 | 36.629834 | 93 | 0.466745 | false |
graik/biskit
|
biskit/__init__.py
|
1
|
2482
|
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2007 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
__version__ = '3.0.0.a'
import logging
## public classes
try:
## default error handler
from biskit.errorHandler import ErrorHandler
EHandler = ErrorHandler()
from biskit.logFile import LogFile, StdLog, ErrLog
from biskit.errors import BiskitError
## from Blast2Seq import Blast2Seq
## from EDParser import EZDParser
from biskit.pdbModel import PDBModel, PDBProfiles, PDBError
from biskit.xplorModel import XplorModel
from biskit.profileCollection import ProfileCollection, ProfileError
## from ProfileMirror import ProfileMirror
from biskit.pdbCleaner import PDBCleaner, CleanerError
## from ModelList import ModelList
## from CommandLine import CommandLine
from .amberResidues import AmberResidueType, AmberPrepParser
from .amberResidueLibrary import AmberResidueLibrary,\
AmberResidueLibraryError
from .atomCharger import AtomCharger
from .pdbDope import PDBDope
## from Ramachandran import Ramachandran
from .colorspectrum import ColorSpectrum, ColorError, colorRange
from .matrixPlot import MatrixPlot
from biskit.core.localpath import LocalPath, LocalPathError
from biskit.core.dictlist import DictList
## ## PVM-dependent modules
## from QualMaster import QualMaster
## from StructureMaster import StructMaster
## from StructureSlave import StructureSlave
## from TrajFlexMaster import TrajFlexMaster, FlexError
except Exception as why:
logging.warning('Could not import all biskit modules: ' + repr(why))
raise
## clean up namespace
del logging
|
gpl-3.0
| -3,805,283,938,140,448,000 | 31.657895 | 72 | 0.735294 | false |
smallyear/linuxLearn
|
salt/salt/modules/chronos.py
|
1
|
2964
|
# -*- coding: utf-8 -*-
'''
Module providing a simple management interface to a chronos cluster.
Currently this only works when run through a proxy minion.
.. versionadded:: 2015.8.2
'''
from __future__ import absolute_import
import json
import logging
import salt.utils
import salt.utils.http
__proxyenabled__ = ['chronos']
log = logging.getLogger(__file__)
def __virtual__():
# only valid in proxy minions for now
return salt.utils.is_proxy() and 'proxy' in __opts__
def _base_url():
'''
Return the proxy configured base url.
'''
base_url = "http://locahost:4400"
if 'proxy' in __opts__:
base_url = __opts__['proxy'].get('base_url', base_url)
return base_url
def _jobs():
'''
Return the currently configured jobs.
'''
response = salt.utils.http.query(
"{0}/scheduler/jobs".format(_base_url()),
decode_type='json',
decode=True,
)
jobs = {}
for job in response['dict']:
jobs[job.pop('name')] = job
return jobs
def jobs():
'''
Return a list of the currently installed job names.
CLI Example:
.. code-block:: bash
salt chronos-minion-id chronos.jobs
'''
job_names = _jobs().keys()
job_names.sort()
return {'jobs': job_names}
def has_job(name):
'''
Return whether the given job is currently configured.
CLI Example:
.. code-block:: bash
salt chronos-minion-id chronos.has_job my-job
'''
return name in _jobs()
def job(name):
'''
Return the current server configuration for the specified job.
CLI Example:
.. code-block:: bash
salt chronos-minion-id chronos.job my-job
'''
jobs = _jobs()
if name in jobs:
return {'job': jobs[name]}
return None
def update_job(name, config):
'''
Update the specified job with the given configuration.
CLI Example:
.. code-block:: bash
salt chronos-minion-id chronos.update_job my-job '<config yaml>'
'''
if 'name' not in config:
config['name'] = name
data = json.dumps(config)
try:
response = salt.utils.http.query(
"{0}/scheduler/iso8601".format(_base_url()),
method='POST',
data=data,
header_dict={
'Content-Type': 'application/json',
},
)
log.debug('update response: %s', response)
return {'success': True}
except Exception as ex:
log.error('unable to update chronos job: %s', ex.message)
return {
'exception': {
'message': ex.message,
}
}
def rm_job(name):
'''
Remove the specified job from the server.
CLI Example:
.. code-block:: bash
salt chronos-minion-id chronos.rm_job my-job
'''
response = salt.utils.http.query(
"{0}/scheduler/job/{1}".format(_base_url(), name),
method='DELETE',
)
return True
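# Hedged usage sketch (not part of the original module): a minimal job
# configuration for update_job.  The exact field set Chronos accepts
# (schedule, command, owner, ...) depends on the Chronos version, so treat
# this purely as an illustration:
#
# .. code-block:: bash
#
#     salt chronos-minion-id chronos.update_job nightly-cleanup \
#         '{"schedule": "R/2016-01-01T03:00:00Z/PT24H",
#           "command": "/usr/local/bin/nightly-cleanup.sh",
#           "owner": "ops@example.com"}'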
|
apache-2.0
| -3,986,236,424,849,965,000 | 21.454545 | 72 | 0.575236 | false |
CottageLabs/OpenArticleGauge
|
openarticlegauge/models.py
|
1
|
30181
|
"""
Data model objects, some of which extend the DAO for storage purposes
"""
import json, redis, logging
from datetime import datetime
from openarticlegauge import config
from openarticlegauge.dao import DomainObject
from openarticlegauge.slavedriver import celery
from werkzeug import generate_password_hash, check_password_hash
from flask.ext.login import UserMixin
log = logging.getLogger(__name__)
class ModelException(Exception):
"""
Exception to be thrown when there is a problem constructing or manipulating model objects
"""
def __init__(self, message):
self.message = message
super(ModelException, self).__init__(self, message)
class LookupException(Exception):
"""
Exception to be thrown when there is a problem looking up a record
"""
def __init__(self, message):
self.message = message
super(LookupException, self).__init__(self, message)
class BufferException(Exception):
"""
Exception to be thrown when there is a problem with the storage buffer
"""
def __init__(self, message):
self.message = message
super(BufferException, self).__init__(self, message)
class Account(DomainObject, UserMixin):
__type__ = 'account'
@classmethod
def pull_by_email(cls, email):
res = cls.query(q='email:"' + email + '"')
if res.get('hits',{}).get('total',0) == 1:
return cls(**res['hits']['hits'][0]['_source'])
else:
return None
def set_password(self, password):
self.data['password'] = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.data['password'], password)
@property
def email(self):
return self.data.get("email")
@property
def is_super(self):
#return not self.is_anonymous() and self.id in app.config.get('SUPER_USER', [])
return False
class Record(DomainObject):
__type__ = 'record'
@classmethod
def check_archive(cls, identifier):
"""
Check the archive layer for an object with the given (canonical) identifier,
which can be found in the bibjson['identifier']['canonical'] field
arguments:
identifier -- the identifier of the record to look up. This should be the canonical identifier of the record
Return a bibjson record or None if none is found
"""
result = {}
if config.BUFFERING:
# before checking remote, check the buffer queue if one is enabled
log.debug("checking buffer for " + str(identifier))
result = cls._check_buffer(identifier)
if result:
log.debug(str(identifier) + " found in buffer")
if not result:
# by just making an ID and GETting and POSTing to it, we can do things faster.
log.debug("checking remote archive for " + str(identifier))
_id = identifier.replace('/','_')
result = cls.pull(_id)
if result:
log.debug(str(identifier) + " found in remote archive")
else:
log.debug(str(identifier) + " did not yield a result in the archive")
try:
return result.data
except:
return result
@classmethod
def store(cls, bibjson):
"""
Store the provided bibjson record in the archive (overwriting any item which
has the same canonical identifier). Depending on the configuration, this method
may put the item into a buffer to be written out to storage at some later time
in the future.
arguments:
bibjson -- the bibjson record to be stored. The record must contain a canonical identifier in ['identifier'][n]['canonical']
"""
# normalise the canonical identifier for elastic search
identifier = None # record this for logging
for idobj in bibjson.get('identifier',[]):
if 'canonical' in idobj.keys():
bibjson['id'] = idobj['canonical'].replace('/','_')
identifier = idobj['canonical']
if config.BUFFERING:
log.info("placing item " + identifier + " into the storage buffer")
# just add to the buffer, no need to actually save anything
cls._add_to_buffer(bibjson)
return
else:
log.info("placing item " + identifier + " directly into storage")
log.debug("saving bibjson: "+ str(bibjson))
# no buffering, just save this one record
r = cls(**bibjson)
r.save()
@classmethod
def _add_to_buffer(cls, bibjson):
"""
add the given bibjson record to the storage buffer
arguments:
bibjson -- the bibjson record to be stored. The record must contain a canonical identifier in ['identifier'][n]['canonical']
"""
canonical = None
for identifier in bibjson.get("identifier", []):
if "canonical" in identifier:
canonical = identifier['canonical']
break
if canonical is None:
raise BufferException("cannot buffer an item without a canonical form of the identifier")
client = redis.StrictRedis(host=config.REDIS_BUFFER_HOST, port=config.REDIS_BUFFER_PORT, db=config.REDIS_BUFFER_DB)
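        # serialise the record and store it under an "id_"-prefixed key so that
        # _check_buffer and flush_buffer can find it by canonical identifier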
s = json.dumps(bibjson)
client.set("id_" + canonical, s)
@classmethod
def _check_buffer(cls, canonical):
"""
Check the storage buffer for an item identified by the supplied canonical identifier
arguments:
identifier -- the identifier of the record to look up. This should be the canonical identifier of the record
Return a bibjson record or None if none is found
"""
# query the redis cache for the bibjson record and return it
client = redis.StrictRedis(host=config.REDIS_BUFFER_HOST, port=config.REDIS_BUFFER_PORT, db=config.REDIS_BUFFER_DB)
record = client.get("id_" + canonical)
if record is None or record == "":
return None
return json.loads(record)
@classmethod
def flush_buffer(cls, key_timeout=0, block_size=1000):
"""
Flush the current storage buffer out to the long-term storage (Elasticsearch). This method
will return after all of the records have been flushed successfully to storage, and will
not wait until the key_timeout period has passed (this will happen asynchronously)
keyword arguments:
key_timeout -- the length of time to live (in seconds) to allocate to each record in the storage buffer. This is to
allow Elasticsearch time to receive and index the records and make them available - while it is
doing that, the records will remain available in the storage buffer
block_size -- maximum size of the list of records to send to the long-term storage in one HTTP request.
if there are more records than the block size, multiple HTTP requests will be made, none of which
will be larger than the block_size.
returns:
False if there is nothing in the buffer to flush
True if there are items in the buffer to flush and they are successfully flushed
"""
client = redis.StrictRedis(host=config.REDIS_BUFFER_HOST, port=config.REDIS_BUFFER_PORT, db=config.REDIS_BUFFER_DB)
# get all of the id keys
ids = client.keys("id_*")
if len(ids) == 0:
log.info("storage buffer contains 0 items to be flushed ... returning")
return False
log.info("flushing storage buffer of " + str(len(ids)) + " objects")
# retrieve all of the bibjson records associated with those keys
bibjson_records = []
i = 0
for identifier in ids:
# obtain, decode and register the bibjson record to be archived
s = client.get(identifier)
obj = json.loads(s)
bibjson_records.append(obj)
# if we've reached the block size, do a bulk write
i += 1
if i >= block_size:
# bulk load the records
cls.bulk(bibjson_records)
# reset the registers
i = 0
bibjson_records = []
if len(bibjson_records) > 0:
# bulk load the remaining records
cls.bulk(bibjson_records)
# set a timeout on the identifiers affected, if desired. If the key_timeout is 0, this is effectively
# the same as deleting those keys
for identifier in ids:
client.expire(identifier, key_timeout)
return True
class Issue(DomainObject):
__type__ = 'issue'
class Log(DomainObject):
__type__ = 'log'
class Error(DomainObject):
__type__ = "error"
class License(DomainObject):
__type__ = 'license'
class LicenseStatement(DomainObject):
__type__ = 'license_statement'
@property
def edit_id(self): return self.data['id']
@property
def license_statement(self): return self.data.get("license_statement")
@license_statement.setter
def license_statement(self, value): self.data['license_statement'] = value
@property
def license_type(self): return self.data.get("license_type")
@license_type.setter
def license_type(self, value): self.data['license_type'] = value
@property
def version(self): return self.data.get("version")
@version.setter
def version(self, value): self.data['version'] = value
@property
def example_doi(self): return self.data.get("example_doi")
@example_doi.setter
def example_doi(self, value): self.data['example_doi'] = value
@classmethod
def find_by_statement(cls, statement):
return cls.q2obj(terms={'license_statement.exact': [statement]}, size=1000000, consistent_order=True)
def save(self, **kwargs):
t = self.find_by_statement(self.license_statement)
if len(t) == 1:
# just one such statement exists - edit it instead
self.data['id'] = t[0]['id']
super(LicenseStatement, self).save(**kwargs)
class Publisher(DomainObject):
__type__ = 'publisher'
@property
def journal_urls(self): return self.data.get("journal_urls", [])
@journal_urls.setter
def journal_urls(self, data): self.data['journal_urls'] = data
@property
def publisher_name(self): return self.data.get("publisher_name", '')
@publisher_name.setter
def publisher_name(self, value): self.data['publisher_name'] = value
@property
def licenses(self):
return self.data.get('licenses', [])
@licenses.setter
def licenses(self, data): self.data['licenses'] = data
def add_license(self, lobj):
lics = self.licenses
lics.append(lobj)
def remove_license(self, license_statement):
lics = self.licenses
try:
del lics[license_statement]
except KeyError:
pass
@classmethod
def all_journal_urls(cls):
return cls.facets2flatlist(
facets= { 'journal_urls': { "field": "journal_urls.exact", "size": 10000 } },
size=0
)['journal_urls']
@classmethod
def find_by_journal_url(cls, url):
return cls.q2obj(terms={'journal_urls.exact': [url]}, size=1000000, consistent_order=True)
class ResultSet(object):
"""
Model object to represent the return object from the API. It represents the following data structure:
{
"requested" : number_requested_in_batch,
"results" : [
the list of bibjson record objects already known
],
"errors" : [
a list of JSON objects with an "identifier" key and an "error" key
],
"processing" : [
a list of bibjson identifier objects that are on the queue
]
}
"""
def __init__(self, bibjson_ids=[]):
"""
Construct a new ResultSet object around the list of bibjson_ids supplied to the API
arguments
bibjson_ids -- list of bibjson identifier objects
"""
self.requested = len(bibjson_ids)
self.results = []
self.errors = []
self.processing = []
self.bibjson_ids = bibjson_ids
def add_result_record(self, record):
"""
Add the given record to the result set. This will inspect the content of the record and
add it to the appropriate part of the response
arguments
record -- OAG record object. See the high level documentation for details on its structure
"""
# get the bibjson if it exists
bibjson = self._get_bibjson(record)
# now find out if it is queued or if the bibjson record is None
# and use this information to increment the counters
# if record.get("error") is not None:
if record.has_error():
# self.errors.append({"identifier" : record.get('identifier'), "error" : record.error})
self.errors.append({"identifier" : record.identifier, "error" : record.error})
# elif record.get('queued', False) or bibjson is None:
elif record.queued or bibjson is None:
# self.processing.append({"identifier" : record.get('identifier')})
self.processing.append({"identifier" : record.identifier })
else:
self.results.append(bibjson)
def json(self):
"""
Get a JSON representation of this object
returns a JSON serialisation of the object
"""
obj = {
"requested" : self.requested,
"results" : self.results,
"errors" : self.errors,
"processing" : self.processing
}
return json.dumps(obj)
def _get_bibjson(self, record):
"""
Get the bibjson from the supplied record. This involves finding the bibjson
record in record['bibjson'] and reconciling this with any identifiers in record['identifier']
to ensure that all relevant identifiers are represented.
arguments:
record -- OAG record object. See the high level documentation for details on its structure
"""
# first get the bibjson record
# bibjson = record.get('bibjson')
bibjson = record.bibjson
if bibjson is None:
return None
# ensure that the identifier is in the bibjson record
# FIXME: this is pretty blunt, could be a lot smarter, and ultimately unnecessary anyway
if not bibjson.has_key("identifier"):
bibjson["identifier"] = []
found = False
for identifier in bibjson['identifier']:
# if identifier.has_key("canonical") and identifier['canonical'] == record['identifier']['canonical']:
if identifier.has_key("canonical") and identifier['canonical'] == record.canonical:
found = True
break
if not found:
# bibjson['identifier'].append(record['identifier'])
bibjson['identifier'].append(record.identifier)
return bibjson
class MessageObject(object):
"""
{
"identifier" : {
"id" : "<raw id provided by the client>",
"type" : "<type of identifier, e.g doi or pmid>",
"canonical" : "<canonical form of the identifier>"
},
"queued" : True/False,
"provider" : {
"url" : ["<provider url, e.g. dereferenced doi>", "..."],
"doi" : "<provider doi>"
},
"bibjson" : {<bibjson object - see http://bibjson.org>},
"error" : "<any error message>",
"licensed" : True/False
}
"licensed" allows us to tell in the back end if the record had a licence
attached to it prior to being stripped down to be sent into the back-end.
"""
def __init__(self, bid=None, record=None, bibjson=None):
"""
Create a new Message object using combinations of the provided arguments.
bid alone creates a new record with that identifier
record alone seeds this object with a full record object
bibjson alone seeds this object with a record containing that bibjson
record + bibjson seeds this object with the provided record, but its bibjson is overwritten
arguments
bid -- bibjson identifier object or just an id string
record -- full internal representation of the message object
bibjson -- bibjson record
"""
self.record = None
if bid:
if isinstance(bid, dict):
if "id" not in bid:
raise ModelException("MessageObject must be constructed with an id, or a valid bibjson identifier")
else:
bid = {"id" : bid}
self.record = { "identifier" : bid }
if record:
self.record = record
if bibjson:
if self.record is None:
self.record = {}
self.record["bibjson"] = bibjson
if self.record is None:
self.record = {}
###############################################
## Representation functions
###############################################
def json(self, **kwargs):
return json.dumps(self.record, **kwargs)
def __str__(self):
return str(self.record)
###############################################
## Operational functions
###############################################
def merge(self, bibjson):
ls = bibjson.get("license", [])
for l in ls:
self.add_license_object(l)
def prep_for_backend(self):
self.set_licensed_flag()
self.remove_bibjson()
return self.record
def add_identifier_to_bibjson(self):
"""
Take the supplied bibjson identifier object and ensure that it has been added
to the supplied bibjson object. The bibjson object may already contain the
identifier object, in which case this method will not make any changes.
"""
"""
# FIXME: this is pretty blunt, could be a lot smarter
if not bibjson.has_key("identifier"):
bibjson["identifier"] = []
found = False
for identifier in bibjson['identifier']:
if identifier.has_key("canonical") and identifier['canonical'] == bibjson['identifier']['canonical']:
found = True
break
if not found:
bibjson['identifier'].append(identifier)
"""
# prep the bibjson record to receive an identifier
if "bibjson" not in self.record:
self.record["bibjson"] = {}
if "identifier" not in self.record["bibjson"]:
self.record["bibjson"]["identifier"] = []
incoming = self.record.get("identifier", {}).get("canonical")
existing = [ident.get("canonical") for ident in self.record.get("bibjson", {}).get("identifier", []) if ident.get("canonical") is not None]
if incoming is None:
raise ModelException("can't add identifier to bibjson unless it has a canonical form")
if incoming not in existing:
self.record["bibjson"]["identifier"].append(self.record.get("identifier"))
###############################################
## Various simple property getter/setters
###############################################
# identifier stuff
@property
def identifier(self):
return self.record.get("identifier")
@property
def id(self):
return self.record.get("identifier", {}).get("id")
@id.setter
def id(self, val):
if "identifier" not in self.record:
self.record["identifier"] = {}
self.record["identifier"]["id"] = val
def has_id(self):
return "id" in self.record.get("identifier", {})
@property
def identifier_type(self):
return self.record.get("identifier", {}).get("type")
@identifier_type.setter
def identifier_type(self, type):
if "identifier" not in self.record:
self.record["identifier"] = {}
self.record["identifier"]["type"] = type
def has_type(self):
return "type" in self.record.get("identifier", {})
@property
def canonical(self):
return self.record.get("identifier", {}).get("canonical")
@canonical.setter
def canonical(self, canonical):
if "identifier" not in self.record:
self.record["identifier"] = {}
self.record["identifier"]["canonical"] = canonical
# queue
@property
def queued(self):
return self.record.get("queued", False)
@queued.setter
def queued(self, val):
self.record["queued"] = val
# provider
@property
def provider(self):
return self.record.get("provider")
def has_provider(self):
return "provider" in self.record
@property
def provider_doi(self):
return self.record.get("provider", {}).get("doi")
@property
def doi_without_prefix(self):
doi = self.provider_doi
# is it a string? could get None too
# convention dictates to test for capability, not identity though
if getattr(doi, 'startswith', None):
# if it can be lowercased, then do it - higher chance of matching the prefix
if getattr(doi, 'lower', None):
if doi.lower().startswith('doi:'):
return doi[4:]
# we can use startswith, but can't lowercase it (?!) - just check for the prefix
if doi.startswith('doi:') or doi.startswith('DOI:'):
return doi[4:]
return doi
@property
def provider_urls(self):
return self.record.get("provider", {}).get("url", [])
def add_provider_url(self, url):
"""
Record a provider url in the record
arguments:
url -- the url to be added to the provider record
"""
if not "provider" in self.record:
self.record['provider'] = {}
if not "url" in self.record["provider"]:
self.record["provider"]["url"] = []
if url not in self.record['provider']['url']:
self.record['provider']['url'].append(url)
def add_provider_urls(self, urls):
"""
Record a list of provider urls in the record
arguments:
urls -- the urls to be added to the provider record
"""
for url in urls:
self.add_provider_url(url)
def set_provider_doi(self, doi):
"""
Record a DOI in the provider part of the record
arguments:
doi -- the doi to be added to the provider record
"""
if not "provider" in self.record:
self.record['provider'] = {}
self.record["provider"]["doi"] = doi
# bibjson
@property
def bibjson(self):
return self.record.get("bibjson")
@bibjson.setter
def bibjson(self, bj):
self.record["bibjson"] = bj
def has_bibjson(self):
return "bibjson" in self.record
def remove_bibjson(self):
if "bibjson" in self.record:
del self.record["bibjson"]
# error
@property
def error(self):
return self.record.get("error")
@error.setter
def error(self, val):
self.record["error"] = val
def has_error(self):
return "error" in self.record
# licensed flag
def set_licensed_flag(self):
self.record["licensed"] = len(self.record.get("bibjson", {}).get("license", [])) > 0
def was_licensed(self):
return self.record.get("licensed", False)
# license specifically
@property
def license(self):
return self.record.get("bibjson", {}).get("license", [])
def has_license(self):
return "license" in self.record.get("bibjson", {}) and len(self.record.get("bibjson", {}).get("license", [])) > 0
def add_license_object(self, license):
if "bibjson" not in self.record:
self.record["bibjson"] = {}
if "license" not in self.record['bibjson']:
self.record['bibjson']['license'] = []
self.record['bibjson']['license'].append(license)
def add_license(self,
description="",
title="",
url="",
version="",
jurisdiction="",
type="",
open_access=False,
BY="",
NC="",
ND="",
SA="",
error_message="",
suggested_solution="",
category="",
provenance_description="",
agent=config.agent,
source="",
source_size=-1,
date=datetime.strftime(datetime.now(), config.date_format),
handler="",
handler_version=""):
"""
Add a licence with the supplied keyword parameters to the record in the appropriate format.
The format of the licence is as follows:
{
"description": "",
"title": "",
"url": licence_url,
"version": "",
"jurisdiction": "",
"type": "failed-to-obtain-license",
"open_access": False,
"BY": "",
"NC": "",
"ND": "",
"SA": "",
"error_message": why,
"suggested_solution": suggested_solution,
"provenance": {
"category": "page_scrape",
"description": self.gen_provenance_description_fail(source_url),
"agent": config.agent,
"source": source_url,
"source_size" : source_size,
"date": datetime.strftime(datetime.now(), config.date_format),
"handler" : self._short_name,
"handler_version" : self.__version__
}
}
keyword_arguments:
see the top level documentation for details on the meaning of each field - they map consistently to the parts
of the licence record
"""
if "bibjson" not in self.record:
self.record["bibjson"] = {}
if "license" not in self.record['bibjson']:
self.record['bibjson']['license'] = []
self.record['bibjson']['license'].append(
{
"description": description,
"title": title,
"url": url,
"version": version,
"jurisdiction": jurisdiction,
"type": type,
"open_access": open_access,
"BY": BY,
"NC": NC,
"ND": ND,
"SA": SA,
"error_message": error_message,
"suggested_solution": suggested_solution,
"provenance": {
"category": category,
"description": provenance_description,
"agent": agent,
"source": source,
"source_size" : source_size,
"date": date,
"handler" : handler,
"handler_version" : handler_version
}
}
)
@celery.task(name="openarticlegauge.models.flush_buffer")
def flush_buffer():
"""
Celery task for flushing the storage buffer. This should be promoted onto a
processing queue by Celery Beat (see the celeryconfig). This will process will
lock the buffer so that parallel execution is not possible.
returns
False if no buffering is necessary (configuration) or possible (locked)
True if buffering has been handled
"""
# if we are not buffering, don't do anything
if not config.BUFFERING:
log.info("BUFFERING = False ; flush_buffer is superfluous, aborting")
return False
# check to see if we are already running a buffering process
client = redis.StrictRedis(host=config.REDIS_BUFFER_HOST, port=config.REDIS_BUFFER_PORT, db=config.REDIS_BUFFER_DB)
lock = client.get("flush_buffer_lock")
if lock is not None:
log.warn("flush_buffer ran before previous iteration had completed - consider increasing the gaps between the run times for this scheduled task")
return False
# if we are not already running the buffering process, then carry on...
# set a lock on this process, so that it doesn't run twice at the same time (see checks above)
client.set("flush_buffer_lock", "lock")
# call flush on the record objects that are buffered
Record.flush_buffer(key_timeout=config.BUFFER_GRACE_PERIOD, block_size=config.BUFFER_BLOCK_SIZE)
# set an expiry time on the lock, which is consistent with the expiry time applied to the
# buffered items. This means this method will only run again once all the previously buffered
# items have been removed from the buffer zone.
client.expire("flush_buffer_lock", config.BUFFER_GRACE_PERIOD)
# return true to indicate that the function ran
return True
|
bsd-3-clause
| -2,530,888,789,660,063,000 | 33.650976 | 153 | 0.565753 | false |
lavish205/olympia
|
src/olympia/amo/celery.py
|
1
|
6347
|
"""Loads and instantiates Celery, registers our tasks, and performs any other
necessary Celery-related setup. Also provides Celery-related utility methods,
in particular exposing a shortcut to the @task decorator."""
from __future__ import absolute_import
import datetime
from django.conf import settings
from django.core.cache import cache
from celery import Celery, group
from celery.signals import task_failure, task_postrun, task_prerun
from django_statsd.clients import statsd
from kombu import serialization
from post_request_task.task import (
PostRequestTask, _start_queuing_tasks, _send_tasks_and_stop_queuing)
from raven import Client
from raven.contrib.celery import register_logger_signal, register_signal
import olympia.core.logger
from olympia.amo.utils import chunked, utc_millesecs_from_epoch
log = olympia.core.logger.getLogger('z.task')
class AMOTask(PostRequestTask):
"""A custom celery Task base class that inherits from `PostRequestTask`
to delay tasks and adds a special hack to still perform a serialization
roundtrip in eager mode, to mimic what happens in production in tests.
The serialization is applied both to apply_async() and apply() to work
around the fact that celery groups have their own apply_async() method that
directly calls apply() on each task in eager mode.
Note that we should never somehow be using eager mode with actual workers,
that would cause them to try to serialize data that has already been
serialized...
"""
abstract = True
def _serialize_args_and_kwargs_for_eager_mode(
self, args=None, kwargs=None, **options):
producer = options.get('producer')
with app.producer_or_acquire(producer) as eager_producer:
serializer = options.get(
'serializer', eager_producer.serializer
)
body = args, kwargs
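            # round-trip the payload through the configured serializer so eager
            # execution sees the same (de)serialized data a real worker would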
content_type, content_encoding, data = serialization.dumps(
body, serializer
)
args, kwargs = serialization.loads(
data, content_type, content_encoding
)
return args, kwargs
def apply_async(self, args=None, kwargs=None, **options):
if app.conf.task_always_eager:
args, kwargs = self._serialize_args_and_kwargs_for_eager_mode(
args=args, kwargs=kwargs, **options)
return super(AMOTask, self).apply_async(
args=args, kwargs=kwargs, **options)
def apply(self, args=None, kwargs=None, **options):
if app.conf.task_always_eager:
args, kwargs = self._serialize_args_and_kwargs_for_eager_mode(
args=args, kwargs=kwargs, **options)
return super(AMOTask, self).apply(args=args, kwargs=kwargs, **options)
app = Celery('olympia', task_cls=AMOTask)
task = app.task
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()
# Hook up Sentry in celery.
raven_client = Client(settings.SENTRY_DSN)
# register a custom filter to filter out duplicate logs
register_logger_signal(raven_client)
# hook into the Celery error handler
register_signal(raven_client)
# After upgrading raven we can specify loglevel=logging.INFO to override
# the default (which is ERROR).
register_logger_signal(raven_client)
@task_failure.connect
def process_failure_signal(exception, traceback, sender, task_id,
signal, args, kwargs, einfo, **kw):
"""Catch any task failure signals from within our worker processes and log
them as exceptions, so they appear in Sentry and ordinary logging
output."""
exc_info = (type(exception), exception, traceback)
log.error(
u'Celery TASK exception: {0.__name__}: {1}'.format(*exc_info),
exc_info=exc_info,
extra={
'data': {
'task_id': task_id,
'sender': sender,
'args': args,
'kwargs': kwargs
}
})
@task_prerun.connect
def start_task_timer(task_id, task, **kw):
timer = TaskTimer()
log.info('starting task timer; id={id}; name={name}; '
'current_dt={current_dt}'
.format(id=task_id, name=task.name,
current_dt=timer.current_datetime))
# Cache start time for one hour. This will allow us to catch crazy long
# tasks. Currently, stats indexing tasks run around 20-30 min.
expiration = 60 * 60
cache.set(timer.cache_key(task_id), timer.current_epoch_ms, expiration)
@task_postrun.connect
def track_task_run_time(task_id, task, **kw):
timer = TaskTimer()
start_time = cache.get(timer.cache_key(task_id))
if start_time is None:
log.info('could not track task run time; id={id}; name={name}; '
'current_dt={current_dt}'
.format(id=task_id, name=task.name,
current_dt=timer.current_datetime))
else:
run_time = timer.current_epoch_ms - start_time
log.info('tracking task run time; id={id}; name={name}; '
'run_time={run_time}; current_dt={current_dt}'
.format(id=task_id, name=task.name,
current_dt=timer.current_datetime,
run_time=run_time))
statsd.timing('tasks.{}'.format(task.name), run_time)
cache.delete(timer.cache_key(task_id))
class TaskTimer(object):
def __init__(self):
self.current_datetime = datetime.datetime.now()
self.current_epoch_ms = utc_millesecs_from_epoch(
self.current_datetime)
def cache_key(self, task_id):
return 'task_start_time.{}'.format(task_id)
def create_subtasks(task, qs, chunk_size, countdown=None, task_args=None):
"""
Splits a task depending on a queryset into a bunch of subtasks of the
specified chunk_size, passing a chunked queryset and optional additional
arguments to each."""
if task_args is None:
task_args = ()
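    # build one subtask per chunk and dispatch them together as a celery group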
job = group([
task.subtask(args=(chunk,) + task_args)
for chunk in chunked(qs, chunk_size)
])
if countdown is not None:
job.apply_async(countdown=countdown)
else:
job.apply_async()
def pause_all_tasks():
_start_queuing_tasks()
def resume_all_tasks():
_send_tasks_and_stop_queuing()
|
bsd-3-clause
| -9,087,680,736,191,423,000 | 33.308108 | 79 | 0.648968 | false |
multiplechoice/workplace
|
jobs/spiders/tvinna.py
|
1
|
1467
|
import dateutil.parser
import scrapy
import scrapy.spiders
from jobs.common import clean_html
from jobs.items import JobsItem
class TvinnaSpider(scrapy.spiders.XMLFeedSpider):
name = "tvinna"
start_urls = ["http://www.tvinna.is/feed/?post_type=job_listing"]
itertag = "item"
namespaces = [
("atom", "http://www.w3.org/2005/Atom"),
("content", "http://purl.org/rss/1.0/modules/content/"),
("dc", "http://purl.org/dc/elements/1.1/"),
("slash", "http://purl.org/rss/1.0/modules/slash/"),
("sy", "http://purl.org/rss/1.0/modules/syndication/"),
("wfw", "http://wellformedweb.org/CommentAPI/"),
]
def parse_node(self, response, node):
item = JobsItem()
item["spider"] = self.name
item["title"] = node.xpath("title/text()").extract_first()
item["url"] = url = node.xpath("link/text()").extract_first()
time_posted = node.xpath("pubDate/text()").extract_first()
item["posted"] = dateutil.parser.parse(time_posted).isoformat()
item["description"] = clean_html(
node.xpath("content:encoded/text()").extract_first()
)
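        # the feed item does not carry the employer, so follow the job link and
        # scrape the company name from the listing page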
request = scrapy.Request(url, callback=self.parse_specific_job)
request.meta["item"] = item
yield request
def parse_specific_job(self, response):
item = response.meta["item"]
item["company"] = response.css(".company a::text").extract_first()
yield item
|
apache-2.0
| 836,780,782,285,987,300 | 35.675 | 74 | 0.608044 | false |
g3rd/django-akamai-storage
|
akamai_storage/forms/models.py
|
1
|
1830
|
from django.forms.models import BaseModelForm, ErrorList, model_to_dict
class AkamaiBaseModelForm(BaseModelForm):
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=None,
empty_permitted=False, instance=None):
opts = self._meta
if opts.model is None:
raise ValueError('ModelForm has no model class specified.')
if instance is None:
# if we didn't get an instance, instantiate a new one
self.instance = opts.model()
object_data = {}
else:
self.instance = instance
object_data = model_to_dict(instance, opts.fields, opts.exclude)
# if initial was provided, it should override the values from instance
if initial is not None:
object_data.update(initial)
# self._validate_unique will be set to True by BaseModelForm.clean().
# It is False by default so overriding self.clean() and failing to call
# super will stop validate_unique from being called.
self._validate_unique = False
super(BaseModelForm, self).__init__(data, files, auto_id, prefix, object_data,
error_class, label_suffix, empty_permitted)
# Apply ``limit_choices_to`` to each field.
for field_name in self.fields:
formfield = self.fields[field_name]
if hasattr(formfield, 'queryset'):
limit_choices_to = formfield.limit_choices_to
if limit_choices_to is not None:
if callable(limit_choices_to):
limit_choices_to = limit_choices_to()
formfield.queryset = formfield.queryset.complex_filter(limit_choices_to)
|
mit
| -6,193,288,522,580,875,000 | 49.833333 | 92 | 0.603279 | false |
foolcage/fooltrader
|
fooltrader/spiders/america/sp500_spider.py
|
1
|
3418
|
# -*- coding: utf-8 -*-
import pandas as pd
import scrapy
from scrapy import Request
from scrapy import Selector
from scrapy import signals
from fooltrader.contract.files_contract import get_kdata_path
from fooltrader.utils.utils import index_df_with_time, to_time_str, to_float
class Sp500Spider(scrapy.Spider):
name = "sp500_spider"
def __init__(self, name=None, **kwargs):
super().__init__(name, **kwargs)
self.security_item = {'id': 'index_nasdaq_sp500',
'code': 'SP500',
'name': 'SP500',
'listDate': '1871-01-01',
'timestamp': '1871-01-01',
'exchange': 'nasdaq',
'type': 'index'}
self.df_close = pd.DataFrame()
self.df_pe = pd.DataFrame()
def start_requests(self):
pe_url = 'http://www.multpl.com/table?f=m'
price_url = 'http://www.multpl.com/s-p-500-historical-prices/table/by-month'
yield Request(url=pe_url,
callback=self.download_sp500_pe)
yield Request(url=price_url,
callback=self.download_sp500_price)
def download_sp500_price(self, response):
trs = response.xpath('//*[@id="datatable"]/tr').extract()
price_jsons = []
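        # each datatable row after the header holds a month label and the
        # corresponding S&P 500 closing value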
try:
for tr in trs[1:]:
tds = Selector(text=tr).xpath('//td//text()').extract()
tds = [x.strip() for x in tds if x.strip()]
price_jsons.append({"timestamp": to_time_str(tds[0]),
"close": to_float(tds[1])})
if price_jsons:
self.df_close = self.df_close.append(price_jsons, ignore_index=True)
self.df_close = index_df_with_time(self.df_close)
except Exception as e:
self.logger.exception('error when getting sp500 price url={} error={}'.format(response.url, e))
def download_sp500_pe(self, response):
trs = response.xpath('//*[@id="datatable"]/tr').extract()
price_jsons = []
try:
for tr in trs[1:]:
tds = Selector(text=tr).xpath('//td//text()').extract()
tds = [x.strip() for x in tds if x.strip()]
price_jsons.append({"timestamp": to_time_str(tds[0]),
"pe": to_float(tds[1])})
if price_jsons:
self.df_pe = self.df_pe.append(price_jsons, ignore_index=True)
self.df_pe = index_df_with_time(self.df_pe)
except Exception as e:
self.logger.exception('error when getting sp500 pe url={} error={}'.format(response.url, e))
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
spider = super(Sp500Spider, cls).from_crawler(crawler, *args, **kwargs)
crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)
return spider
def spider_closed(self, spider, reason):
self.df_pe['close'] = self.df_close['close']
self.df_pe['code'] = self.security_item['code']
self.df_pe['securityId'] = self.security_item['id']
self.df_pe['name'] = self.security_item['name']
self.df_pe.to_csv(get_kdata_path(self.security_item), index=False)
spider.logger.info('Spider closed: %s,%s\n', spider.name, reason)
|
mit
| -1,803,275,329,661,801,200 | 37.840909 | 107 | 0.547689 | false |
Azure/azure-sdk-for-python
|
sdk/core/azure-core/tests/testserver_tests/test_rest_http_request.py
|
1
|
11042
|
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for
# license information.
# -------------------------------------------------------------------------
# NOTE: These tests are heavily inspired from the httpx test suite: https://github.com/encode/httpx/tree/master/tests
# Thank you httpx for your wonderful tests!
import io
import pytest
import sys
import collections
from typing import Generator
from azure.core.rest import HttpRequest
@pytest.fixture
def assert_iterator_body():
def _comparer(request, final_value):
content = b"".join([p for p in request.content])
assert content == final_value
return _comparer
def test_request_repr():
request = HttpRequest("GET", "http://example.org")
assert repr(request) == "<HttpRequest [GET], url: 'http://example.org'>"
def test_no_content():
request = HttpRequest("GET", "http://example.org")
assert "Content-Length" not in request.headers
def test_content_length_header():
request = HttpRequest("POST", "http://example.org", content=b"test 123")
assert request.headers["Content-Length"] == "8"
def test_iterable_content(assert_iterator_body):
class Content:
def __iter__(self):
yield b"test 123" # pragma: nocover
request = HttpRequest("POST", "http://example.org", content=Content())
assert request.headers == {}
assert_iterator_body(request, b"test 123")
def test_generator_with_transfer_encoding_header(assert_iterator_body):
def content():
yield b"test 123" # pragma: nocover
request = HttpRequest("POST", "http://example.org", content=content())
assert request.headers == {}
assert_iterator_body(request, b"test 123")
def test_generator_with_content_length_header(assert_iterator_body):
def content():
yield b"test 123" # pragma: nocover
headers = {"Content-Length": "8"}
request = HttpRequest(
"POST", "http://example.org", content=content(), headers=headers
)
assert request.headers == {"Content-Length": "8"}
assert_iterator_body(request, b"test 123")
def test_url_encoded_data():
request = HttpRequest("POST", "http://example.org", data={"test": "123"})
assert request.headers["Content-Type"] == "application/x-www-form-urlencoded"
assert request.content == {'test': '123'} # httpx makes this just b'test=123'. set_formdata_body is still keeping it as a dict
def test_json_encoded_data():
request = HttpRequest("POST", "http://example.org", json={"test": 123})
assert request.headers["Content-Type"] == "application/json"
assert request.content == '{"test": 123}'
def test_headers():
request = HttpRequest("POST", "http://example.org", json={"test": 123})
assert request.headers == {
"Content-Type": "application/json",
"Content-Length": "13",
}
def test_ignore_transfer_encoding_header_if_content_length_exists():
"""
`Transfer-Encoding` should be ignored if `Content-Length` has been set explicitly.
See https://github.com/encode/httpx/issues/1168
"""
def streaming_body(data):
yield data # pragma: nocover
data = streaming_body(b"abcd")
headers = {"Content-Length": "4"}
request = HttpRequest("POST", "http://example.org", data=data, headers=headers)
assert "Transfer-Encoding" not in request.headers
assert request.headers["Content-Length"] == "4"
def test_override_accept_encoding_header():
headers = {"Accept-Encoding": "identity"}
request = HttpRequest("GET", "http://example.org", headers=headers)
assert request.headers["Accept-Encoding"] == "identity"
"""Test request body"""
def test_empty_content():
request = HttpRequest("GET", "http://example.org")
assert request.content is None
def test_string_content():
request = HttpRequest("PUT", "http://example.org", content="Hello, world!")
assert request.headers == {"Content-Length": "13", "Content-Type": "text/plain"}
assert request.content == "Hello, world!"
# Support 'data' for compat with requests.
request = HttpRequest("PUT", "http://example.org", data="Hello, world!")
assert request.headers == {"Content-Length": "13", "Content-Type": "text/plain"}
assert request.content == "Hello, world!"
# content length should not be set for GET requests
request = HttpRequest("GET", "http://example.org", data="Hello, world!")
assert request.headers == {"Content-Length": "13", "Content-Type": "text/plain"}
assert request.content == "Hello, world!"
@pytest.mark.skipif(sys.version_info < (3, 0),
reason="In 2.7, b'' is the same as a string, so will have text/plain content type")
def test_bytes_content():
request = HttpRequest("PUT", "http://example.org", content=b"Hello, world!")
assert request.headers == {"Content-Length": "13"}
assert request.content == b"Hello, world!"
# Support 'data' for compat with requests.
request = HttpRequest("PUT", "http://example.org", data=b"Hello, world!")
assert request.headers == {"Content-Length": "13"}
assert request.content == b"Hello, world!"
# should still be set regardless of method
request = HttpRequest("GET", "http://example.org", data=b"Hello, world!")
assert request.headers == {"Content-Length": "13"}
assert request.content == b"Hello, world!"
def test_iterator_content(assert_iterator_body):
# NOTE: in httpx, content reads out the actual value. Don't do that (yet) in azure rest
def hello_world():
yield b"Hello, "
yield b"world!"
request = HttpRequest("POST", url="http://example.org", content=hello_world())
assert isinstance(request.content, collections.Iterable)
assert_iterator_body(request, b"Hello, world!")
assert request.headers == {}
# Support 'data' for compat with requests.
request = HttpRequest("POST", url="http://example.org", data=hello_world())
assert isinstance(request.content, collections.Iterable)
assert_iterator_body(request, b"Hello, world!")
assert request.headers == {}
# transfer encoding should still be set for GET requests
request = HttpRequest("GET", url="http://example.org", data=hello_world())
assert isinstance(request.content, collections.Iterable)
assert_iterator_body(request, b"Hello, world!")
assert request.headers == {}
def test_json_content():
request = HttpRequest("POST", url="http://example.org", json={"Hello": "world!"})
assert request.headers == {
"Content-Length": "19",
"Content-Type": "application/json",
}
assert request.content == '{"Hello": "world!"}'
def test_urlencoded_content():
# NOTE: not adding content length setting and content testing bc we're not adding content length in the rest code
# that's dealt with later in the pipeline.
request = HttpRequest("POST", url="http://example.org", data={"Hello": "world!"})
assert request.headers == {
"Content-Type": "application/x-www-form-urlencoded",
}
@pytest.mark.parametrize(("key"), (1, 2.3, None))
def test_multipart_invalid_key(key):
data = {key: "abc"}
files = {"file": io.BytesIO(b"<file content>")}
with pytest.raises(TypeError) as e:
HttpRequest(
url="http://127.0.0.1:8000/",
method="POST",
data=data,
files=files,
)
assert "Invalid type for data name" in str(e.value)
assert repr(key) in str(e.value)
@pytest.mark.skipif(sys.version_info < (3, 0),
reason="In 2.7, b'' is the same as a string, so check doesn't fail")
def test_multipart_invalid_key_binary_string():
data = {b"abc": "abc"}
files = {"file": io.BytesIO(b"<file content>")}
with pytest.raises(TypeError) as e:
HttpRequest(
url="http://127.0.0.1:8000/",
method="POST",
data=data,
files=files,
)
assert "Invalid type for data name" in str(e.value)
assert repr(b"abc") in str(e.value)
@pytest.mark.parametrize(("value"), (object(), {"key": "value"}))
def test_multipart_invalid_value(value):
data = {"text": value}
files = {"file": io.BytesIO(b"<file content>")}
with pytest.raises(TypeError) as e:
HttpRequest("POST", "http://127.0.0.1:8000/", data=data, files=files)
assert "Invalid type for data value" in str(e.value)
def test_empty_request():
request = HttpRequest("POST", url="http://example.org", data={}, files={})
assert request.headers == {}
assert not request.content # in core, we don't convert urlencoded dict to bytes representation in content
def test_read_content(assert_iterator_body):
def content():
yield b"test 123"
request = HttpRequest("POST", "http://example.org", content=content())
assert_iterator_body(request, b"test 123")
# in this case, request._data is what we end up passing to the requests transport
assert isinstance(request._data, collections.Iterable)
def test_complicated_json(client):
# thanks to Sean Kane for this test!
input = {
'EmptyByte': '',
'EmptyUnicode': '',
'SpacesOnlyByte': ' ',
'SpacesOnlyUnicode': ' ',
'SpacesBeforeByte': ' Text',
'SpacesBeforeUnicode': ' Text',
'SpacesAfterByte': 'Text ',
'SpacesAfterUnicode': 'Text ',
'SpacesBeforeAndAfterByte': ' Text ',
'SpacesBeforeAndAfterUnicode': ' Text ',
'啊齄丂狛': 'ꀕ',
'RowKey': 'test2',
'啊齄丂狛狜': 'hello',
"singlequote": "a''''b",
"doublequote": 'a""""b',
"None": None,
}
request = HttpRequest("POST", "/basic/complicated-json", json=input)
r = client.send_request(request)
r.raise_for_status()
# NOTE: For files, we don't allow list of tuples yet, just dict. Will uncomment when we add this capability
# def test_multipart_multiple_files_single_input_content():
# files = [
# ("file", io.BytesIO(b"<file content 1>")),
# ("file", io.BytesIO(b"<file content 2>")),
# ]
# request = HttpRequest("POST", url="http://example.org", files=files)
# assert request.headers == {
# "Content-Length": "271",
# "Content-Type": "multipart/form-data; boundary=+++",
# }
# assert request.content == b"".join(
# [
# b"--+++\r\n",
# b'Content-Disposition: form-data; name="file"; filename="upload"\r\n',
# b"Content-Type: application/octet-stream\r\n",
# b"\r\n",
# b"<file content 1>\r\n",
# b"--+++\r\n",
# b'Content-Disposition: form-data; name="file"; filename="upload"\r\n',
# b"Content-Type: application/octet-stream\r\n",
# b"\r\n",
# b"<file content 2>\r\n",
# b"--+++--\r\n",
# ]
# )
|
mit
| -4,604,077,631,568,320,000 | 35.140984 | 131 | 0.621394 | false |
flennerhag/mlens
|
mlens/ensemble/tests/test_a_sklearn.py
|
1
|
7958
|
"""
Test Scikit-learn
"""
import numpy as np
from mlens.ensemble import SuperLearner, Subsemble, BlendEnsemble, TemporalEnsemble
from mlens.testing.dummy import return_pickled
try:
from sklearn.utils.estimator_checks import check_estimator
from sklearn.linear_model import Lasso, LinearRegression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.datasets import load_boston
has_sklearn = True
except ImportError:
has_sklearn = False
if has_sklearn:
X, y = load_boston(True)
estimators = [Lasso(),
GradientBoostingRegressor(),
LinearRegression(),
KNeighborsRegressor(),
SVR(gamma='scale'),
RandomForestRegressor(n_estimators=100),
]
est_prep = {'prep1': estimators,
'prep2': estimators,
'prep3': estimators}
prep_1 = [PCA()]
prep_2 = [PolynomialFeatures(), StandardScaler()]
prep = {'prep1': prep_1,
'prep2': prep_2,
'prep3': []}
def get_ensemble(cls, backend, preprocessing, **kwargs):
"""Get ensemble."""
if preprocessing:
est = est_prep
else:
est = estimators
ens = cls(backend=backend, **kwargs)
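    # one layer of base regressors (optionally behind preprocessing pipelines)
    # topped with a plain linear regression meta learner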
ens.add(est, preprocessing)
ens.add(LinearRegression(), meta=True)
return ens
def test_super_learner_s_m():
"""[SuperLearner] Test scikit-learn comp - mp | np"""
ens = get_ensemble(SuperLearner, 'multiprocessing', None)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_super_learner_f_m():
"""[SuperLearner] Test scikit-learn comp - mp | p"""
ens = get_ensemble(SuperLearner, 'multiprocessing', prep)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_super_learner_s_t():
"""[SuperLearner] Test scikit-learn comp - th | np"""
ens = get_ensemble(SuperLearner, 'threading', None)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_super_learner_f_t():
"""[SuperLearner] Test scikit-learn comp - th | p"""
ens = get_ensemble(SuperLearner, 'threading', prep)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_subsemble_s_m():
"""[Subsemble] Test scikit-learn comp - mp | np"""
ens = get_ensemble(Subsemble, 'multiprocessing', None)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_subsemble_f_m():
"""[Subsemble] Test scikit-learn comp - mp | p"""
ens = get_ensemble(Subsemble, 'multiprocessing', prep)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_subsemble_s_t():
"""[Subsemble] Test scikit-learn comp - th | np"""
ens = get_ensemble(Subsemble, 'threading', None)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_subsemble_f_t():
"""[Subsemble] Test scikit-learn comp - th | p"""
ens = get_ensemble(Subsemble, 'threading', prep)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_blend_s_m():
"""[BlendEnsemble] Test scikit-learn comp - mp | np"""
ens = get_ensemble(BlendEnsemble, 'multiprocessing', None)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_blend_f_m():
"""[BlendEnsemble] Test scikit-learn comp - mp | p"""
ens = get_ensemble(BlendEnsemble, 'multiprocessing', prep)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_blend_s_t():
"""[BlendEnsemble] Test scikit-learn comp - th | np"""
ens = get_ensemble(BlendEnsemble, 'threading', None)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_blend_f_t():
"""[BlendEnsemble] Test scikit-learn comp - th | p"""
ens = get_ensemble(BlendEnsemble, 'threading', prep)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_temporal_s_m():
"""[TemporalEnsemble] Test scikit-learn comp - mp | np"""
ens = get_ensemble(TemporalEnsemble, 'multiprocessing', None, step_size=10)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_temporal_f_m():
"""[TemporalEnsemble] Test scikit-learn comp - mp | p"""
ens = get_ensemble(TemporalEnsemble, 'multiprocessing', prep, step_size=10)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_temporal_s_t():
"""[TemporalEnsemble] Test scikit-learn comp - th | np"""
ens = get_ensemble(TemporalEnsemble, 'threading', None, step_size=10)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_temporal_f_t():
"""[TemporalEnsemble] Test scikit-learn comp - th | p"""
ens = get_ensemble(TemporalEnsemble, 'threading', prep, step_size=10)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
|
mit
| -4,493,878,293,141,852,000 | 29.258555 | 83 | 0.573637 | false |
pyfa-org/eos
|
tests/integration/restriction/restriction/test_drone_bandwidth.py
|
1
|
10465
|
# ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from eos import Drone
from eos import Restriction
from eos import Ship
from eos import State
from eos.const.eos import ModAffecteeFilter
from eos.const.eos import ModDomain
from eos.const.eos import ModOperator
from eos.const.eve import AttrId
from eos.const.eve import EffectCategoryId
from tests.integration.restriction.testcase import RestrictionTestCase
class TestDroneBandwidth(RestrictionTestCase):
"""Check functionality of drone bandwidth restriction."""
def setUp(self):
RestrictionTestCase.setUp(self)
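        # register the ship bandwidth output attribute and the per-drone usage
        # attribute that every test in this case relies on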
self.mkattr(attr_id=AttrId.drone_bandwidth)
self.mkattr(attr_id=AttrId.drone_bandwidth_used)
def test_fail_single(self):
        # When the ship provides drone bandwidth output but a single consumer
        # demands more, an error should be raised
self.fit.ship = Ship(self.mktype(
attrs={AttrId.drone_bandwidth: 40}).id)
item = Drone(
self.mktype(attrs={AttrId.drone_bandwidth_used: 50}).id,
state=State.online)
self.fit.drones.add(item)
# Action
error = self.get_error(item, Restriction.drone_bandwidth)
# Verification
self.assertIsNotNone(error)
self.assertEqual(error.output, 40)
self.assertEqual(error.total_use, 50)
self.assertEqual(error.item_use, 50)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_fail_multiple(self):
        # When each consumer alone requires less than the drone bandwidth output,
        # but together they want more than the total output, it should be an
        # erroneous situation
self.fit.ship = Ship(self.mktype(
attrs={AttrId.drone_bandwidth: 40}).id)
item1 = Drone(
self.mktype(attrs={AttrId.drone_bandwidth_used: 25}).id,
state=State.online)
self.fit.drones.add(item1)
item2 = Drone(
self.mktype(attrs={AttrId.drone_bandwidth_used: 20}).id,
state=State.online)
self.fit.drones.add(item2)
# Action
error1 = self.get_error(item1, Restriction.drone_bandwidth)
# Verification
self.assertIsNotNone(error1)
self.assertEqual(error1.output, 40)
self.assertEqual(error1.total_use, 45)
self.assertEqual(error1.item_use, 25)
# Action
error2 = self.get_error(item2, Restriction.drone_bandwidth)
# Verification
self.assertIsNotNone(error2)
self.assertEqual(error2.output, 40)
self.assertEqual(error2.total_use, 45)
self.assertEqual(error2.item_use, 20)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_fail_modified(self):
# Make sure modified drone bandwidth values are taken
self.fit.ship = Ship(self.mktype(
attrs={AttrId.drone_bandwidth: 50}).id)
src_attr = self.mkattr()
modifier = self.mkmod(
affectee_filter=ModAffecteeFilter.item,
affectee_domain=ModDomain.self,
affectee_attr_id=AttrId.drone_bandwidth_used,
operator=ModOperator.post_mul,
affector_attr_id=src_attr.id)
effect = self.mkeffect(
category_id=EffectCategoryId.passive,
modifiers=[modifier])
item = Drone(
self.mktype(
attrs={AttrId.drone_bandwidth_used: 50, src_attr.id: 2},
effects=[effect]).id,
state=State.online)
self.fit.drones.add(item)
# Action
error = self.get_error(item, Restriction.drone_bandwidth)
# Verification
self.assertIsNotNone(error)
self.assertEqual(error.output, 50)
self.assertEqual(error.total_use, 100)
self.assertEqual(error.item_use, 100)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_fail_ship_absent(self):
# When stats module does not specify output, make sure it's assumed to
# be 0
item = Drone(
self.mktype(attrs={AttrId.drone_bandwidth_used: 5}).id,
state=State.online)
self.fit.drones.add(item)
# Action
error = self.get_error(item, Restriction.drone_bandwidth)
# Verification
self.assertIsNotNone(error)
self.assertEqual(error.output, 0)
self.assertEqual(error.total_use, 5)
self.assertEqual(error.item_use, 5)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_fail_ship_attr_absent(self):
self.fit.ship = Ship(self.mktype().id)
item = Drone(
self.mktype(attrs={AttrId.drone_bandwidth_used: 50}).id,
state=State.online)
self.fit.drones.add(item)
# Action
error = self.get_error(item, Restriction.drone_bandwidth)
# Verification
self.assertIsNotNone(error)
self.assertEqual(error.output, 0)
self.assertEqual(error.total_use, 50)
self.assertEqual(error.item_use, 50)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_fail_ship_not_loaded(self):
self.fit.ship = Ship(self.allocate_type_id())
item = Drone(
self.mktype(attrs={AttrId.drone_bandwidth_used: 5}).id,
state=State.online)
self.fit.drones.add(item)
# Action
error = self.get_error(item, Restriction.drone_bandwidth)
# Verification
self.assertIsNotNone(error)
self.assertEqual(error.output, 0)
self.assertEqual(error.total_use, 5)
self.assertEqual(error.item_use, 5)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_mix_usage_zero(self):
# If some item has zero usage and drone bandwidth error is still raised,
# check it's not raised for item with zero usage
self.fit.ship = Ship(self.mktype(
attrs={AttrId.drone_bandwidth: 50}).id)
item1 = Drone(
self.mktype(attrs={AttrId.drone_bandwidth_used: 100}).id,
state=State.online)
self.fit.drones.add(item1)
item2 = Drone(
self.mktype(attrs={AttrId.drone_bandwidth_used: 0}).id,
state=State.online)
self.fit.drones.add(item2)
# Action
error1 = self.get_error(item1, Restriction.drone_bandwidth)
# Verification
self.assertIsNotNone(error1)
self.assertEqual(error1.output, 50)
self.assertEqual(error1.total_use, 100)
self.assertEqual(error1.item_use, 100)
# Action
error2 = self.get_error(item2, Restriction.drone_bandwidth)
# Verification
self.assertIsNone(error2)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_pass(self):
# When total consumption is less than output, no errors should be raised
self.fit.ship = Ship(self.mktype(
attrs={AttrId.drone_bandwidth: 50}).id)
item1 = Drone(
self.mktype(attrs={AttrId.drone_bandwidth_used: 25}).id,
state=State.online)
self.fit.drones.add(item1)
item2 = Drone(
self.mktype(attrs={AttrId.drone_bandwidth_used: 20}).id,
state=State.online)
self.fit.drones.add(item2)
# Action
error1 = self.get_error(item1, Restriction.drone_bandwidth)
# Verification
self.assertIsNone(error1)
# Action
error2 = self.get_error(item2, Restriction.drone_bandwidth)
# Verification
self.assertIsNone(error2)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_pass_item_state(self):
# When item isn't online, it shouldn't consume anything
self.fit.ship = Ship(self.mktype(
attrs={AttrId.drone_bandwidth: 40}).id)
item = Drone(
self.mktype(attrs={AttrId.drone_bandwidth_used: 50}).id,
state=State.offline)
self.fit.drones.add(item)
# Action
error = self.get_error(item, Restriction.drone_bandwidth)
# Verification
self.assertIsNone(error)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_pass_item_attr_absent(self):
self.fit.ship = Ship(self.mktype(
attrs={AttrId.drone_bandwidth: 40}).id)
item = Drone(self.mktype().id, state=State.online)
self.fit.drones.add(item)
# Action
error = self.get_error(item, Restriction.drone_bandwidth)
# Verification
self.assertIsNone(error)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_pass_item_not_loaded(self):
self.fit.ship = Ship(self.mktype(
attrs={AttrId.drone_bandwidth: 0}).id)
item = Drone(self.allocate_type_id(), state=State.online)
self.fit.drones.add(item)
# Action
error = self.get_error(item, Restriction.drone_bandwidth)
# Verification
self.assertIsNone(error)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
|
lgpl-3.0
| 2,386,085,293,114,946,600 | 37.903346 | 80 | 0.624367 | false |
sadanandb/pmt
|
src/tactic/ui/container/pop_window_wdg.py
|
1
|
40581
|
############################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ["PopWindowWdg","ResizeScrollWdg"]
from pyasm.web import DivWdg, HtmlElement, WebContainer
from pyasm.widget import IconWdg
from tactic.ui.common import BaseRefreshWdg
class PopWindowWdg(BaseRefreshWdg):
def get_args_keys(my):
return {
'top_id': 'Element ID for top popup window element',
'title': 'Text for Title Bar banner',
'width': 'Width of content area',
'height': 'Height of content area'
}
def init(my):
raise Exception("DEPRECATED")
my.z_start = 200
my.top_id = my.kwargs.get('top_id')
my.title = my.kwargs.get('title')
if not my.title:
my.title = "-- No Title --"
# defaults for content size ...
my.content_width = 400
my.content_height = 200
width = my.kwargs.get('width')
height = my.kwargs.get('height')
if width:
if type(width) == str:
my.content_width = int( width.replace('px','') )
else:
my.content_width = width
if height:
if type(height) == str:
my.content_height = int( height.replace('px','') )
else:
my.content_height = height
my.added_widgets = []
def set_style(my, new_style_str):
return ("overflow: hidden; border-width: 0px; padding: 0px; margin: 0px; %s" % new_style_str)
def add(my, wdg):
my.added_widgets.append( wdg )
def get_display(my):
is_IE = WebContainer.get_web().is_IE()
cw = my.content_width
ch = my.content_height
title_h = 20 # title bar height
shd_w = 8 # drop shadow width
shd_h = 8 # drop shadow height
full_w = cw + (2 * shd_w)
full_h = ch + title_h + (2 * shd_h)
border_sz = 1 # border size for inside content area
# top DIV element for this widget ...
popwin_div = DivWdg()
if my.top_id:
popwin_div.set_id( my.top_id )
popwin_div.add_class("SPT_PWIN_TOP_DIV SPT_RSW_OUTER_TOP SPT_PUW spt_popup spt_pwin_DX spt_pwin_DY")
popwin_div.set_z_start( my.z_start )
popwin_div.add_styles( my.set_style( "display: none; position: absolute; top: 200px; left: 300px; " \
"width: %spx; height: %spx;" % (full_w, full_h)) )
left_div = DivWdg()
left_div.add_class("spt_pwin_A spt_pwin_DY")
left_div.add_styles( my.set_style("float: left; width: %spx; height: %spx;" % (shd_w, full_h)) )
center_div = DivWdg()
center_div.add_class("spt_pwin_B spt_pwin_DX spt_pwin_DY")
center_div.add_styles( my.set_style("float: left; width: %spx; height: %spx;" % (cw, full_h)) )
right_div = DivWdg()
right_div.add_class("spt_pwin_C spt_pwin_DY")
right_div.add_styles( my.set_style("float: left; width: %spx; height: %spx;" % (shd_w, full_h)) )
popwin_div.add( left_div )
popwin_div.add( center_div )
popwin_div.add( right_div )
# Do LEFT side ...
#
lt_div = DivWdg()
lm_div = DivWdg()
lb_div = DivWdg()
lt_div.add_styles( my.set_style("width: %spx; height: %spx;" % (shd_w, shd_h)) )
lt_div.add_class("css_shadow_top_left spt_pwin_A_1")
adj_h = ch + title_h
if not is_IE:
adj_h = adj_h + (2 * border_sz)
lm_div.add_styles( my.set_style("width: %spx; height: %spx;" % (shd_w, adj_h)) )
lm_div.add_class("css_shadow_left spt_pwin_A_2 spt_pwin_DY")
lb_div.add_styles( my.set_style("width: %spx; height: %spx;" % (shd_w, shd_h)) )
lb_div.add_class("css_shadow_bottom_left spt_pwin_A_3")
left_div.add( lt_div )
left_div.add( lm_div )
left_div.add( lb_div )
# Do Center/Middle bit ...
#
center_top_div = DivWdg()
center_resize_scroll_wdg = ResizeScrollWdg( width=cw, height=ch, scroll_bar_size_str='thick',
scroll_expansion='outside', affects_outside_flag=True,
exclude_top_border=True )
for wdg in my.added_widgets:
center_resize_scroll_wdg.add( wdg )
center_bottom_div = DivWdg()
center_top_div.add_styles( my.set_style("width: %spx; height: %spx;" % (cw, shd_h)) )
center_top_div.add_class("css_shadow_top spt_pwin_B_1 spt_pwin_DX")
center_title_div = DivWdg()
center_title_div.add_class("spt_pwin_B_title SPT_PWIN_TITLE_BAR spt_pwin_DX")
center_title_div.add_behavior( { 'type':'drag', 'drag_el': 'spt.popup.get_popup(@);',
'options': {'z_sort': 'bring_forward'} } )
border_adj_cw = cw
if not is_IE:
border_adj_cw = cw - (2 * border_sz)
center_title_div.add_styles( my.set_style("cursor: move; border: %spx solid black; " \
"background-color: #202020; color: white; width: %spx; " \
"height: %spx;" % \
(border_sz, border_adj_cw, title_h)) )
title_div = DivWdg()
title_div.add_styles( "width: 100%; height: 100%; padding: 4px;" )
title_div.add( my.title )
center_title_div.add( title_div )
center_bottom_div.add_styles( my.set_style("width: %spx; height: %spx;" % (cw, shd_h)) )
center_bottom_div.add_class("css_shadow_bottom spt_pwin_B_3 spt_pwin_DX")
center_div.add( center_top_div )
center_div.add( center_title_div )
center_div.add( center_resize_scroll_wdg )
center_div.add( center_bottom_div )
# Do RIGHT side ...
#
rt_div = DivWdg()
rm_div = DivWdg()
rb_div = DivWdg()
rt_div.add_styles( my.set_style("width: %spx; height: %spx;" % (shd_w, shd_h)) )
rt_div.add_class("css_shadow_top_right spt_pwin_C_1")
adj_h = ch + title_h
if not is_IE:
adj_h = adj_h + (2 * border_sz)
rm_div.add_styles( my.set_style("width: %spx; height: %spx;" % (shd_w, adj_h)) )
rm_div.add_class("css_shadow_right spt_pwin_C_2 spt_pwin_DY")
rb_div.add_styles( my.set_style("width: %spx; height: %spx;" % (shd_w, shd_h)) )
rb_div.add_class("css_shadow_bottom_right spt_pwin_C_3")
right_div.add( rt_div )
right_div.add( rm_div )
right_div.add( rb_div )
return popwin_div
class ResizeScrollWdg(BaseRefreshWdg):
def get_args_keys(my):
return {
'width': 'Width of content area',
'height': 'Height of content area',
            'scroll_bar_size_str': 'String value of either "thin", "medium" or "thick", defining which size of' \
                                   ' scroll bar to generate. Defaults to "medium". ' \
                                   'PopWindowWdg uses "thick".',
'scroll_expansion': 'String value that is either "inside" or "outside" - for direction from content' \
' area border to expand in when scroll bars appear. Defaults to "inside".',
'no_resize': 'set to True if you do not want resize, and just want scroll bars. Defaults to False.',
'affects_outside_flag': 'set to True if scroll bars and resizing affects immediately surrounding' \
' elements, like usage in PopWindowWdg. Defaults to False.',
'exclude_top_border': 'if True then the drawing of a top border is skipped. Defaults to False.',
'min_content_w': 'Integer number in pixels for minimum content width. Defaults to 150.',
'min_content_h': 'Integer number in pixels for minimum content height. Defaults to 100.',
'max_content_w': 'Integer number in pixels for maximum content width. Defaults to 0, meaning' \
' no maximum content width.',
'max_content_h': 'Integer number in pixels for maximum content height. Defaults to 0, meaning' \
' no maximum content height.',
'set_max_to_content_size': 'Set to True if you want max size to be maximum resize dimensions to' \
' be same as the content dimensions.'
}
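    # Illustrative usage sketch (not part of the widget itself): this mirrors how
    # PopWindowWdg.get_display() above instantiates ResizeScrollWdg. The kwarg
    # values and the "some_content_wdg"/"outer_div" names are placeholders only;
    # see get_args_keys() for the full list of supported options.
    #
    #   scroll_wdg = ResizeScrollWdg( width=400, height=200,
    #                                 scroll_bar_size_str='medium',
    #                                 scroll_expansion='inside' )
    #   scroll_wdg.add( some_content_wdg )   # any DivWdg/HtmlElement content
    #   outer_div.add( scroll_wdg )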
def init(my):
# defaults for content size ...
my.content_width = 0
my.content_height = 0
width = my.kwargs.get('width')
height = my.kwargs.get('height')
if width:
if type(width) == str:
my.content_width = int( width.replace('px','') )
else:
my.content_width = width
if height:
if type(height) == str:
my.content_height = int( height.replace('px','') )
else:
my.content_height = height
my.affects_outside_flag = False
if my.kwargs.get('affects_outside_flag'):
my.affects_outside_flag = True
my.scroll_bar_size_str = "medium"
if my.kwargs.get('scroll_bar_size_str'):
my.scroll_bar_size_str = my.kwargs.get('scroll_bar_size_str')
my.scroll_expansion = "inside"
if my.kwargs.get('scroll_expansion'):
my.scroll_expansion = my.kwargs.get('scroll_expansion')
my.exclude_top_border = my.kwargs.get('exclude_top_border')
my.no_resize = my.kwargs.get('no_resize')
my.min_content_w = my.kwargs.get('min_content_w')
if not my.min_content_w:
my.min_content_w = 150
my.min_content_h = my.kwargs.get('min_content_h')
if not my.min_content_h:
my.min_content_h = 100
my.max_content_w = my.kwargs.get('max_content_w')
if not my.max_content_w:
my.max_content_w = 0
my.max_content_h = my.kwargs.get('max_content_h')
if not my.max_content_h:
my.max_content_h = 0
my.set_max_to_content_size = my.kwargs.get('set_max_to_content_size')
my.added_widgets = []
def _get_scroll_bar_pixel_size(my):
size_map = { 'thin': 8, 'medium': 12, 'thick': 16 }
pixel_sz = size_map.get('medium')
if size_map.get( my.scroll_bar_size_str ):
pixel_sz = size_map.get( my.scroll_bar_size_str )
return pixel_sz
def set_style(my, new_style_str):
return ("overflow: hidden; border-width: 0px; padding: 0px; margin: 0px; %s" % new_style_str)
def add(my, wdg):
my.added_widgets.append( wdg )
def get_display(my):
is_IE = WebContainer.get_web().is_IE()
cw = my.content_width
ch = my.content_height
title_h = 20 # title bar height
shd_w = 8 # drop shadow width
shd_h = 8 # drop shadow height
full_w = cw + (2 * shd_w)
full_h = ch + title_h + (2 * shd_h)
resize_sz = 4 # resize control size (width of resize bar)
if my.no_resize:
resize_sz = 1
scroll_sz = my._get_scroll_bar_pixel_size()
in_cw = cw - resize_sz
in_ch = ch - resize_sz
border_sz = 1 # border size for inside content area
border_adj_cw = cw
border_adj_ch = ch
border_adj_in_cw = in_cw
border_adj_in_ch = in_ch
if not is_IE:
border_adj_in_cw = in_cw - border_sz # only using left border, so only one to subtract
if not my.exclude_top_border:
border_adj_in_ch = in_ch - border_sz
border_adj_ch = ch - border_sz
top_div = DivWdg()
top_div.add_styles( my.set_style("width: %spx; height: %spx;" % (cw, ch)) )
top_div.add_behavior( {
'type': 'load',
'cbjs_action': my.get_onload_js()
} )
top_div.add_class("spt_pwin_B_2 SPT_RSW_TOP spt_pwin_DX spt_pwin_DY")
if not my.affects_outside_flag:
top_div.add_class("SPT_RSW_OUTER_TOP")
top_div.set_attr("spt_min_content_w","%d" % my.min_content_w)
top_div.set_attr("spt_min_content_h","%d" % my.min_content_h)
top_div.set_attr("spt_max_content_w","%d" % my.max_content_w)
top_div.set_attr("spt_max_content_h","%d" % my.max_content_h)
top_div.set_attr("spt_set_max_to_content_size","false")
if my.set_max_to_content_size:
top_div.set_attr("spt_set_max_to_content_size","true")
top_div.set_attr("spt_scroll_size", scroll_sz)
top_div.set_attr("spt_scroll_expansion", my.scroll_expansion)
top_div.set_attr("spt_content_w", cw)
top_div.set_attr("spt_content_h", ch)
B2_i = DivWdg()
B2_i.add_class("spt_pwin_B_2_i spt_pwin_DX spt_pwin_DY spt_popup_content")
if not my.exclude_top_border:
B2_i.add_styles( "border-top: 1px solid black;" )
B2_ii = DivWdg()
B2_ii.add_class("spt_pwin_B_2_ii spt_controls_DY spt_pwin_DX")
B2_i.add_styles( my.set_style("width: %spx; height: %spx;" % (cw, border_adj_in_ch)) )
B2_ii.add_styles( my.set_style("width: %spx; height: %spx;" % (cw, resize_sz)) )
top_div.add( B2_i )
top_div.add( B2_ii )
# ---------------------------------------------------------------------------------------------------------
# -- COMMENTED OUT CODE below ... here for reference ... this worked in all browsers except IE (providing a
# -- visual floating handle at bottom right of ResizeScrollWdg for resizing in both dimensions ...
# -- disabling this as it totally messes up in IE
#
# if not my.no_resize:
# resize_bvr = {
# "type": 'drag',
# "cb_set_prefix": "spt.resize_scroll.drag_resize"
# }
# resize_img = HtmlElement.img()
# resize_img.set_attr("src","/context/icons/common/corner_resize_icon.png")
# resize_img.add_class("SPT_PWIN_RESIZE_IMG")
# resize_img.add_styles("position: absolute; right: 2px; bottom: 2px; cursor: se-resize;")
#
# resize_img.add_behavior( resize_bvr )
# top_div.add( resize_img ) # requires top_div to have "position: relative;"
# ---------------------------------------------------------------------------------------------------------
# NOTE: IE includes border in clientHeight/clientWidth of div so don't need to compensate for pixel border
# in specifying width and height ... however all other browsers you need to adjust by subtracting
# 2 from width and 2 from height for a 1 pixel border all arond
B2_i_a = DivWdg() # this is the content div
B2_i_a.add_class("spt_pwin_B_2_i_a SPT_RSW_CONTENT_BOX spt_pwin_DX spt_pwin_DY")
B2_i_a.add_behavior( {'type': 'wheel', 'cbfn_action': 'spt.resize_scroll.wheel_scroll'} )
B2_i_a.add_behavior( {'type': 'load', 'cbjs_action': 'spt.resize_scroll.adjust_for_scroll( bvr.src_el );'} )
content_styles = [
# commenting out: presumption to want a color here?
#"background-color: #666666;",
"border-left: %spx solid black;" % border_sz,
"border-right: none;",
"border-bottom: none;",
"border-top: none;",
"float: left;",
"width: %spx;" % border_adj_in_cw,
"height: %spx;" % border_adj_in_ch
]
B2_i_a.add_styles( my.set_style( ' '.join( content_styles ) ) )
actual_contents = DivWdg()
actual_contents.add_class("SPT_RSW_CONTENTS")
actual_contents.add_styles("float: left;") # apparently the only way to shrink-wrap across browsers!
panning_bvr = {'type': 'panning_scroll',
'cbjs_motion': 'spt.resize_scroll.adjust_scroll_draggables( bvr.src_el );',
'cbjs_action': 'spt.resize_scroll.adjust_scroll_draggables( bvr.src_el );'
}
actual_contents.add_behavior( panning_bvr )
for wdg in my.added_widgets:
actual_contents.add( wdg )
B2_i_a.add( actual_contents )
B2_i_b = DivWdg() # this is the RIGHT CONTROLS [vertical scroll bar]
B2_i_b.add_class("spt_pwin_B_2_i_b SPT_CONTROLS_RIGHT spt_controls_DX spt_pwin_DY")
B2_i_b.add_styles( my.set_style("background-color: #444; float: left; width: %spx; " \
"height: %spx;" % (resize_sz, border_adj_in_ch)) )
y_scroll = DivWdg()
y_scroll.add_class("SPT_SCROLL_Y spt_pwin_DY")
y_scroll_w = scroll_sz
if not is_IE:
y_scroll_w -= 1
y_scroll.add_styles( my.set_style("background-color: #949494; float: left; width: %spx; " \
"display: none; " \
"height: %spx;" % (y_scroll_w, border_adj_in_ch)) )
y_scroll_drag = DivWdg()
y_scroll_drag.add_class("SPT_SCROLL_Y_DRAG")
# 336699 = menu highlight blue, 434343 = mid-grey of dark skin/theme
y_scroll_drag.add_styles(" left: 0px; top: 0px;"\
"border-top: 1px outset #ccc; " \
"border-left: 1px outset #ccc; " \
"border-right: 1px solid #333; " \
"border-bottom: 1px solid #333; " \
) # scroll bar color
y_scroll_drag.add_behavior({'type': 'drag', 'cbfn_motion': 'spt.resize_scroll.drag_y_scroll_draggable_motion'})
y_scroll_drag.add_color('background', 'background2', -30)
y_scroll.add( y_scroll_drag )
B2_i_b.add( y_scroll )
x_resize = DivWdg()
x_resize.add_class("SPT_RESIZE_X spt_pwin_DY")
x_resize.add_styles( my.set_style("float: left; width: %spx; " \
"height: %spx;" % (resize_sz, border_adj_in_ch)) )
x_resize.add_color('background','background2')
if not my.no_resize:
x_resize.add_styles( "cursor: e-resize;" );
x_resize.add_behavior( {"type": 'drag', "cb_set_prefix": "spt.resize_scroll.drag_x_resize"} )
B2_i_b.add( x_resize )
B2_i.add( B2_i_a )
B2_i.add( B2_i_b )
B2_ii_a = DivWdg() # this is the BOTTOM CONTROLS [horizontal scroll bar]
B2_ii_a.add_class("spt_pwin_B_2_ii_a SPT_CONTROLS_BOTTOM spt_controls_DY spt_pwin_DX")
B2_ii_a.add_styles( my.set_style("background-color: black; float: left; width: %spx; " \
"height: %spx;" % (in_cw, resize_sz)) )
x_scroll = DivWdg()
x_scroll.add_class("SPT_SCROLL_X spt_pwin_DX")
x_scroll_h = scroll_sz
x_scroll_w = in_cw
if not is_IE:
x_scroll_h -= 1
x_scroll_w -= 1
x_scroll.add_styles( my.set_style("background-color: #949494; float: left; width: %spx; " \
"display: none; " \
"height: %spx;" % (x_scroll_w, x_scroll_h)) )
x_scroll_drag = DivWdg()
x_scroll_drag.add_class("SPT_SCROLL_X_DRAG")
# 336699 = menu highlight blue, 434343 = mid-grey of dark skin/theme
x_scroll_drag.add_styles("background-color: #434343; left: 0px; top: 0px;"\
"border-top: 1px outset #ccc; " \
"border-left: 1px outset #ccc; " \
"border-right: 1px solid #333; " \
"border-bottom: 1px solid #333; " \
) # scroll bar color
x_scroll_drag.add_behavior({'type': 'drag', 'cbfn_motion': 'spt.resize_scroll.drag_x_scroll_draggable_motion'})
x_scroll_drag.add_color('background', 'background2', -30)
x_scroll.add( x_scroll_drag )
B2_ii_a.add( x_scroll )
y_resize = DivWdg()
y_resize.add_class("SPT_RESIZE_Y spt_pwin_DX")
y_resize.add_styles( my.set_style("float: left; width: %spx; " \
"height: %spx;" % (in_cw, resize_sz)) )
y_resize.add_color('background','background2')
if not my.no_resize:
y_resize.add_styles( "cursor: s-resize;" )
y_resize.add_behavior( {"type": 'drag', "cb_set_prefix": "spt.resize_scroll.drag_y_resize"} )
B2_ii_a.add( y_resize )
B2_ii_b = DivWdg() # this is the resize handle
B2_ii_b.add_class("spt_pwin_B_2_ii_b SPT_RESIZE_HANDLE spt_controls_DX spt_controls_DY")
if my.no_resize:
B2_ii_b.add_styles( my.set_style("background-color: black; float: left; " \
"overflow: hidden; position: relative; " \
"width: %spx; height: %spx;" % (resize_sz, resize_sz)) )
else:
#{
# RESIZE control in X and Y ...
resize_bvr = {
"type": 'drag',
"cb_set_prefix": "spt.resize_scroll.drag_resize"
}
if not is_IE:
resize_sz -= 2
B2_ii_b.add_styles( my.set_style("background-color: #434343; float: left; cursor: se-resize; " \
"border-top: 1px solid black; " \
"border-bottom: 1px solid black; " \
"border-left: 1px solid black; " \
"border-right: 1px solid black; " \
"overflow: visible; " \
"position: relative; " \
"width: %spx; height: %spx;" % (resize_sz, resize_sz)) )
# also make sure that the resize handle div has the resize behavior (along with the resize_img)
B2_ii_b.add_behavior( resize_bvr );
resize_img = HtmlElement.img()
resize_img.set_attr("src","/context/icons/common/corner_resize_icon.png")
resize_img.add_class("SPT_PWIN_RESIZE_IMG")
resize_img.add_styles("cursor: se-resize;")
resize_img.add_styles("position: absolute; top: 0px; left: 0px;")
B2_ii_b.add( resize_img )
#}
B2_ii.add( B2_ii_a )
B2_ii.add( B2_ii_b )
return top_div
def get_onload_js(my):
return r'''
spt.popwin = {};
spt.resize_scroll = {};
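        // Resize the scrollable content area by (dx, dy) pixels: clamp the change to the
        // min/max content sizes stored on the top element, then grow/shrink every element
        // tagged spt_pwin_DX (width) / spt_pwin_DY (height) and refresh the scroll draggables.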
spt.resize_scroll.resize = function( activating_el, dx, dy )
{
var rsw_top_el = activating_el.getParent(".SPT_RSW_TOP");
var content_box_el = rsw_top_el.getElement(".SPT_RSW_CONTENT_BOX");
var content_el = content_box_el.getElement(".SPT_RSW_CONTENTS");
var cb_w = content_box_el.clientWidth;
var cb_h = content_box_el.clientHeight;
var min_content_w = parseInt( rsw_top_el.getProperty("spt_min_content_w") );
var min_content_h = parseInt( rsw_top_el.getProperty("spt_min_content_h") );
var max_content_w = parseInt( rsw_top_el.getProperty("spt_max_content_w") );
var max_content_h = parseInt( rsw_top_el.getProperty("spt_max_content_h") );
var content_w = rsw_top_el.getProperty("spt_content_w");
var content_h = rsw_top_el.getProperty("spt_content_h");
if( content_w ) { content_w = parseInt( content_w ); }
else { content_w = 0; }
if( content_h ) { content_h = parseInt( content_h ); }
else { content_h = 0; }
            var display_content_w = spt.get_render_display_width( content_el );
            var display_content_h = spt.get_render_display_height( content_el );
if( ! content_w ) {
content_w = display_content_w;
}
if( ! content_h ) {
content_h = display_content_h;
}
var set_max_to_content_size = rsw_top_el.getProperty("spt_set_max_to_content_size");
if( spt.is_TRUE( set_max_to_content_size ) ) {
max_content_w = display_content_w;
max_content_h = display_content_h;
}
var scr_left = content_box_el.scrollLeft;
var scr_top = content_box_el.scrollTop;
var top_el = rsw_top_el.getParent(".SPT_PWIN_TOP_DIV");
if( ! top_el ) {
top_el = rsw_top_el;
}
if( max_content_w && (cb_w + dx > max_content_w) ) {
dx = max_content_w - cb_w;
}
var modify_w = false;
if( dx && (cb_w+dx) >= min_content_w ) {
modify_w = true;
if( max_content_w && (cb_w + dx > max_content_w) ) {
modify_w = false;
}
}
if( modify_w ) {
var dx_el_list = top_el.getElements(".spt_pwin_DX");
if( top_el.hasClass("spt_pwin_DX") ) {
dx_el_list.push( top_el );
}
for( var c=0; c < dx_el_list.length; c++ ) {
var el = dx_el_list[c];
var el_w = parseInt( el.getStyle("width") );
el.setStyle("width", (el_w+dx) + "px");
}
if( scr_left && dx > 0 && cb_w + dx + scr_left > max_content_w ) {
var new_scr_left = scr_left - dx;
if( new_scr_left < 0 ) {
new_scr_left = 0;
}
content_box_el.scrollLeft = new_scr_left;
}
}
if( max_content_h && (cb_h + dy > max_content_h) ) {
dy = max_content_h - cb_h;
}
var modify_h = false;
if( dy && (cb_h+dy) >= min_content_h ) {
modify_h = true;
if( max_content_h && (cb_h + dy > max_content_h) ) {
modify_h = false;
}
}
if( modify_h ) {
var dy_el_list = top_el.getElements(".spt_pwin_DY");
if( top_el.hasClass("spt_pwin_DY") ) {
dy_el_list.push( top_el );
}
for( var c=0; c < dy_el_list.length; c++ ) {
var el = dy_el_list[c];
var el_h = parseInt( el.getStyle("height") );
el.setStyle("height", (el_h+dy) + "px");
}
if( scr_top && dy > 0 && cb_h + dy + scr_top > max_content_h ) {
var new_scr_top = scr_top - dy;
if( new_scr_top < 0 ) {
new_scr_top = 0;
}
content_box_el.scrollTop = new_scr_top;
}
}
spt.resize_scroll.adjust_scroll_draggables( activating_el );
}
// spt.resize_scroll.drag_resize_setup = function( evt, bvr, mouse_411 )
// {
// }
spt.resize_scroll.drag_resize_motion = function( evt, bvr, mouse_411 )
{
var dx = mouse_411.curr_x - mouse_411.last_x;
var dy = mouse_411.curr_y - mouse_411.last_y;
spt.resize_scroll.resize( bvr.src_el, dx, dy );
}
spt.resize_scroll.drag_resize_action = function( evt, bvr, mouse_411 )
{
spt.resize_scroll.adjust_for_scroll( bvr.src_el );
}
// spt.resize_scroll.drag_x_resize_setup = function( evt, bvr, mouse_411 )
// {
// }
spt.resize_scroll.drag_x_resize_motion = function( evt, bvr, mouse_411 )
{
var dx = mouse_411.curr_x - mouse_411.last_x;
var dy = 0;
spt.resize_scroll.resize( bvr.src_el, dx, dy );
}
spt.resize_scroll.drag_x_resize_action = function( evt, bvr, mouse_411 )
{
spt.resize_scroll.adjust_for_scroll( bvr.src_el );
}
// spt.resize_scroll.drag_y_resize_setup = function( evt, bvr, mouse_411 )
// {
// }
spt.resize_scroll.drag_y_resize_motion = function( evt, bvr, mouse_411 )
{
var dx = 0;
var dy = mouse_411.curr_y - mouse_411.last_y;
spt.resize_scroll.resize( bvr.src_el, dx, dy );
}
spt.resize_scroll.drag_y_resize_action = function( evt, bvr, mouse_411 )
{
spt.resize_scroll.adjust_for_scroll( bvr.src_el );
}
spt.resize_scroll.drag_x_scroll_draggable_motion = function( evt, bvr, mouse_411 )
{
var rsw_top_el = bvr.src_el.getParent(".SPT_RSW_TOP");
var dx = mouse_411.curr_x - mouse_411.last_x;
var content_box = rsw_top_el.getElement(".SPT_RSW_CONTENT_BOX");
var contents = content_box.getElement(".SPT_RSW_CONTENTS");
var scr_x_drag_div = rsw_top_el.getElement(".SPT_SCROLL_X_DRAG");
var cw = spt.get_render_display_width( contents );
var cb_w = content_box.clientWidth;
var sd_w = scr_x_drag_div.clientWidth;
var sd_off_x = parseInt( scr_x_drag_div.getStyle("margin-left") );
if( cb_w >= cw ) {
return;
}
var max_off_x = cb_w - sd_w;
var new_off_x = sd_off_x + dx;
if( new_off_x < 0 ) { new_off_x = 0; }
if( new_off_x > max_off_x ) { new_off_x = max_off_x; }
// now map it back to the full scrollTop ...
var new_scr_left = Math.floor( (1.0 * (new_off_x / cb_w) * cw) + 0.5 );
content_box.scrollLeft = new_scr_left;
// and set offset for the scroll draggable too ...
scr_x_drag_div.setStyle("margin-left", new_off_x+"px");
}
spt.resize_scroll.drag_y_scroll_draggable_motion = function( evt, bvr, mouse_411 )
{
var rsw_top_el = bvr.src_el.getParent(".SPT_RSW_TOP");
var dy = mouse_411.curr_y - mouse_411.last_y;
var content_box = rsw_top_el.getElement(".SPT_RSW_CONTENT_BOX");
var contents = content_box.getElement(".SPT_RSW_CONTENTS");
var scr_y_drag_div = rsw_top_el.getElement(".SPT_SCROLL_Y_DRAG");
var ch = spt.get_render_display_height( contents );
var cb_h = content_box.clientHeight;
var sd_h = scr_y_drag_div.clientHeight;
var sd_off_y = parseInt( scr_y_drag_div.getStyle("margin-top") );
if( cb_h >= ch ) {
return;
}
var max_off_y = cb_h - sd_h;
var new_off_y = sd_off_y + dy;
if( new_off_y < 0 ) { new_off_y = 0; }
if( new_off_y > max_off_y ) { new_off_y = max_off_y; }
// now map it back to the full scrollTop ...
var new_scr_top = Math.floor( (1.0 * (new_off_y / cb_h) * ch) + 0.5 );
content_box.scrollTop = new_scr_top;
// and set offset for the scroll draggable too ...
scr_y_drag_div.setStyle("margin-top", new_off_y+"px");
}
spt.resize_scroll.adjust_scroll_x_draggable = function( activating_el )
{
var rsw_top_el = activating_el.getParent(".SPT_RSW_TOP");
var content_box = rsw_top_el.getElement(".SPT_RSW_CONTENT_BOX");
var contents = content_box.getElement(".SPT_RSW_CONTENTS");
var cw = spt.get_render_display_width( contents );
var cb_w = content_box.clientWidth;
var scroll_x_div = rsw_top_el.getElement(".SPT_SCROLL_X")
var scroll_x_drag_div = rsw_top_el.getElement(".SPT_SCROLL_X_DRAG")
// adjust size of scroll draggable ...
var w = 1.0 * (cb_w / cw) * cb_w;
if( w < 6 ) {
w = 6;
}
scroll_x_drag_div.setStyle("width",w+"px");
scroll_x_drag_div.setStyle("height","40px");
// adjust offset of scroll draggable ...
var s_left = 1.0 * (content_box.scrollLeft / cw) * cb_w;
scroll_x_drag_div.setStyle("margin-left", s_left+"px");
}
spt.resize_scroll.adjust_scroll_y_draggable = function( activating_el )
{
var rsw_top_el = activating_el.getParent(".SPT_RSW_TOP");
var content_box = rsw_top_el.getElement(".SPT_RSW_CONTENT_BOX");
var contents = content_box.getElement(".SPT_RSW_CONTENTS");
var ch = spt.get_render_display_height( contents );
var cb_h = content_box.clientHeight;
var scroll_y_div = rsw_top_el.getElement(".SPT_SCROLL_Y")
var scroll_y_drag_div = rsw_top_el.getElement(".SPT_SCROLL_Y_DRAG")
// adjust size of scroll draggable ...
var h = 1.0 * (cb_h / ch) * cb_h;
if( h < 6 ) {
h = 6;
}
scroll_y_drag_div.setStyle("width","40px");
scroll_y_drag_div.setStyle("height",h+"px");
// compensate for a display artifact in Opera browser
if( spt.browser.is_Opera() ) {
scroll_y_div.setStyle("height", cb_h+"px");
}
// adjust offset of scroll draggable ...
var s_top = 1.0 * (content_box.scrollTop / ch) * cb_h;
scroll_y_drag_div.setStyle("margin-top", s_top+"px");
}
spt.resize_scroll.adjust_scroll_draggables = function( activating_el )
{
spt.resize_scroll.adjust_scroll_x_draggable( activating_el );
spt.resize_scroll.adjust_scroll_y_draggable( activating_el );
}
spt.resize_scroll.adjust_for_scroll = function( activating_el )
{
var rsw_top_el = activating_el.getParent(".SPT_RSW_TOP");
var content_box = rsw_top_el.getElement(".SPT_RSW_CONTENT_BOX");
var contents = content_box.getElement(".SPT_RSW_CONTENTS");
var cw = spt.get_render_display_width( contents );
var ch = spt.get_render_display_height( contents );
var cb_w = content_box.clientWidth;
var cb_h = content_box.clientHeight;
var scroll_x_div = rsw_top_el.getElement(".SPT_SCROLL_X")
var scroll_x_drag_div = rsw_top_el.getElement(".SPT_SCROLL_X_DRAG")
var scroll_y_div = rsw_top_el.getElement(".SPT_SCROLL_Y")
var scroll_y_drag_div = rsw_top_el.getElement(".SPT_SCROLL_Y_DRAG")
var scroll_bar_sz = parseInt( rsw_top_el.getProperty("spt_scroll_size") );
var is_scroll_x_shown = true;
if( spt.is_hidden(scroll_x_div) ) {
is_scroll_x_shown = false;
}
var is_scroll_y_shown = true;
if( spt.is_hidden(scroll_y_div) ) {
is_scroll_y_shown = false;
}
var top_el = rsw_top_el;
if( ! top_el.hasClass("SPT_RSW_OUTER_TOP") ) {
top_el = rsw_top_el.getParent(".SPT_RSW_OUTER_TOP");
}
var scroll_expansion = rsw_top_el.getProperty("spt_scroll_expansion");
var dy_adjust = 0;
if( cw > cb_w ) {
if( ! is_scroll_x_shown ) {
// display x scroll ...
dy_adjust = scroll_bar_sz;
spt.resize_scroll.adjust_control_size( top_el, "DY", dy_adjust );
spt.show( scroll_x_div );
is_scroll_x_shown = true;
}
spt.resize_scroll.adjust_scroll_x_draggable( activating_el );
} else {
if( is_scroll_x_shown ) {
// hide x scroll ...
dy_adjust = 0 - scroll_bar_sz;
spt.resize_scroll.adjust_control_size( top_el, "DY", dy_adjust );
spt.hide( scroll_x_div );
is_scroll_x_shown = false;
}
}
if( dy_adjust ) {
if( scroll_expansion == "outside" ) {
var dy_el_list = top_el.getElements(".spt_pwin_DY");
dy_el_list.push( top_el );
for( var c=0; c < dy_el_list.length; c++ ) {
var el = dy_el_list[c];
if( el.className.contains("_B_2_i_") || el.className.contains("_B_2_i ") ) {
continue;
}
var el_h = parseInt( el.getStyle("height") );
el.setStyle("height", (el_h+dy_adjust) + "px");
}
}
else if( scroll_expansion == "inside" ) {
var dy_el_list = rsw_top_el.getElements(".spt_pwin_DY");
dy_el_list.push( rsw_top_el );
for( var c=0; c < dy_el_list.length; c++ ) {
var el = dy_el_list[c];
if( el.className.contains("_B_2_i_") || el.className.contains("_B_2_i ") ) {
var el_h = parseInt( el.getStyle("height") );
el.setStyle("height", (el_h-dy_adjust) + "px");
}
}
}
else {
log.warning("WARNING: unknown scroll_expansion value found ('" + scroll_expansion + "')");
}
}
var dx_adjust = 0;
if( ch > cb_h ) {
if( ! is_scroll_y_shown ) {
// display y scroll ...
dx_adjust = scroll_bar_sz;
spt.resize_scroll.adjust_control_size( top_el, "DX", dx_adjust );
spt.show( scroll_y_div );
is_scroll_y_shown = true;
}
spt.resize_scroll.adjust_scroll_y_draggable( activating_el );
} else {
if( is_scroll_y_shown ) {
// hide y scroll ...
dx_adjust = 0 - scroll_bar_sz;
spt.resize_scroll.adjust_control_size( top_el, "DX", dx_adjust );
spt.hide( scroll_y_div );
is_scroll_y_shown = false;
}
}
if( dx_adjust ) {
if( scroll_expansion == "outside" ) {
var dx_el_list = top_el.getElements(".spt_pwin_DX");
dx_el_list.push( top_el );
for( var c=0; c < dx_el_list.length; c++ ) {
var el = dx_el_list[c];
if( el.className.contains("_B_2_i_a ") || el.className.contains("_B_2_ii_a ") ) {
continue;
}
if( el.hasClass("SPT_SCROLL_X") || el.hasClass("SPT_RESIZE_Y") ) {
continue;
}
var el_w = parseInt( el.getStyle("width") );
el.setStyle("width", (el_w+dx_adjust) + "px");
}
}
else if( scroll_expansion == "inside" ) {
var dx_el_list = rsw_top_el.getElements(".spt_pwin_DX");
dx_el_list.push( rsw_top_el );
for( var c=0; c < dx_el_list.length; c++ ) {
var el = dx_el_list[c];
if( el.className.contains("_B_2_i_a ") || el.className.contains("_B_2_ii_a ") ) {
var el_w = parseInt( el.getStyle("width") );
el.setStyle("width", (el_w-dx_adjust) + "px");
}
}
}
else {
log.warning("WARNING: unknown scroll_expansion value found ('" + scroll_expansion + "')");
}
}
var resize_img = top_el.getElement(".SPT_PWIN_RESIZE_IMG");
if( resize_img ) {
if( is_scroll_x_shown && is_scroll_y_shown ) {
resize_img.setStyle("right","2px");
resize_img.setStyle("bottom","2px");
} else {
resize_img.setStyle("right","4px");
resize_img.setStyle("bottom","4px");
}
}
}
spt.resize_scroll.adjust_control_size = function( rsw_top_el, DX_or_DY, size_adj )
{
var top_el = rsw_top_el;
if( ! top_el.hasClass("SPT_RSW_OUTER_TOP") ) {
top_el = rsw_top_el.getParent(".SPT_RSW_OUTER_TOP");
}
var el_list = top_el.getElements( ".spt_controls_" + DX_or_DY );
var dim = "height";
if( DX_or_DY == 'DX' ) {
dim = "width";
}
for( var c=0; c < el_list.length; c++ ) {
var el = el_list[c];
el.setStyle( dim, parseInt(el.getStyle(dim)) + size_adj + "px" );
}
}
spt.resize_scroll.wheel_scroll = function( evt, bvr, mouse_411 )
{
var content_box = bvr.src_el; // expects bvr to be assigned on the element with class "SPT_RSW_CONTENT_BOX"
var contents = content_box.getElement(".SPT_RSW_CONTENTS");
var ch = spt.get_render_display_height( contents );
var cb_h = content_box.clientHeight;
if( cb_h >= ch ) {
return;
}
var max_scroll_top = ch - cb_h;
var scroll_top = content_box.scrollTop;
var delta = 30;
if( evt.wheel < 0 ) {
scroll_top += delta;
} else {
scroll_top -= delta;
}
if( scroll_top < 0 ) { scroll_top = 0; }
if( scroll_top > max_scroll_top ) { scroll_top = max_scroll_top; }
content_box.scrollTop = scroll_top;
spt.resize_scroll.adjust_for_scroll( bvr.src_el );
}
'''
|
epl-1.0
| -6,829,569,407,633,964,000 | 35.060274 | 119 | 0.520638 | false |
gwct/grampa
|
docs/scripts/generators/about_generator.py
|
1
|
17272
|
############################################################
# For GRAMPA site, 12.19
# This generates the file "about.html"
############################################################
import sys, os
sys.path.append('..')
import lib.read_chunks as RC
######################
# HTML template
######################
html_template = """
<!doctype html>
{head}
<body>
{nav}
<div class="pure-g"><div class="pure-u-1" id="divider_row"></div></div>
<div class="pure-g" id="main_row">
<div class="pure-u-3-24" id="margin"></div>
<div class="pure-u-18-24" id="main_col">
<div id="main_content">
<h1>About GRAMPA</h1>
                <h4>This section is a brief explanation of the rationale and implementation of the algorithm. For a more in-depth explanation,
see <a href="https://doi.org/10.1093/sysbio/syx044" target="_blank">our paper in <em>Systematic Biology</em></a>.</h4>
<h5>For information about the program and its options, see the <a href="readme.html">README</a></h5>
<div class="pure-g">
<a name="background"></a>
<div class="pure-u-1" id="jump_row">
<div id="jump_container">
Jump to section:
<a class="jump_link" href="#background">Background</a>
<a class="jump_link" href="#multrees">MUL-trees</a>
<a class="jump_link" href="#recon">LCA reconciliation</a>
<a class="jump_link" href="#grampa">GRAMPA</a>
<a class="jump_link" href="#search">GRAMPA's search</a>
</div>
</div>
</div>
<h1>Background</h1>
<p>Polyploidy leading to whole genome duplications (WGD; a doubling of the number of chromosomes in an organism) can occur in two ways:</p>
<ol>
<li><b>Autopolyploidy</b> occurs when two individuals <em>from the same species</em> produce an offspring with both sets of chromosomes from
each parent intact.</li>
<li><b>Allopolyploidy</b> occurs when two individuals <em>from different species</em> hybridize to form a new species with both sets of
chromosomes from each parental species.</li>
</ol>
<p>Many methods that study polyploidy are unable to distinguish between the two modes of polyploidization. In fact, most make an implicit assumption that
                WGDs are the result of autopolyploidization and can therefore be positively misleading about the placement of WGDs on a phylogeny.</p>
<p>One reason prior methods have had trouble with allopolyploidy is that they treat genes resulting from allopolyploidy as paralogs when they actually
have similarities to orthologs as well.</p>
<ul>
<li><b>Paralogs</b> are genes found in the <em>same or different species</em> that can trace their most recent common ancestor back to a <em>duplication</em> event.</li>
                    <li><b>Orthologs</b> are genes found in <em>different species</em> that can trace their most recent common ancestor back to a <em>speciation</em> event.</li>
</ul>
<p>The tricky thing about genes arising from allopolyploidy is that they can be found in the same or different species and yet their most recent common ancestor
is a <em>speciation</em> event -- the hybridization of the two parental species resulting in a new hybrid species. Neither of the definitions above fit
this scenario. Fortunately, a helpful paper by <a href=" http://dx.doi.org/10.1016/j.tplants.2016.02.005" target="_blank">Glover et al.</a><sup><a href="#1">[1]</a></sup>
has clarified some terms for us with respect to WGDs. They add the following two definitions:</p>
<ul>
<li><b>Ohnologs</b> are genes found in the <em>same species</em> that can trace their most recent common ancestor back to a <em>whole genome
duplication</em> event (autopolyploidy).</li>
                    <li><b>Homoeologs</b> are genes found in the <em>same species</em> that can trace their most recent common ancestor back to a <em>speciation</em> event.</li>
</ul>
<p>In other words, genes that arise from autopolyploidization are ohnologs, while genes that arise from allopolyploidization are homoeologs. Genes that arise
from small scale duplications are paralogs and genes arising from the split of two populations that diverge into two species are orthologs.
<a href=" http://dx.doi.org/10.1016/j.tplants.2016.02.005" target="_blank">Glover et al.</a><sup><a href="#1">[1]</a></sup> provide a nice table that sums this up:</p>
<img class="pure-img" id="logo_main" src="img/glover_table.png">
                <p>So, if we want to be able to accurately identify WGDs and the mode of polyploidization that produced them, we need to take these genealogical relationships
                into account. As it turns out, the best way to do this is to simply change the way we represent our species phylogeny.</p>
<div class="pure-g">
<a name="multrees"></a>
<div class="pure-u-1" id="jump_row">
<div id="jump_container">
Jump to section:
<a class="jump_link" href="#background">Background</a>
<a class="jump_link" href="#multrees">MUL-trees</a>
<a class="jump_link" href="#recon">LCA reconciliation</a>
<a class="jump_link" href="#grampa">GRAMPA</a>
<a class="jump_link" href="#search">GRAMPA's search</a>
</div>
</div>
</div>
<h1>MUL-trees</h1>
<p>Multi-labeled trees, or MUL-trees, are phylogenies in which the tip labels are not necessarily unique. These turn out to be extremely useful when representing
polyploid species because they are essentially showing the relationships of genomes rather than species. Therefore, they can represent both sub-genomes of
a polyploid species in a single phylogeny.</p>
<img class="pure-img" id="logo_main" src="img/fig2.png">
<p>For instance, in the figure above we can see the singly-labeled species representation of these 4 taxa on the left. This representation is the common output
of most species tree programs and is fine if no polyploidy has occurred. However, it is insufficient when studying polyploidy. Use of a singly-labeled species
phylogeny can lead to the mis-representation of allopolyploids as autopolyploids and inflate the counts of duplications and losses (see our paper for more
                details). MUL-trees, on the other hand, can represent the genomes of various types of polyploidy events. The middle panel above shows a MUL-tree if taxon B
                is the result of autopolyploidization. In this case, the two B sub-genomes are sister to one another. The panel on the right shows a MUL-tree if taxon B is the
result of hybridization between the A and D lineages (allopolyploidization). The two B sub-genomes are now sister to the parental lineage to which they are most
closely related.</p>
<p>Using this genome representation we were able to adapt a popular algorithm for counting gene duplications and losses, LCA reconciliation, to also be used to
study polyploids</p>
<div class="pure-g">
<a name="recon"></a>
<div class="pure-u-1" id="jump_row">
<div id="jump_container">
Jump to section:
<a class="jump_link" href="#background">Background</a>
<a class="jump_link" href="#multrees">MUL-trees</a>
<a class="jump_link" href="#recon">LCA reconciliation</a>
<a class="jump_link" href="#grampa">GRAMPA</a>
<a class="jump_link" href="#search">GRAMPA's search</a>
</div>
</div>
</div>
<a name="recon"><h1>LCA Gene tree reconciliation</h1></a>
<p>Least common ancestor (LCA) gene tree reconciliation is a method to classify nodes in a gene tree as either speciation or duplication events, given an accepted species phylogeny. It does
this by creating a <b>map</b> of each node in the gene tree to a node in the species tree. The procedure for mapping ensures that if a node in the gene tree maps
to the same node in the species tree as one of its direct descendant nodes, then that node is a duplication event.</p>
<h3>Step 1: Initialize the maps at the tips of the tree</h3>
<p>The process begins by simply using the labels in the gene tree to map them to tips in the species tree:</p>
<img class="pure-img" id="logo_main" src="img/fig3a.png">
<p>In this simple example, the genes A1, B1, B2, and C1 in the gene tree map to the species A, B, B, and C in the species tree. This is simple because the tip
nodes in the gene tree have labels that contain the name of the species from which they came.</p>
<h3>Step 2: Map the internal nodes</h3>
<p>Based on the initial mappings of the tip nodes, the internal nodes can be mapped:</p>
<img class="pure-img" id="logo_main" src="img/fig4.png">
<p>Each internal node is defined by the set of tips below it in the tree. For example, in the gene tree above, Node G1 is defined by tips C1 and B2. These tips
map to tips in the species tree C and B, respectively. In the species tree, we observe that the least common ancestor (LCA) of nodes C and B is Node S2.
                Therefore, we can say that Node G1 in the gene tree maps to Node S2 in the species tree. This process is repeated for every node in the gene tree until
all nodes are mapped.</p>
<h3>Step 3: Count duplication nodes</h3>
<p>Any node in the gene tree that maps to the same node in the species tree as one of its direct descendants is classified as a duplication node:
<img class="pure-img" id="logo_main" src="img/fig5.png">
                <p>Node G3 and one of its direct descendants (Node G1) both map to Node S2, therefore Node G3 is a duplication node! The number of duplication nodes is
counted to get a duplication score for this gene tree/species tree combination.</p>
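                <p>To make steps 1-3 concrete, the mapping procedure for a single binary gene tree can be sketched roughly as follows
                (the function and attribute names below are illustrative only, not GRAMPA's actual code):</p>
                <pre>
def lca_map(gene_node, species_tree):
    # Step 1: a tip gene maps to the species it was sampled from
    if gene_node.is_tip():
        return species_tree.tip_for(gene_node.species_label)
    # Step 2: an internal gene node maps to the LCA of its children's maps
    left_map = lca_map(gene_node.left_child, species_tree)
    right_map = lca_map(gene_node.right_child, species_tree)
    node_map = species_tree.lca(left_map, right_map)
    # Step 3: mapping to the same node as one of its children means duplication
    gene_node.is_dup = node_map in (left_map, right_map)
    return node_map
                </pre>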
<h3>Step 4: Counting losses</h3>
<p><a href="http://dx.doi.org/doi:10.1089/cmb.2006.13.320" target="_blank">Durand et al.</a><sup><a href="#2">[2]</a></sup> provided a really simple way to count losses in a gene tree, given a
set of maps to a species tree:</p>
<img class="pure-img" id="logo_main" src="img/fig6.png">
<p>So this says that to count losses along any branch in a gene tree (l_bg), you subtract depth (in the species tree) of the map of the node at the end
of the branch (M(n_g)) from the depth (in the species tree) of the map of the ancestral node. You then correct for the expected difference (-1) and
for the fact that the ancestral node might be a duplication node (IsDup). The total number of losses for all branches can be summed to get a loss
                score for this gene tree/species tree combination.</p>
<h3>Step 5: Aggregating scores</h3>
<p>Given a single gene tree and a species tree, we can take the duplication score and loss score and add them up to get a total score for this reconciliation.
We can then repeat the procedure for any number of gene trees and sum the scores across gene trees to get a total parsimony score. Reconciling the same
set of gene trees to different species topologies can be a way to determine the most parsimonious species tree (e.g.
<a href="https://doi.org/10.1006/mpev.1996.0071" target="_blank">Guigo et al.</a><sup><a href="#3">[3]</a></sup>)!</p>
<div class="pure-g">
<a name="grampa"></a>
<div class="pure-u-1" id="jump_row">
<div id="jump_container">
Jump to section:
<a class="jump_link" href="#background">Background</a>
<a class="jump_link" href="#multrees">MUL-trees</a>
<a class="jump_link" href="#recon">LCA reconciliation</a>
<a class="jump_link" href="#grampa">GRAMPA</a>
<a class="jump_link" href="#search">GRAMPA's search</a>
</div>
</div>
</div>
<a name="grampa"><h1>Gene-tree Reconciliation with MUL-trees for Polyploid Analysis</h1></a>
                <p>We set out to adapt the algorithm described above to work accurately in the presence of polyploidy. That means we want to reconcile gene trees to MUL-trees.
The main problem we encountered with this adaptation is that we can no longer properly initialize the maps of polyploid genes in the gene tree because they
have multiple possible places to map to in the species tree:</p>
<img class="pure-img" id="logo_main" src="img/fig7.png">
<p>In this case, both B1 and B2 in the gene tree could possibly map to B (sister to A) or B (sister to C) in the MUL-tree! This leads to unresolvable internal
maps as well.</p>
                <p>The key observation for solving this problem is that the correct initial maps will always lead to the lowest reconciliation
score. Working with this observation, we can then try all possible initial maps from this gene tree to the MUL-tree and apply the parsimony principle to
the final scores to get the correct map:</p>
<img class="pure-img" id="logo_main" src="img/fig8.png">
<p>This strategy ensures that we count the correct number of duplications and losses, even in cases of allopolyploidy where genes present in multiple copies
within a species might not be due to a duplication event.</p>
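                <p>In pseudocode, the exhaustive version of this search looks roughly like the sketch below (the names are
                illustrative only; GRAMPA's real implementation differs in detail):</p>
                <pre>
# score every legal assignment of the polyploid gene copies to MUL-tree tips
candidates = all_possible_initial_maps(gene_tree, mul_tree)
scored = [ (count_dups_and_losses(gene_tree, mul_tree, m), m) for m in candidates ]
best_score, best_maps = min(scored, key=lambda pair: pair[0])   # parsimony: keep the cheapest
                </pre>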
<p>Of course, this type of combinatorial algorithm has exponential run time with the number of polyploid genes in the gene tree, so we came up with some ways
to speed it up. You can find more info about those heuristics in the paper (Fig. S2).</p>
<div class="pure-g">
<a name="search"></a>
<div class="pure-u-1" id="jump_row">
<div id="jump_container">
Jump to section:
<a class="jump_link" href="#background">Background</a>
<a class="jump_link" href="#multrees">MUL-trees</a>
<a class="jump_link" href="#recon">LCA reconciliation</a>
<a class="jump_link" href="#grampa">GRAMPA</a>
<a class="jump_link" href="#search">GRAMPA's search</a>
</div>
</div>
</div>
<a name="search"><h1>Using GRAMPA to detect and place a polyploidy event on a phylogeny</h1></a>
<p>Similar to the species tree search done with LCA reconciliation to singly-labeled trees, GRAMPA can test hypotheses of polyploidy by reconciling the same
set of gene trees to different MUL-trees. The MUL-tree with the lowest score indicates the polyploid species and their closest surviving parental lineages.
GRAMPA can also reconcile and compare to a singly-labeled tree, and if that tree has the lowest score then no polyploidy has occurred. This works because
penalties naturally arise when reconciling to the wrong type of tree (see Fig. S7 in our paper).</p>
<p>GRAMPA has an automatic search function so that it will try reconciliations to any number of user-specified MUL-trees. This can be done by using a singly-labeled
species tree as input and specifying the nodes from which to build MUL-trees. We designate these nodes <b>H1</b> and <b>H2</b>.</p>
<ul>
<li>The <b>H1 node</b> in the singly-labeled species tree indicates the clade of species that may be the result of a polyploidy event.</li>
<li>The <b>H2 node</b> in the singly-labeled species tree indicates the second parental lineage of the polyploid species.</li>
</ul>
<p>A user may specify H1 and H2 nodes. For each H1 node, every specified H2 node will be used to build a MUL-tree. The sub-tree rooted by H1 and the branch
that H1 subtends will be copied and placed on the branch subtended by H2:</p>
<img class="pure-img" id="logo_main" src="img/fig9.png">
<p>The set of gene-trees is reconciled to the resultant MUL-tree and a total reconciliation score obtained. Then the process is repeated for other H1 and H2
nodes (e.g. other MUL-trees) and the tree that has the lowest reconciliation score represents the optimal polyploidy scenario and is indicative of the
mode of polyploidization, the parental lineages of allopolyploids, and the number of duplications and losses for each gene tree.</p>
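                <p>In outline, GRAMPA's search over candidate MUL-trees then looks something like this (names are again
                illustrative; see the README for the program's actual options and output):</p>
                <pre>
results = []
# the singly-labeled species tree is scored as the "no polyploidy" hypothesis
results.append( (sum(reconcile(gt, species_tree) for gt in gene_trees), None, None) )
for h1 in h1_nodes:
    for h2 in h2_nodes:
        mul_tree = graft_copy_of_subtree(species_tree, h1, above=h2)
        score = sum(reconcile(gt, mul_tree) for gt in gene_trees)
        results.append( (score, h1, h2) )
best_score, best_h1, best_h2 = min(results, key=lambda r: r[0])   # lowest total score wins
                </pre>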
<a name="refs"><h1>References</h1></a>
<ol>
<a name="1"></a><li class="pub_title">
Glover NM, Redestig H, and Dessimoz C. 2016. Homoeologs: What are they and how do we infer them? <em>Trends Plant Sci.</em>
DOI: <a href=" http://dx.doi.org/10.1016/j.tplants.2016.02.005" target="_blank">10.1016/j.tplants.2016.02.005</a>
                    </li>
<a name="2"></a><li class="pub_title">
Durand D, Halldórsson BV, and Vernot B. 2006. A hybrid micro-macro evolutionary approach to gene tree reconstruction.
<em>J Comput Biol.</em> DOI: <a href="http://dx.doi.org/doi:10.1089/cmb.2006.13.320" target="_blank">doi:10.1089/cmb.2006.13.320</a>
</li>
<a name="3"></a><li class="pub_title">
Guigo R, Muchnik I, and Smith TF. 1996. Reconstruction of an ancient molecular phylogeny. <em>Mol Phyl Evol.</em>
DOI: <a href="https://doi.org/10.1006/mpev.1996.0071" target="_blank">10.1006/mpev.1996.0071</a>
</li>
</ol>
</div>
</div>
<div class="pure-u-3-24" id="margin"></div>
</div>
{footer}
</body>
"""
######################
# Main block
######################
pagefile = "about.html";
print("Generating " + pagefile + "...");
title = "GRAMPA - About"
head = RC.readHead(title, pagefile);
nav = RC.readNav(pagefile);
footer = RC.readFooter();
outfilename = "../../" + pagefile;
with open(outfilename, "w") as outfile:
outfile.write(html_template.format(head=head, nav=nav, footer=footer));
|
gpl-3.0
| -4,442,646,838,849,395,700 | 64.181132 | 196 | 0.688861 | false |
frouty/odoo_oph
|
addons/account/account_invoice.py
|
1
|
96788
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
import openerp.addons.decimal_precision as dp
import openerp.exceptions
from openerp import netsvc, SUPERUSER_ID
from openerp import pooler
from openerp.osv import fields, osv, orm
from openerp.tools import float_compare
from openerp.tools.translate import _
class account_invoice(osv.osv):
def _amount_all(self, cr, uid, ids, name, args, context=None):
res = {}
for invoice in self.browse(cr, uid, ids, context=context):
res[invoice.id] = {
'amount_untaxed': 0.0,
'amount_tax': 0.0,
'amount_total': 0.0
}
for line in invoice.invoice_line:
res[invoice.id]['amount_untaxed'] += line.price_subtotal
for line in invoice.tax_line:
res[invoice.id]['amount_tax'] += line.amount
res[invoice.id]['amount_total'] = res[invoice.id]['amount_tax'] + res[invoice.id]['amount_untaxed']
return res
def _get_journal(self, cr, uid, context=None):
if context is None:
context = {}
type_inv = context.get('type', 'out_invoice')
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company_id = context.get('company_id', user.company_id.id)
type2journal = {'out_invoice': 'sale', 'in_invoice': 'purchase', 'out_refund': 'sale_refund', 'in_refund': 'purchase_refund'}
journal_obj = self.pool.get('account.journal')
domain = [('company_id', '=', company_id)]
if isinstance(type_inv, list):
domain.append(('type', 'in', [type2journal.get(type) for type in type_inv if type2journal.get(type)]))
else:
domain.append(('type', '=', type2journal.get(type_inv, 'sale')))
res = journal_obj.search(cr, uid, domain, limit=1)
return res and res[0] or False
def _get_currency(self, cr, uid, context=None):
res = False
journal_id = self._get_journal(cr, uid, context=context)
if journal_id:
journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
res = journal.currency and journal.currency.id or journal.company_id.currency_id.id
return res
def _get_journal_analytic(self, cr, uid, type_inv, context=None):
type2journal = {'out_invoice': 'sale', 'in_invoice': 'purchase', 'out_refund': 'sale', 'in_refund': 'purchase'}
tt = type2journal.get(type_inv, 'sale')
result = self.pool.get('account.analytic.journal').search(cr, uid, [('type','=',tt)], context=context)
if not result:
raise osv.except_osv(_('No Analytic Journal!'),_("You must define an analytic journal of type '%s'!") % (tt,))
return result[0]
def _get_type(self, cr, uid, context=None):
if context is None:
context = {}
return context.get('type', 'out_invoice')
def _reconciled(self, cr, uid, ids, name, args, context=None):
res = {}
wf_service = netsvc.LocalService("workflow")
for inv in self.browse(cr, uid, ids, context=context):
res[inv.id] = self.test_paid(cr, uid, [inv.id])
if not res[inv.id] and inv.state == 'paid':
wf_service.trg_validate(uid, 'account.invoice', inv.id, 'open_test', cr)
return res
def _get_reference_type(self, cr, uid, context=None):
return [('none', _('Free Reference'))]
def _amount_residual(self, cr, uid, ids, name, args, context=None):
"""Function of the field residua. It computes the residual amount (balance) for each invoice"""
if context is None:
context = {}
ctx = context.copy()
result = {}
currency_obj = self.pool.get('res.currency')
for invoice in self.browse(cr, SUPERUSER_ID, ids, context=context):
nb_inv_in_partial_rec = max_invoice_id = 0
result[invoice.id] = 0.0
if invoice.move_id:
for aml in invoice.move_id.line_id:
if aml.account_id.type in ('receivable','payable'):
if aml.currency_id and aml.currency_id.id == invoice.currency_id.id:
result[invoice.id] += aml.amount_residual_currency
else:
ctx['date'] = aml.date
result[invoice.id] += currency_obj.compute(cr, uid, aml.company_id.currency_id.id, invoice.currency_id.id, aml.amount_residual, context=ctx)
if aml.reconcile_partial_id.line_partial_ids:
#we check if the invoice is partially reconciled and if there are other invoices
#involved in this partial reconciliation (and we sum these invoices)
for line in aml.reconcile_partial_id.line_partial_ids:
if line.invoice and invoice.type == line.invoice.type:
nb_inv_in_partial_rec += 1
#store the max invoice id as for this invoice we will make a balance instead of a simple division
max_invoice_id = max(max_invoice_id, line.invoice.id)
if nb_inv_in_partial_rec:
#if there are several invoices in a partial reconciliation, we split the residual by the number
#of invoice to have a sum of residual amounts that matches the partner balance
new_value = currency_obj.round(cr, uid, invoice.currency_id, result[invoice.id] / nb_inv_in_partial_rec)
if invoice.id == max_invoice_id:
                            #if it's the last invoice of the bunch of invoices partially reconciled together, we make a
#balance to avoid rounding errors
result[invoice.id] = result[invoice.id] - ((nb_inv_in_partial_rec - 1) * new_value)
else:
result[invoice.id] = new_value
#prevent the residual amount on the invoice to be less than 0
result[invoice.id] = max(result[invoice.id], 0.0)
return result
# Return the journal items of the payments reconciled with this invoice
# (ids of the partial and total payments related to the selected invoices)
def _get_lines(self, cr, uid, ids, name, arg, context=None):
res = {}
for invoice in self.browse(cr, uid, ids, context=context):
id = invoice.id
res[id] = []
if not invoice.move_id:
continue
data_lines = [x for x in invoice.move_id.line_id if x.account_id.id == invoice.account_id.id]
partial_ids = []
for line in data_lines:
ids_line = []
if line.reconcile_id:
ids_line = line.reconcile_id.line_id
elif line.reconcile_partial_id:
ids_line = line.reconcile_partial_id.line_partial_ids
l = map(lambda x: x.id, ids_line)
partial_ids.append(line.id)
res[id] = [x for x in l if x != line.id and x not in partial_ids]
return res
def _get_invoice_line(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('account.invoice.line').browse(cr, uid, ids, context=context):
result[line.invoice_id.id] = True
return result.keys()
def _get_invoice_tax(self, cr, uid, ids, context=None):
result = {}
for tax in self.pool.get('account.invoice.tax').browse(cr, uid, ids, context=context):
result[tax.invoice_id.id] = True
return result.keys()
def _compute_lines(self, cr, uid, ids, name, args, context=None):
result = {}
for invoice in self.browse(cr, uid, ids, context=context):
src = []
lines = []
if invoice.move_id:
for m in invoice.move_id.line_id:
if m.account_id != invoice.account_id:
continue
temp_lines = []
if m.reconcile_id:
temp_lines = map(lambda x: x.id, m.reconcile_id.line_id)
elif m.reconcile_partial_id:
temp_lines = map(lambda x: x.id, m.reconcile_partial_id.line_partial_ids)
lines += [x for x in temp_lines if x not in lines]
src.append(m.id)
lines = filter(lambda x: x not in src, lines)
result[invoice.id] = lines
return result
def _get_invoice_from_line(self, cr, uid, ids, context=None):
move = {}
for line in self.pool.get('account.move.line').browse(cr, uid, ids, context=context):
if line.reconcile_partial_id:
for line2 in line.reconcile_partial_id.line_partial_ids:
move[line2.move_id.id] = True
if line.reconcile_id:
for line2 in line.reconcile_id.line_id:
move[line2.move_id.id] = True
invoice_ids = []
if move:
invoice_ids = self.pool.get('account.invoice').search(cr, uid, [('move_id','in',move.keys())], context=context)
return invoice_ids
def _get_invoice_from_reconcile(self, cr, uid, ids, context=None):
move = {}
for r in self.pool.get('account.move.reconcile').browse(cr, uid, ids, context=context):
for line in r.line_partial_ids:
move[line.move_id.id] = True
for line in r.line_id:
move[line.move_id.id] = True
invoice_ids = []
if move:
invoice_ids = self.pool.get('account.invoice').search(cr, uid, [('move_id','in',move.keys())], context=context)
return invoice_ids
_name = "account.invoice"
_inherit = ['mail.thread']
_description = 'Invoice'
_order = "id desc"
_track = {
'type': {
},
'state': {
'account.mt_invoice_paid': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'paid' and obj['type'] in ('out_invoice', 'out_refund'),
'account.mt_invoice_validated': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'open' and obj['type'] in ('out_invoice', 'out_refund'),
},
}
_columns = {
'name': fields.char('Description', size=64, select=True, readonly=True, states={'draft':[('readonly',False)]}),
'origin': fields.char('Source Document', size=64, help="Reference of the document that produced this invoice.", readonly=True, states={'draft':[('readonly',False)]}),
'supplier_invoice_number': fields.char('Supplier Invoice Number', size=64, help="The reference of this invoice as provided by the supplier.", readonly=True, states={'draft':[('readonly',False)]}),
'type': fields.selection([
('out_invoice','Customer Invoice'),
('in_invoice','Supplier Invoice'),
('out_refund','Customer Refund'),
('in_refund','Supplier Refund'),
],'Type', readonly=True, select=True, change_default=True, track_visibility='always'),
'number': fields.related('move_id','name', type='char', readonly=True, size=64, relation='account.move', store=True, string='Number'),
'internal_number': fields.char('Invoice Number', size=32, readonly=True, help="Unique number of the invoice, computed automatically when the invoice is created."),
'reference': fields.char('Invoice Reference', size=64, help="The partner reference of this invoice."),
'reference_type': fields.selection(_get_reference_type, 'Payment Reference',
required=True, readonly=True, states={'draft':[('readonly',False)]}),
'comment': fields.text('Additional Information'),
'state': fields.selection([
('draft','Draft'),
('proforma','Pro-forma'),
('proforma2','Pro-forma'),
('open','Open'),
('paid','Paid'),
('cancel','Cancelled'),
],'Status', select=True, readonly=True, track_visibility='onchange',
help=' * The \'Draft\' status is used when a user is encoding a new and unconfirmed invoice. \
\n* The \'Pro-forma\' status is used when the invoice is in pro-forma state and does not yet have an invoice number. \
\n* The \'Open\' status is used once the invoice is validated and an invoice number is generated; it stays open until the invoice is paid. \
\n* The \'Paid\' status is set automatically when the invoice is paid. Its related journal entries may or may not be reconciled. \
\n* The \'Cancelled\' status is used when the user cancels the invoice.'),
'sent': fields.boolean('Sent', readonly=True, help="It indicates that the invoice has been sent."),
'date_invoice': fields.date('Invoice Date', readonly=True, states={'draft':[('readonly',False)]}, select=True, help="Keep empty to use the current date"),
'date_due': fields.date('Due Date', readonly=True, states={'draft':[('readonly',False)]}, select=True,
help="If you use payment terms, the due date will be computed automatically at the generation "\
"of accounting entries. The payment term may compute several due dates, for example 50% now and 50% in one month, but if you want to force a due date, make sure that the payment term is not set on the invoice. If you keep the payment term and the due date empty, it means direct payment."),
'partner_id': fields.many2one('res.partner', 'Partner', change_default=True, readonly=True, required=True, states={'draft':[('readonly',False)]}, track_visibility='always'),
'payment_term': fields.many2one('account.payment.term', 'Payment Terms',readonly=True, states={'draft':[('readonly',False)]},
help="If you use payment terms, the due date will be computed automatically at the generation "\
"of accounting entries. If you keep the payment term and the due date empty, it means direct payment. "\
"The payment term may compute several due dates, for example 50% now, 50% in one month."),
'period_id': fields.many2one('account.period', 'Force Period', domain=[('state','<>','done')], help="Keep empty to use the period of the validation(invoice) date.", readonly=True, states={'draft':[('readonly',False)]}),
'account_id': fields.many2one('account.account', 'Account', required=True, readonly=True, states={'draft':[('readonly',False)]}, help="The partner account used for this invoice."),
'invoice_line': fields.one2many('account.invoice.line', 'invoice_id', 'Invoice Lines', readonly=True, states={'draft':[('readonly',False)]}),
'tax_line': fields.one2many('account.invoice.tax', 'invoice_id', 'Tax Lines', readonly=True, states={'draft':[('readonly',False)]}),
'move_id': fields.many2one('account.move', 'Journal Entry', readonly=True, select=1, ondelete='restrict', help="Link to the automatically generated Journal Items."),
'amount_untaxed': fields.function(_amount_all, digits_compute=dp.get_precision('Account'), string='Subtotal', track_visibility='always',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount','invoice_id'], 20),
},
multi='all'),
'amount_tax': fields.function(_amount_all, digits_compute=dp.get_precision('Account'), string='Tax',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount','invoice_id'], 20),
},
multi='all'),
'amount_total': fields.function(_amount_all, digits_compute=dp.get_precision('Account'), string='Total',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount','invoice_id'], 20),
},
multi='all'),
'currency_id': fields.many2one('res.currency', 'Currency', required=True, readonly=True, states={'draft':[('readonly',False)]}, track_visibility='always'),
'journal_id': fields.many2one('account.journal', 'Journal', required=True, readonly=True, states={'draft':[('readonly',False)]},
domain="[('type', 'in', {'out_invoice': ['sale'], 'out_refund': ['sale_refund'], 'in_refund': ['purchase_refund'], 'in_invoice': ['purchase']}.get(type, [])), ('company_id', '=', company_id)]"),
'company_id': fields.many2one('res.company', 'Company', required=True, change_default=True, readonly=True, states={'draft':[('readonly',False)]}),
'check_total': fields.float('Verification Total', digits_compute=dp.get_precision('Account'), readonly=True, states={'draft':[('readonly',False)]}),
'reconciled': fields.function(_reconciled, string='Paid/Reconciled', type='boolean',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, None, 50), # Check if we can remove ?
'account.move.line': (_get_invoice_from_line, None, 50),
'account.move.reconcile': (_get_invoice_from_reconcile, None, 50),
}, help="It indicates that the invoice has been paid and the journal entry of the invoice has been reconciled with one or several journal entries of payment."),
'partner_bank_id': fields.many2one('res.partner.bank', 'Bank Account',
help='Bank Account Number to which the invoice will be paid. A Company bank account if this is a Customer Invoice or Supplier Refund, otherwise a Partner bank account number.', readonly=True, states={'draft':[('readonly',False)]}),
'move_lines':fields.function(_get_lines, type='many2many', relation='account.move.line', string='Entry Lines'),
'residual': fields.function(_amount_residual, digits_compute=dp.get_precision('Account'), string='Balance',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line','move_id'], 50),
'account.invoice.tax': (_get_invoice_tax, None, 50),
'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount','invoice_id'], 50),
'account.move.line': (_get_invoice_from_line, None, 50),
'account.move.reconcile': (_get_invoice_from_reconcile, None, 50),
},
help="Remaining amount due."),
'payment_ids': fields.function(_compute_lines, relation='account.move.line', type="many2many", string='Payments', groups='base.group_user'),
'move_name': fields.char('Journal Entry', size=64, readonly=True, states={'draft':[('readonly',False)]}),
'user_id': fields.many2one('res.users', 'Salesperson', readonly=True, track_visibility='onchange', states={'draft':[('readonly',False)]}),
'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position', readonly=True, states={'draft':[('readonly',False)]})
}
_defaults = {
'type': _get_type,
'state': 'draft',
'journal_id': _get_journal,
'currency_id': _get_currency,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.invoice', context=c),
'reference_type': 'none',
'check_total': 0.0,
'internal_number': False,
'user_id': lambda s, cr, u, c: u,
'sent': False,
}
_sql_constraints = [
('number_uniq', 'unique(number, company_id, journal_id, type)', 'Invoice Number must be unique per Company!'),
]
def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):
journal_obj = self.pool.get('account.journal')
if context is None:
context = {}
if context.get('active_model', '') in ['res.partner'] and context.get('active_ids', False) and context['active_ids']:
partner = self.pool.get(context['active_model']).read(cr, uid, context['active_ids'], ['supplier','customer'])[0]
if not view_type:
view_id = self.pool.get('ir.ui.view').search(cr, uid, [('name', '=', 'account.invoice.tree')])
view_type = 'tree'
if view_type == 'form':
if partner['supplier'] and not partner['customer']:
view_id = self.pool.get('ir.ui.view').search(cr,uid,[('name', '=', 'account.invoice.supplier.form')])
elif partner['customer'] and not partner['supplier']:
view_id = self.pool.get('ir.ui.view').search(cr,uid,[('name', '=', 'account.invoice.form')])
if view_id and isinstance(view_id, (list, tuple)):
view_id = view_id[0]
res = super(account_invoice,self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
type = context.get('journal_type', False)
for field in res['fields']:
if field == 'journal_id' and type:
journal_select = journal_obj._name_search(cr, uid, '', [('type', '=', type)], context=context, limit=None, name_get_uid=1)
res['fields'][field]['selection'] = journal_select
doc = etree.XML(res['arch'])
if context.get('type', False):
for node in doc.xpath("//field[@name='partner_bank_id']"):
if context['type'] == 'in_refund':
node.set('domain', "[('partner_id.ref_companies', 'in', [company_id])]")
elif context['type'] == 'out_refund':
node.set('domain', "[('partner_id', '=', partner_id)]")
res['arch'] = etree.tostring(doc)
if view_type == 'search':
if context.get('type', 'in_invoice') in ('out_invoice', 'out_refund'):
for node in doc.xpath("//group[@name='extended filter']"):
doc.remove(node)
res['arch'] = etree.tostring(doc)
if view_type == 'tree':
partner_string = _('Customer')
if context.get('type', 'out_invoice') in ('in_invoice', 'in_refund'):
partner_string = _('Supplier')
for node in doc.xpath("//field[@name='reference']"):
node.set('invisible', '0')
for node in doc.xpath("//field[@name='partner_id']"):
node.set('string', partner_string)
res['arch'] = etree.tostring(doc)
return res
def get_log_context(self, cr, uid, context=None):
if context is None:
context = {}
res = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', 'invoice_form')
view_id = res and res[1] or False
context['view_id'] = view_id
return context
def invoice_print(self, cr, uid, ids, context=None):
'''
This function prints the invoice and marks it as sent, so that the next step of the workflow can be followed more easily
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time.'
self.write(cr, uid, ids, {'sent': True}, context=context)
datas = {
'ids': ids,
'model': 'account.invoice',
'form': self.read(cr, uid, ids[0], context=context)
}
return {
'type': 'ir.actions.report.xml',
'report_name': 'account.invoice',
'datas': datas,
'nodestroy' : True
}
def action_invoice_sent(self, cr, uid, ids, context=None):
'''
This function opens a window to compose an email, with the edi invoice template message loaded by default
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time.'
ir_model_data = self.pool.get('ir.model.data')
try:
template_id = ir_model_data.get_object_reference(cr, uid, 'account', 'email_template_edi_invoice')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict(context)
ctx.update({
'default_model': 'account.invoice',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
'mark_invoice_as_sent': True,
})
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
def confirm_paid(self, cr, uid, ids, context=None):
if context is None:
context = {}
self.write(cr, uid, ids, {'state':'paid'}, context=context)
return True
def unlink(self, cr, uid, ids, context=None):
if context is None:
context = {}
invoices = self.read(cr, uid, ids, ['state','internal_number'], context=context)
unlink_ids = []
for t in invoices:
if t['state'] not in ('draft', 'cancel'):
raise openerp.exceptions.Warning(_('You cannot delete an invoice which is not draft or cancelled. You should refund it instead.'))
elif t['internal_number']:
raise openerp.exceptions.Warning(_('You cannot delete an invoice after it has been validated (and received a number). You can set it back to "Draft" state and modify its content, then re-confirm it.'))
else:
unlink_ids.append(t['id'])
osv.osv.unlink(self, cr, uid, unlink_ids, context=context)
return True
def onchange_partner_id(self, cr, uid, ids, type, partner_id,\
date_invoice=False, payment_term=False, partner_bank_id=False, company_id=False):
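# Resolve the partner's receivable/payable account (falling back to the
# company-level ir.property records when the partner's accounts belong to
# another company), the default payment term, fiscal position and first
# bank account, then cascade the due-date / bank onchanges so the form
# gets consistent default values.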
partner_payment_term = False
acc_id = False
bank_id = False
fiscal_position = False
opt = [('uid', str(uid))]
if partner_id:
opt.insert(0, ('id', partner_id))
p = self.pool.get('res.partner').browse(cr, uid, partner_id)
if company_id:
if (p.property_account_receivable.company_id and (p.property_account_receivable.company_id.id != company_id)) and (p.property_account_payable.company_id and (p.property_account_payable.company_id.id != company_id)):
property_obj = self.pool.get('ir.property')
rec_pro_id = property_obj.search(cr,uid,[('name','=','property_account_receivable'),('res_id','=','res.partner,'+str(partner_id)+''),('company_id','=',company_id)])
pay_pro_id = property_obj.search(cr,uid,[('name','=','property_account_payable'),('res_id','=','res.partner,'+str(partner_id)+''),('company_id','=',company_id)])
if not rec_pro_id:
rec_pro_id = property_obj.search(cr,uid,[('name','=','property_account_receivable'),('company_id','=',company_id)])
if not pay_pro_id:
pay_pro_id = property_obj.search(cr,uid,[('name','=','property_account_payable'),('company_id','=',company_id)])
rec_line_data = property_obj.read(cr,uid,rec_pro_id,['name','value_reference','res_id'])
pay_line_data = property_obj.read(cr,uid,pay_pro_id,['name','value_reference','res_id'])
rec_res_id = rec_line_data and rec_line_data[0].get('value_reference',False) and int(rec_line_data[0]['value_reference'].split(',')[1]) or False
pay_res_id = pay_line_data and pay_line_data[0].get('value_reference',False) and int(pay_line_data[0]['value_reference'].split(',')[1]) or False
if not rec_res_id and not pay_res_id:
raise osv.except_osv(_('Configuration Error!'),
_('Cannot find a chart of accounts for this company, you should create one.'))
account_obj = self.pool.get('account.account')
rec_obj_acc = account_obj.browse(cr, uid, [rec_res_id])
pay_obj_acc = account_obj.browse(cr, uid, [pay_res_id])
p.property_account_receivable = rec_obj_acc[0]
p.property_account_payable = pay_obj_acc[0]
if type in ('out_invoice', 'out_refund'):
acc_id = p.property_account_receivable.id
partner_payment_term = p.property_payment_term and p.property_payment_term.id or False
else:
acc_id = p.property_account_payable.id
partner_payment_term = p.property_supplier_payment_term and p.property_supplier_payment_term.id or False
fiscal_position = p.property_account_position and p.property_account_position.id or False
if p.bank_ids:
bank_id = p.bank_ids[0].id
result = {'value': {
'account_id': acc_id,
'payment_term': partner_payment_term,
'fiscal_position': fiscal_position
}
}
if type in ('in_invoice', 'in_refund'):
result['value']['partner_bank_id'] = bank_id
if payment_term != partner_payment_term:
if partner_payment_term:
to_update = self.onchange_payment_term_date_invoice(
cr, uid, ids, partner_payment_term, date_invoice)
result['value'].update(to_update['value'])
else:
result['value']['date_due'] = False
if partner_bank_id != bank_id:
to_update = self.onchange_partner_bank(cr, uid, ids, bank_id)
result['value'].update(to_update['value'])
return result
def onchange_journal_id(self, cr, uid, ids, journal_id=False, context=None):
result = {}
if journal_id:
journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
currency_id = journal.currency and journal.currency.id or journal.company_id.currency_id.id
company_id = journal.company_id.id
result = {'value': {
'currency_id': currency_id,
'company_id': company_id,
}
}
return result
def onchange_payment_term_date_invoice(self, cr, uid, ids, payment_term_id, date_invoice):
res = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not date_invoice:
date_invoice = fields.date.context_today(self, cr, uid)
if not payment_term_id:
inv = self.browse(cr, uid, ids[0])
#When no payment term is defined, keep the due date entered by the user (falling back to the invoice date)
return {'value':{'date_due': inv.date_due and inv.date_due or date_invoice}}
pterm_list = self.pool.get('account.payment.term').compute(cr, uid, payment_term_id, value=1, date_ref=date_invoice)
if pterm_list:
pterm_list = [line[0] for line in pterm_list]
pterm_list.sort()
res = {'value':{'date_due': pterm_list[-1]}}
else:
raise osv.except_osv(_('Insufficient Data!'), _('The payment term of supplier does not have a payment term line.'))
return res
def onchange_invoice_line(self, cr, uid, ids, lines):
return {}
def onchange_partner_bank(self, cursor, user, ids, partner_bank_id=False):
return {'value': {}}
def onchange_company_id(self, cr, uid, ids, company_id, part_id, type, invoice_line, currency_id):
#TODO: add the missing context parameter when forward-porting in trunk so we can remove
# this hack!
context = self.pool['res.users'].context_get(cr, uid)
val = {}
dom = {}
obj_journal = self.pool.get('account.journal')
account_obj = self.pool.get('account.account')
inv_line_obj = self.pool.get('account.invoice.line')
if company_id and part_id and type:
acc_id = False
partner_obj = self.pool.get('res.partner').browse(cr,uid,part_id)
if partner_obj.property_account_payable and partner_obj.property_account_receivable:
if partner_obj.property_account_payable.company_id.id != company_id and partner_obj.property_account_receivable.company_id.id != company_id:
property_obj = self.pool.get('ir.property')
rec_pro_id = property_obj.search(cr, uid, [('name','=','property_account_receivable'),('res_id','=','res.partner,'+str(part_id)+''),('company_id','=',company_id)])
pay_pro_id = property_obj.search(cr, uid, [('name','=','property_account_payable'),('res_id','=','res.partner,'+str(part_id)+''),('company_id','=',company_id)])
if not rec_pro_id:
rec_pro_id = property_obj.search(cr, uid, [('name','=','property_account_receivable'),('company_id','=',company_id)])
if not pay_pro_id:
pay_pro_id = property_obj.search(cr, uid, [('name','=','property_account_payable'),('company_id','=',company_id)])
rec_line_data = property_obj.read(cr, uid, rec_pro_id, ['name','value_reference','res_id'])
pay_line_data = property_obj.read(cr, uid, pay_pro_id, ['name','value_reference','res_id'])
rec_res_id = rec_line_data and rec_line_data[0].get('value_reference',False) and int(rec_line_data[0]['value_reference'].split(',')[1]) or False
pay_res_id = pay_line_data and pay_line_data[0].get('value_reference',False) and int(pay_line_data[0]['value_reference'].split(',')[1]) or False
if not rec_res_id and not pay_res_id:
raise osv.except_osv(_('Configuration Error!'),
_('Cannot find a chart of account, you should create one from Settings\Configuration\Accounting menu.'))
if type in ('out_invoice', 'out_refund'):
acc_id = rec_res_id
else:
acc_id = pay_res_id
val= {'account_id': acc_id}
if ids:
if company_id:
inv_obj = self.browse(cr,uid,ids)
for line in inv_obj[0].invoice_line:
if line.account_id:
if line.account_id.company_id.id != company_id:
result_id = account_obj.search(cr, uid, [('name','=',line.account_id.name),('company_id','=',company_id)])
if not result_id:
raise osv.except_osv(_('Configuration Error!'),
_('Cannot find a chart of account, you should create one from Settings\Configuration\Accounting menu.'))
inv_line_obj.write(cr, uid, [line.id], {'account_id': result_id[-1]})
else:
if invoice_line:
for inv_line in invoice_line:
obj_l = account_obj.browse(cr, uid, inv_line[2]['account_id'])
if obj_l.company_id.id != company_id:
raise osv.except_osv(_('Configuration Error!'),
_('Invoice line account\'s company and invoice\'s company do not match.'))
else:
continue
if company_id and type:
journal_mapping = {
'out_invoice': 'sale',
'out_refund': 'sale_refund',
'in_refund': 'purchase_refund',
'in_invoice': 'purchase',
}
journal_type = journal_mapping[type]
journal_ids = obj_journal.search(cr, uid, [('company_id','=',company_id), ('type', '=', journal_type)])
if journal_ids:
val['journal_id'] = journal_ids[0]
ir_values_obj = self.pool.get('ir.values')
res_journal_default = ir_values_obj.get(cr, uid, 'default', 'type=%s' % (type), ['account.invoice'])
for r in res_journal_default:
if r[1] == 'journal_id' and r[2] in journal_ids:
val['journal_id'] = r[2]
if not val.get('journal_id', False):
journal_type_map = dict(obj_journal._columns['type'].selection)
journal_type_label = self.pool['ir.translation']._get_source(cr, uid, None, ('code','selection'),
context.get('lang'),
journal_type_map.get(journal_type))
raise osv.except_osv(_('Configuration Error!'),
_('Cannot find any account journal of %s type for this company.\n\nYou can create one in the menu: \nConfiguration\Journals\Journals.') % ('"%s"' % journal_type_label))
dom = {'journal_id': [('id', 'in', journal_ids)]}
else:
journal_ids = obj_journal.search(cr, uid, [])
return {'value': val, 'domain': dom}
# go from canceled state to draft state
def action_cancel_draft(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {'state':'draft'})
wf_service = netsvc.LocalService("workflow")
for inv_id in ids:
wf_service.trg_delete(uid, 'account.invoice', inv_id, cr)
wf_service.trg_create(uid, 'account.invoice', inv_id, cr)
return True
# Workflow stuff
#################
# return the ids of the move lines which have the same account as the invoice
# whose id is in ids
def move_line_id_payment_get(self, cr, uid, ids, *args):
if not ids: return []
result = self.move_line_id_payment_gets(cr, uid, ids, *args)
return result.get(ids[0], [])
def move_line_id_payment_gets(self, cr, uid, ids, *args):
res = {}
if not ids: return res
cr.execute('SELECT i.id, l.id '\
'FROM account_move_line l '\
'LEFT JOIN account_invoice i ON (i.move_id=l.move_id) '\
'WHERE i.id IN %s '\
'AND l.account_id=i.account_id',
(tuple(ids),))
for r in cr.fetchall():
res.setdefault(r[0], [])
res[r[0]].append( r[1] )
return res
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
default.update({
'state':'draft',
'number':False,
'move_id':False,
'move_name':False,
'internal_number': False,
'period_id': False,
'sent': False,
})
if 'date_invoice' not in default:
default.update({
'date_invoice':False
})
if 'date_due' not in default:
default.update({
'date_due':False
})
return super(account_invoice, self).copy(cr, uid, id, default, context)
def test_paid(self, cr, uid, ids, *args):
res = self.move_line_id_payment_get(cr, uid, ids)
if not res:
return False
ok = True
for id in res:
cr.execute('select reconcile_id from account_move_line where id=%s', (id,))
ok = ok and bool(cr.fetchone()[0])
return ok
def button_reset_taxes(self, cr, uid, ids, context=None):
if context is None:
context = {}
ctx = context.copy()
ait_obj = self.pool.get('account.invoice.tax')
for id in ids:
cr.execute("DELETE FROM account_invoice_tax WHERE invoice_id=%s AND manual is False", (id,))
partner = self.browse(cr, uid, id, context=ctx).partner_id
if partner.lang:
ctx.update({'lang': partner.lang})
for taxe in ait_obj.compute(cr, uid, id, context=ctx).values():
ait_obj.create(cr, uid, taxe)
# Update the stored value (fields.function), so we write to trigger recompute
self.pool.get('account.invoice').write(cr, uid, ids, {'invoice_line':[]}, context=ctx)
return True
def button_compute(self, cr, uid, ids, context=None, set_total=False):
self.button_reset_taxes(cr, uid, ids, context)
for inv in self.browse(cr, uid, ids, context=context):
if set_total:
self.pool.get('account.invoice').write(cr, uid, [inv.id], {'check_total': inv.amount_total})
return True
def _convert_ref(self, cr, uid, ref):
return (ref or '').replace('/','')
def _get_analytic_lines(self, cr, uid, id, context=None):
if context is None:
context = {}
inv = self.browse(cr, uid, id)
cur_obj = self.pool.get('res.currency')
company_currency = self.pool['res.company'].browse(cr, uid, inv.company_id.id).currency_id.id
if inv.type in ('out_invoice', 'in_refund'):
sign = 1
else:
sign = -1
iml = self.pool.get('account.invoice.line').move_line_get(cr, uid, inv.id, context=context)
for il in iml:
if il['account_analytic_id']:
if inv.type in ('in_invoice', 'in_refund'):
ref = inv.reference
else:
ref = self._convert_ref(cr, uid, inv.number)
if not inv.journal_id.analytic_journal_id:
raise osv.except_osv(_('No Analytic Journal!'),_("You have to define an analytic journal on the '%s' journal!") % (inv.journal_id.name,))
il['analytic_lines'] = [(0,0, {
'name': il['name'],
'date': inv['date_invoice'],
'account_id': il['account_analytic_id'],
'unit_amount': il['quantity'],
'amount': cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, il['price'], context={'date': inv.date_invoice}) * sign,
'product_id': il['product_id'],
'product_uom_id': il['uos_id'],
'general_account_id': il['account_id'],
'journal_id': inv.journal_id.analytic_journal_id.id,
'ref': ref,
})]
return iml
def action_date_assign(self, cr, uid, ids, *args):
for inv in self.browse(cr, uid, ids):
res = self.onchange_payment_term_date_invoice(cr, uid, inv.id, inv.payment_term.id, inv.date_invoice)
if res and res['value']:
self.write(cr, uid, [inv.id], res['value'])
return True
def finalize_invoice_move_lines(self, cr, uid, invoice_browse, move_lines):
"""finalize_invoice_move_lines(cr, uid, invoice, move_lines) -> move_lines
Hook method to be overridden in additional modules to verify and possibly alter the
move lines to be created by an invoice, for special cases.
:param invoice_browse: browsable record of the invoice that is generating the move lines
:param move_lines: list of dictionaries with the account.move.lines (as for create())
:return: the (possibly updated) final move_lines to create for this invoice
"""
return move_lines
def check_tax_lines(self, cr, uid, inv, compute_taxes, ait_obj):
company_currency = self.pool['res.company'].browse(cr, uid, inv.company_id.id).currency_id
if not inv.tax_line:
for tax in compute_taxes.values():
ait_obj.create(cr, uid, tax)
else:
tax_key = []
for tax in inv.tax_line:
if tax.manual:
continue
key = (tax.tax_code_id.id, tax.base_code_id.id, tax.account_id.id, tax.account_analytic_id.id)
tax_key.append(key)
if not key in compute_taxes:
raise osv.except_osv(_('Warning!'), _('Global taxes defined, but they are not in invoice lines !'))
base = compute_taxes[key]['base']
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
if float_compare(abs(base - tax.base), company_currency.rounding, precision_digits=precision) == 1:
raise osv.except_osv(_('Warning!'), _('Tax base different!\nClick on compute to update the tax base.'))
for key in compute_taxes:
if not key in tax_key:
raise osv.except_osv(_('Warning!'), _('Taxes are missing!\nClick on compute button.'))
def compute_invoice_totals(self, cr, uid, inv, company_currency, ref, invoice_move_lines, context=None):
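# Sign convention used below: for customer invoices and supplier refunds
# ('out_invoice', 'in_refund') the line prices are accumulated positively
# into the total and then negated on the lines themselves, so that the
# single 'dest' counterpart line created in action_move_create balances
# the move; amounts are converted to the company currency when the
# invoice uses a different one.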
if context is None:
context={}
total = 0
total_currency = 0
cur_obj = self.pool.get('res.currency')
for i in invoice_move_lines:
if inv.currency_id.id != company_currency:
context.update({'date': inv.date_invoice or fields.date.context_today(self, cr, uid, context=context)})
i['currency_id'] = inv.currency_id.id
i['amount_currency'] = i['price']
i['price'] = cur_obj.compute(cr, uid, inv.currency_id.id,
company_currency, i['price'],
context=context)
else:
i['amount_currency'] = False
i['currency_id'] = False
i['ref'] = ref
if inv.type in ('out_invoice','in_refund'):
total += i['price']
total_currency += i['amount_currency'] or i['price']
i['price'] = - i['price']
else:
total -= i['price']
total_currency -= i['amount_currency'] or i['price']
return total, total_currency, invoice_move_lines
def inv_line_characteristic_hashcode(self, invoice, invoice_line):
"""Overridable hashcode generation for invoice lines. Lines having the same hashcode
will be grouped together if the journal has the 'group line' option. A module that
adds fields to invoice lines should extend this hashcode so those fields are taken
into account before lines are merged."""
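# Illustrative example (hypothetical ids): a line with account 42, no tax
# code, product 7, no analytic account and no maturity date hashes to
# "42-False-7-False-False"; two such lines end up merged by group_lines()
# when the journal groups invoice lines.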
return "%s-%s-%s-%s-%s"%(
invoice_line['account_id'],
invoice_line.get('tax_code_id',"False"),
invoice_line.get('product_id',"False"),
invoice_line.get('analytic_account_id',"False"),
invoice_line.get('date_maturity',"False"))
def group_lines(self, cr, uid, iml, line, inv):
"""Merge account move lines (and hence analytic lines) if invoice line hashcodes are equals"""
if inv.journal_id.group_invoice_lines:
line2 = {}
for x, y, l in line:
tmp = self.inv_line_characteristic_hashcode(inv, l)
if tmp in line2:
am = line2[tmp]['debit'] - line2[tmp]['credit'] + (l['debit'] - l['credit'])
line2[tmp]['debit'] = (am > 0) and am or 0.0
line2[tmp]['credit'] = (am < 0) and -am or 0.0
line2[tmp]['tax_amount'] += l['tax_amount']
line2[tmp]['analytic_lines'] += l['analytic_lines']
else:
line2[tmp] = l
line = []
for key, val in line2.items():
line.append((0,0,val))
return line
def action_move_create(self, cr, uid, ids, context=None):
"""Creates invoice related analytics and financial move lines"""
ait_obj = self.pool.get('account.invoice.tax')
cur_obj = self.pool.get('res.currency')
period_obj = self.pool.get('account.period')
payment_term_obj = self.pool.get('account.payment.term')
journal_obj = self.pool.get('account.journal')
move_obj = self.pool.get('account.move')
if context is None:
context = {}
for inv in self.browse(cr, uid, ids, context=context):
if not inv.journal_id.sequence_id:
raise osv.except_osv(_('Error!'), _('Please define sequence on the journal related to this invoice.'))
if not inv.invoice_line:
raise osv.except_osv(_('No Invoice Lines!'), _('Please create some invoice lines.'))
if inv.move_id:
continue
ctx = context.copy()
ctx.update({'lang': inv.partner_id.lang})
if not inv.date_invoice:
self.write(cr, uid, [inv.id], {'date_invoice': fields.date.context_today(self, cr, uid, context=context)}, context=ctx)
inv.refresh()
company_currency = self.pool['res.company'].browse(cr, uid, inv.company_id.id).currency_id.id
# create the analytical lines
# one move line per invoice line
iml = self._get_analytic_lines(cr, uid, inv.id, context=ctx)
# check if taxes are all computed
compute_taxes = ait_obj.compute(cr, uid, inv.id, context=ctx)
self.check_tax_lines(cr, uid, inv, compute_taxes, ait_obj)
# I disabled the check_total feature
if self.pool['res.users'].has_group(cr, uid, 'account.group_supplier_inv_check_total'):
if (inv.type in ('in_invoice', 'in_refund') and abs(inv.check_total - inv.amount_total) >= (inv.currency_id.rounding/2.0)):
raise osv.except_osv(_('Bad Total!'), _('Please verify the price of the invoice!\nThe encoded total does not match the computed total.'))
if inv.payment_term:
total_fixed = total_percent = 0
for line in inv.payment_term.line_ids:
if line.value == 'fixed':
total_fixed += line.value_amount
if line.value == 'procent':
total_percent += line.value_amount
total_fixed = (total_fixed * 100) / (inv.amount_total or 1.0)
if (total_fixed + total_percent) > 100:
raise osv.except_osv(_('Error!'), _("Cannot create the invoice.\nThe related payment term is probably misconfigured as it gives a computed amount greater than the total invoiced amount. In order to avoid rounding issues, the latest line of your payment term must be of type 'balance'."))
# one move line per tax line
iml += ait_obj.move_line_get(cr, uid, inv.id)
entry_type = ''
if inv.type in ('in_invoice', 'in_refund'):
ref = inv.reference
entry_type = 'journal_pur_voucher'
if inv.type == 'in_refund':
entry_type = 'cont_voucher'
else:
ref = self._convert_ref(cr, uid, inv.number)
entry_type = 'journal_sale_vou'
if inv.type == 'out_refund':
entry_type = 'cont_voucher'
diff_currency_p = inv.currency_id.id != company_currency
# create one move line for the total and possibly adjust the other lines amount
total = 0
total_currency = 0
total, total_currency, iml = self.compute_invoice_totals(cr, uid, inv, company_currency, ref, iml, context=ctx)
acc_id = inv.account_id.id
name = inv['name'] or inv['supplier_invoice_number'] or '/'
totlines = False
if inv.payment_term:
totlines = payment_term_obj.compute(cr,
uid, inv.payment_term.id, total, inv.date_invoice or False, context=ctx)
if totlines:
res_amount_currency = total_currency
i = 0
ctx.update({'date': inv.date_invoice})
for t in totlines:
if inv.currency_id.id != company_currency:
amount_currency = cur_obj.compute(cr, uid, company_currency, inv.currency_id.id, t[1], context=ctx)
else:
amount_currency = False
# last line: add back the rounding difference
res_amount_currency -= amount_currency or 0
i += 1
if i == len(totlines):
amount_currency += res_amount_currency
iml.append({
'type': 'dest',
'name': name,
'price': t[1],
'account_id': acc_id,
'date_maturity': t[0],
'amount_currency': diff_currency_p \
and amount_currency or False,
'currency_id': diff_currency_p \
and inv.currency_id.id or False,
'ref': ref,
})
else:
iml.append({
'type': 'dest',
'name': name,
'price': total,
'account_id': acc_id,
'date_maturity': inv.date_due or False,
'amount_currency': diff_currency_p \
and total_currency or False,
'currency_id': diff_currency_p \
and inv.currency_id.id or False,
'ref': ref
})
part = self.pool.get("res.partner")._find_accounting_partner(inv.partner_id)
line = map(lambda x:(0,0,self.line_get_convert(cr, uid, x, part.id, inv.date_invoice, context=ctx)),iml)
line = self.group_lines(cr, uid, iml, line, inv)
journal_id = inv.journal_id.id
journal = journal_obj.browse(cr, uid, journal_id, context=ctx)
if journal.centralisation:
raise osv.except_osv(_('User Error!'),
_('You cannot create an invoice on a centralized journal. Uncheck the centralized counterpart box in the related journal from the configuration menu.'))
line = self.finalize_invoice_move_lines(cr, uid, inv, line)
move = {
'ref': inv.reference and inv.reference or inv.name,
'line_id': line,
'journal_id': journal_id,
'date': inv.date_invoice,
'narration': inv.comment,
'company_id': inv.company_id.id,
}
period_id = inv.period_id and inv.period_id.id or False
ctx.update(company_id=inv.company_id.id,
account_period_prefer_normal=True)
if not period_id:
period_ids = period_obj.find(cr, uid, inv.date_invoice, context=ctx)
period_id = period_ids and period_ids[0] or False
if period_id:
move['period_id'] = period_id
for i in line:
i[2]['period_id'] = period_id
ctx.update(invoice=inv)
move_id = move_obj.create(cr, uid, move, context=ctx)
new_move_name = move_obj.browse(cr, uid, move_id, context=ctx).name
# make the invoice point to that move
self.write(cr, uid, [inv.id], {'move_id': move_id,'period_id':period_id, 'move_name':new_move_name}, context=ctx)
# Pass invoice in context in method post: used if you want to get the same
# account move reference when creating the same invoice after a cancelled one:
move_obj.post(cr, uid, [move_id], context=ctx)
self._log_event(cr, uid, ids)
return True
def invoice_validate(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state':'open'}, context=context)
return True
def line_get_convert(self, cr, uid, x, part, date, context=None):
return {
'date_maturity': x.get('date_maturity', False),
'partner_id': part,
'name': x['name'][:64],
'date': date,
'debit': x['price']>0 and x['price'],
'credit': x['price']<0 and -x['price'],
'account_id': x['account_id'],
'analytic_lines': x.get('analytic_lines', []),
'amount_currency': x['price']>0 and abs(x.get('amount_currency', False)) or -abs(x.get('amount_currency', False)),
'currency_id': x.get('currency_id', False),
'tax_code_id': x.get('tax_code_id', False),
'tax_amount': x.get('tax_amount', False),
'ref': x.get('ref', False),
'quantity': x.get('quantity',1.00),
'product_id': x.get('product_id', False),
'product_uom_id': x.get('uos_id', False),
'analytic_account_id': x.get('account_analytic_id', False),
}
def action_number(self, cr, uid, ids, context=None):
if context is None:
context = {}
#TODO: not a proper fix, but required to refresh the values before reading them.
self.write(cr, uid, ids, {})
for obj_inv in self.browse(cr, uid, ids, context=context):
invtype = obj_inv.type
number = obj_inv.number
move_id = obj_inv.move_id and obj_inv.move_id.id or False
reference = obj_inv.reference or ''
self.write(cr, uid, ids, {'internal_number': number})
if invtype in ('in_invoice', 'in_refund'):
if not reference:
ref = self._convert_ref(cr, uid, number)
else:
ref = reference
else:
ref = self._convert_ref(cr, uid, number)
cr.execute('UPDATE account_move SET ref=%s ' \
'WHERE id=%s AND (ref is null OR ref = \'\')',
(ref, move_id))
cr.execute('UPDATE account_move_line SET ref=%s ' \
'WHERE move_id=%s AND (ref is null OR ref = \'\')',
(ref, move_id))
cr.execute('UPDATE account_analytic_line SET ref=%s ' \
'FROM account_move_line ' \
'WHERE account_move_line.move_id = %s ' \
'AND account_analytic_line.move_id = account_move_line.id',
(ref, move_id))
return True
def action_cancel(self, cr, uid, ids, context=None):
if context is None:
context = {}
account_move_obj = self.pool.get('account.move')
invoices = self.read(cr, uid, ids, ['move_id', 'payment_ids'])
move_ids = [] # ones that we will need to remove
for i in invoices:
if i['move_id']:
move_ids.append(i['move_id'][0])
if i['payment_ids']:
account_move_line_obj = self.pool.get('account.move.line')
pay_ids = account_move_line_obj.browse(cr, uid, i['payment_ids'])
for move_line in pay_ids:
if move_line.reconcile_partial_id and move_line.reconcile_partial_id.line_partial_ids:
raise osv.except_osv(_('Error!'), _('You cannot cancel an invoice which is partially paid. You need to unreconcile related payment entries first.'))
# First, set the invoices as cancelled and detach the move ids
self.write(cr, uid, ids, {'state':'cancel', 'move_id':False})
if move_ids:
# second, invalidate the move(s)
account_move_obj.button_cancel(cr, uid, move_ids, context=context)
# delete the move this invoice was pointing to
# Note that the corresponding move_lines and move_reconciles
# will be automatically deleted too
account_move_obj.unlink(cr, uid, move_ids, context=context)
self._log_event(cr, uid, ids, -1.0, 'Cancel Invoice')
return True
###################
def list_distinct_taxes(self, cr, uid, ids):
invoices = self.browse(cr, uid, ids)
taxes = {}
for inv in invoices:
for tax in inv.tax_line:
if not tax['name'] in taxes:
taxes[tax['name']] = {'name': tax['name']}
return taxes.values()
def _log_event(self, cr, uid, ids, factor=1.0, name='Open Invoice'):
#TODO: implement messages system
return True
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
types = {
'out_invoice': _('Invoice'),
'in_invoice': _('Supplier Invoice'),
'out_refund': _('Refund'),
'in_refund': _('Supplier Refund'),
}
return [(r['id'], '%s %s' % (r['number'] or types[r['type']], r['name'] or '')) for r in self.read(cr, uid, ids, ['type', 'number', 'name'], context, load='_classic_write')]
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
if context is None:
context = {}
ids = []
if name:
ids = self.search(cr, user, [('number','=',name)] + args, limit=limit, context=context)
if not ids:
ids = self.search(cr, user, [('name',operator,name)] + args, limit=limit, context=context)
return self.name_get(cr, user, ids, context)
def _refund_cleanup_lines(self, cr, uid, lines, context=None):
"""Convert records to dict of values suitable for one2many line creation
:param list(browse_record) lines: records to convert
:return: list of command tuples for one2many line creation [(0, 0, dict of values), ...]
"""
clean_lines = []
for line in lines:
clean_line = {}
for field in line._all_columns.keys():
if line._all_columns[field].column._type == 'many2one':
clean_line[field] = line[field].id
elif line._all_columns[field].column._type not in ['many2many','one2many']:
clean_line[field] = line[field]
elif field == 'invoice_line_tax_id':
tax_list = []
for tax in line[field]:
tax_list.append(tax.id)
clean_line[field] = [(6,0, tax_list)]
clean_lines.append(clean_line)
return map(lambda x: (0,0,x), clean_lines)
def _prepare_refund(self, cr, uid, invoice, date=None, period_id=None, description=None, journal_id=None, context=None):
"""Prepare the dict of values to create the new refund from the invoice.
This method may be overridden to implement custom
refund generation (making sure to call super() to establish
a clean extension chain).
:param browse_record invoice: invoice to refund
:param string date: refund creation date from the wizard
:param integer period_id: force account.period from the wizard
:param string description: description of the refund from the wizard
:param integer journal_id: account.journal from the wizard
:return: dict of value to create() the refund
"""
obj_journal = self.pool.get('account.journal')
type_dict = {
'out_invoice': 'out_refund', # Customer Invoice
'in_invoice': 'in_refund', # Supplier Invoice
'out_refund': 'out_invoice', # Customer Refund
'in_refund': 'in_invoice', # Supplier Refund
}
invoice_data = {}
for field in ['name', 'reference', 'comment', 'date_due', 'partner_id', 'company_id',
'account_id', 'currency_id', 'payment_term', 'user_id', 'fiscal_position']:
if invoice._all_columns[field].column._type == 'many2one':
invoice_data[field] = invoice[field].id
else:
invoice_data[field] = invoice[field] if invoice[field] else False
invoice_lines = self._refund_cleanup_lines(cr, uid, invoice.invoice_line, context=context)
tax_lines = filter(lambda l: l['manual'], invoice.tax_line)
tax_lines = self._refund_cleanup_lines(cr, uid, tax_lines, context=context)
if journal_id:
refund_journal_ids = [journal_id]
elif invoice['type'] == 'in_invoice':
refund_journal_ids = obj_journal.search(cr, uid, [('type','=','purchase_refund')], context=context)
else:
refund_journal_ids = obj_journal.search(cr, uid, [('type','=','sale_refund')], context=context)
if not date:
date = fields.date.context_today(self, cr, uid, context=context)
invoice_data.update({
'type': type_dict[invoice['type']],
'date_invoice': date,
'state': 'draft',
'number': False,
'invoice_line': invoice_lines,
'tax_line': tax_lines,
'journal_id': refund_journal_ids and refund_journal_ids[0] or False,
})
if period_id:
invoice_data['period_id'] = period_id
if description:
invoice_data['name'] = description
return invoice_data
def refund(self, cr, uid, ids, date=None, period_id=None, description=None, journal_id=None, context=None):
new_ids = []
for invoice in self.browse(cr, uid, ids, context=context):
invoice = self._prepare_refund(cr, uid, invoice,
date=date,
period_id=period_id,
description=description,
journal_id=journal_id,
context=context)
# create the new invoice
new_ids.append(self.create(cr, uid, invoice, context=context))
return new_ids
def pay_and_reconcile(self, cr, uid, ids, pay_amount, pay_account_id, period_id, pay_journal_id, writeoff_acc_id, writeoff_period_id, writeoff_journal_id, context=None, name=''):
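# Hedged usage sketch (all ids are hypothetical): register a 150.0 payment
# on a single invoice through a bank journal and reconcile it, without
# write-off:
#
#   inv_obj.pay_and_reconcile(cr, uid, [invoice_id], 150.0,
#       pay_account_id=bank_account_id, period_id=period_id,
#       pay_journal_id=bank_journal_id, writeoff_acc_id=False,
#       writeoff_period_id=False, writeoff_journal_id=False,
#       context=context, name='Customer payment')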
if context is None:
context = {}
#TODO check if we can use different period for payment and the writeoff line
assert len(ids)==1, "Can only pay one invoice at a time."
invoice = self.browse(cr, uid, ids[0], context=context)
src_account_id = invoice.account_id.id
# Determine the sign (direction) of the payment from the invoice type
types = {'out_invoice': -1, 'in_invoice': 1, 'out_refund': 1, 'in_refund': -1}
direction = types[invoice.type]
#take the chosen payment date from the context, or default to today
if 'date_p' in context and context['date_p']:
date=context['date_p']
else:
date=time.strftime('%Y-%m-%d')
# Take the amount in currency and the currency of the payment
if 'amount_currency' in context and context['amount_currency'] and 'currency_id' in context and context['currency_id']:
amount_currency = context['amount_currency']
currency_id = context['currency_id']
else:
amount_currency = False
currency_id = False
pay_journal = self.pool.get('account.journal').read(cr, uid, pay_journal_id, ['type'], context=context)
if invoice.type in ('in_invoice', 'out_invoice'):
if pay_journal['type'] == 'bank':
entry_type = 'bank_pay_voucher' # Bank payment
else:
entry_type = 'pay_voucher' # Cash payment
else:
entry_type = 'cont_voucher'
if invoice.type in ('in_invoice', 'in_refund'):
ref = invoice.reference
else:
ref = self._convert_ref(cr, uid, invoice.number)
partner = self.pool['res.partner']._find_accounting_partner(invoice.partner_id)
# Pay attention to the sign for both debit/credit AND amount_currency
l1 = {
'debit': direction * pay_amount>0 and direction * pay_amount,
'credit': direction * pay_amount<0 and - direction * pay_amount,
'account_id': src_account_id,
'partner_id': partner.id,
'ref':ref,
'date': date,
'currency_id':currency_id,
'amount_currency':amount_currency and direction * amount_currency or 0.0,
'company_id': invoice.company_id.id,
}
l2 = {
'debit': direction * pay_amount<0 and - direction * pay_amount,
'credit': direction * pay_amount>0 and direction * pay_amount,
'account_id': pay_account_id,
'partner_id': partner.id,
'ref':ref,
'date': date,
'currency_id':currency_id,
'amount_currency':amount_currency and - direction * amount_currency or 0.0,
'company_id': invoice.company_id.id,
}
if not name:
name = invoice.invoice_line and invoice.invoice_line[0].name or invoice.number
l1['name'] = name
l2['name'] = name
lines = [(0, 0, l1), (0, 0, l2)]
move = {'ref': ref, 'line_id': lines, 'journal_id': pay_journal_id, 'period_id': period_id, 'date': date}
move_id = self.pool.get('account.move').create(cr, uid, move, context=context)
line_ids = []
total = 0.0
line = self.pool.get('account.move.line')
move_ids = [move_id,]
if invoice.move_id:
move_ids.append(invoice.move_id.id)
cr.execute('SELECT id FROM account_move_line '\
'WHERE move_id IN %s',
((move_id, invoice.move_id.id),))
lines = line.browse(cr, uid, map(lambda x: x[0], cr.fetchall()) )
for l in lines+invoice.payment_ids:
if l.account_id.id == src_account_id:
line_ids.append(l.id)
total += (l.debit or 0.0) - (l.credit or 0.0)
inv_id, name = self.name_get(cr, uid, [invoice.id], context=context)[0]
if (not round(total,self.pool.get('decimal.precision').precision_get(cr, uid, 'Account'))) or writeoff_acc_id:
self.pool.get('account.move.line').reconcile(cr, uid, line_ids, 'manual', writeoff_acc_id, writeoff_period_id, writeoff_journal_id, context)
else:
code = invoice.currency_id.symbol
# TODO: use currency's formatting function
msg = _("Invoice partially paid: %s%s of %s%s (%s%s remaining).") % \
(pay_amount, code, invoice.amount_total, code, total, code)
self.message_post(cr, uid, [inv_id], body=msg, context=context)
self.pool.get('account.move.line').reconcile_partial(cr, uid, line_ids, 'manual', context)
# Update the stored value (fields.function), so we write to trigger recompute
self.pool.get('account.invoice').write(cr, uid, ids, {}, context=context)
return True
class account_invoice_line(osv.osv):
def _amount_line(self, cr, uid, ids, prop, unknow_none, unknow_dict):
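# price_subtotal of a line = quantity * price_unit * (1 - discount/100),
# taxes excluded (compute_all()['total'] strips included taxes), rounded
# to the invoice currency. Illustrative: qty 2, unit price 50.0, discount
# 10% -> 90.0 before taxes.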
res = {}
tax_obj = self.pool.get('account.tax')
cur_obj = self.pool.get('res.currency')
for line in self.browse(cr, uid, ids):
price = line.price_unit * (1-(line.discount or 0.0)/100.0)
taxes = tax_obj.compute_all(cr, uid, line.invoice_line_tax_id, price, line.quantity, product=line.product_id, partner=line.invoice_id.partner_id)
res[line.id] = taxes['total']
if line.invoice_id:
cur = line.invoice_id.currency_id
res[line.id] = cur_obj.round(cr, uid, cur, res[line.id])
return res
def _price_unit_default(self, cr, uid, context=None):
if context is None:
context = {}
if context.get('check_total', False):
t = context['check_total']
for l in context.get('invoice_line', {}):
if isinstance(l, (list, tuple)) and len(l) >= 3 and l[2]:
tax_obj = self.pool.get('account.tax')
p = l[2].get('price_unit', 0) * (1-l[2].get('discount', 0)/100.0)
t = t - (p * l[2].get('quantity'))
taxes = l[2].get('invoice_line_tax_id')
if len(taxes[0]) >= 3 and taxes[0][2]:
taxes = tax_obj.browse(cr, uid, list(taxes[0][2]))
for tax in tax_obj.compute_all(cr, uid, taxes, p,l[2].get('quantity'), l[2].get('product_id', False), context.get('partner_id', False))['taxes']:
t = t - tax['amount']
return t
return 0
_name = "account.invoice.line"
_description = "Invoice Line"
_order = "invoice_id,sequence,id"
_columns = {
'name': fields.text('Description', required=True),
'origin': fields.char('Source Document', size=256, help="Reference of the document that produced this invoice."),
'sequence': fields.integer('Sequence', help="Gives the sequence of this line when displaying the invoice."),
'invoice_id': fields.many2one('account.invoice', 'Invoice Reference', ondelete='cascade', select=True),
'uos_id': fields.many2one('product.uom', 'Unit of Measure', ondelete='set null', select=True),
'product_id': fields.many2one('product.product', 'Product', ondelete='set null', select=True),
'account_id': fields.many2one('account.account', 'Account', required=True, domain=[('type','<>','view'), ('type', '<>', 'closed')], help="The income or expense account related to the selected product."),
'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price')),
'price_subtotal': fields.function(_amount_line, string='Amount', type="float",
digits_compute= dp.get_precision('Account'), store=True),
'quantity': fields.float('Quantity', digits_compute= dp.get_precision('Product Unit of Measure'), required=True),
'discount': fields.float('Discount (%)', digits_compute= dp.get_precision('Discount')),
'invoice_line_tax_id': fields.many2many('account.tax', 'account_invoice_line_tax', 'invoice_line_id', 'tax_id', 'Taxes', domain=[('parent_id','=',False)]),
'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'company_id': fields.related('invoice_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
'partner_id': fields.related('invoice_id','partner_id',type='many2one',relation='res.partner',string='Partner',store=True)
}
def _default_account_id(self, cr, uid, context=None):
# XXX this gets the default account for the user's company,
# it should get the default account for the invoice's company
# however, the invoice's company does not reach this point
if context is None:
context = {}
if context.get('type') in ('out_invoice','out_refund'):
prop = self.pool.get('ir.property').get(cr, uid, 'property_account_income_categ', 'product.category', context=context)
else:
prop = self.pool.get('ir.property').get(cr, uid, 'property_account_expense_categ', 'product.category', context=context)
return prop and prop.id or False
_defaults = {
'quantity': 1,
'discount': 0.0,
'price_unit': _price_unit_default,
'account_id': _default_account_id,
'sequence': 10,
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
if context is None:
context = {}
res = super(account_invoice_line,self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
if context.get('type', False):
doc = etree.XML(res['arch'])
for node in doc.xpath("//field[@name='product_id']"):
if context['type'] in ('in_invoice', 'in_refund'):
node.set('domain', "[('purchase_ok', '=', True)]")
else:
node.set('domain', "[('sale_ok', '=', True)]")
res['arch'] = etree.tostring(doc)
return res
def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, context=None, company_id=None):
if context is None:
context = {}
company_id = company_id if company_id != None else context.get('company_id',False)
context = dict(context)
context.update({'company_id': company_id, 'force_company': company_id})
if not partner_id:
raise osv.except_osv(_('No Partner Defined!'),_("You must first select a partner!") )
if not product:
if type in ('in_invoice', 'in_refund'):
return {'value': {}, 'domain':{'product_uom':[]}}
else:
return {'value': {'price_unit': 0.0}, 'domain':{'product_uom':[]}}
part = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
fpos_obj = self.pool.get('account.fiscal.position')
fpos = fposition_id and fpos_obj.browse(cr, uid, fposition_id, context=context) or False
if part.lang:
context.update({'lang': part.lang})
result = {}
res = self.pool.get('product.product').browse(cr, uid, product, context=context)
if type in ('out_invoice','out_refund'):
a = res.property_account_income.id
if not a:
a = res.categ_id.property_account_income_categ.id
else:
a = res.property_account_expense.id
if not a:
a = res.categ_id.property_account_expense_categ.id
a = fpos_obj.map_account(cr, uid, fpos, a)
if a:
result['account_id'] = a
if type in ('out_invoice', 'out_refund'):
taxes = res.taxes_id and res.taxes_id or (a and self.pool.get('account.account').browse(cr, uid, a, context=context).tax_ids or False)
else:
taxes = res.supplier_taxes_id and res.supplier_taxes_id or (a and self.pool.get('account.account').browse(cr, uid, a, context=context).tax_ids or False)
tax_id = fpos_obj.map_tax(cr, uid, fpos, taxes)
if type in ('in_invoice', 'in_refund'):
result.update( {'price_unit': price_unit or res.standard_price,'invoice_line_tax_id': tax_id} )
else:
result.update({'price_unit': res.list_price, 'invoice_line_tax_id': tax_id})
result['name'] = res.partner_ref
result['uos_id'] = uom_id or res.uom_id.id
if res.description:
result['name'] += '\n'+res.description
domain = {'uos_id':[('category_id','=',res.uom_id.category_id.id)]}
res_final = {'value':result, 'domain':domain}
if not company_id or not currency_id:
return res_final
company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
currency = self.pool.get('res.currency').browse(cr, uid, currency_id, context=context)
if company.currency_id.id != currency.id:
if type in ('in_invoice', 'in_refund'):
res_final['value']['price_unit'] = res.standard_price
new_price = res_final['value']['price_unit'] * currency.rate
res_final['value']['price_unit'] = new_price
if result['uos_id'] and result['uos_id'] != res.uom_id.id:
selected_uom = self.pool.get('product.uom').browse(cr, uid, result['uos_id'], context=context)
new_price = self.pool.get('product.uom')._compute_price(cr, uid, res.uom_id.id, res_final['value']['price_unit'], result['uos_id'])
res_final['value']['price_unit'] = new_price
return res_final
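    # Editorial note (not in the original source): when the invoice currency differs
    # from the company currency, product_id_change() above rescales the unit price by
    # currency.rate and then re-expresses it in the selected unit of measure through
    # product.uom._compute_price().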
def uos_id_change(self, cr, uid, ids, product, uom, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, context=None, company_id=None):
if context is None:
context = {}
company_id = company_id if company_id != None else context.get('company_id',False)
context = dict(context)
context.update({'company_id': company_id})
warning = {}
res = self.product_id_change(cr, uid, ids, product, uom, qty, name, type, partner_id, fposition_id, price_unit, currency_id, context=context)
if not uom:
res['value']['price_unit'] = 0.0
if product and uom:
prod = self.pool.get('product.product').browse(cr, uid, product, context=context)
prod_uom = self.pool.get('product.uom').browse(cr, uid, uom, context=context)
if prod.uom_id.category_id.id != prod_uom.category_id.id:
warning = {
'title': _('Warning!'),
'message': _('The selected unit of measure is not compatible with the unit of measure of the product.')
}
res['value'].update({'uos_id': prod.uom_id.id})
return {'value': res['value'], 'warning': warning}
return res
def move_line_get(self, cr, uid, invoice_id, context=None):
res = []
tax_obj = self.pool.get('account.tax')
cur_obj = self.pool.get('res.currency')
if context is None:
context = {}
inv = self.pool.get('account.invoice').browse(cr, uid, invoice_id, context=context)
company_currency = self.pool['res.company'].browse(cr, uid, inv.company_id.id).currency_id.id
for line in inv.invoice_line:
mres = self.move_line_get_item(cr, uid, line, context)
mres['invl_id'] = line.id
res.append(mres)
tax_code_found= False
for tax in tax_obj.compute_all(cr, uid, line.invoice_line_tax_id,
(line.price_unit * (1.0 - (line['discount'] or 0.0) / 100.0)),
line.quantity, line.product_id,
inv.partner_id)['taxes']:
if inv.type in ('out_invoice', 'in_invoice'):
tax_code_id = tax['base_code_id']
tax_amount = line.price_subtotal * tax['base_sign']
else:
tax_code_id = tax['ref_base_code_id']
tax_amount = line.price_subtotal * tax['ref_base_sign']
if tax_code_found:
if not tax_code_id:
continue
res.append(self.move_line_get_item(cr, uid, line, context))
res[-1]['price'] = 0.0
res[-1]['account_analytic_id'] = False
elif not tax_code_id:
continue
tax_code_found = True
res[-1]['tax_code_id'] = tax_code_id
res[-1]['tax_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, tax_amount, context={'date': inv.date_invoice})
return res
def move_line_get_item(self, cr, uid, line, context=None):
return {
'type':'src',
'name': line.name.split('\n')[0][:64],
'price_unit':line.price_unit,
'quantity':line.quantity,
'price':line.price_subtotal,
'account_id':line.account_id.id,
'product_id':line.product_id.id,
'uos_id':line.uos_id.id,
'account_analytic_id':line.account_analytic_id.id,
'taxes':line.invoice_line_tax_id,
}
#
# Set the tax field according to the account and the fiscal position
#
def onchange_account_id(self, cr, uid, ids, product_id, partner_id, inv_type, fposition_id, account_id):
if not account_id:
return {}
unique_tax_ids = []
fpos = fposition_id and self.pool.get('account.fiscal.position').browse(cr, uid, fposition_id) or False
account = self.pool.get('account.account').browse(cr, uid, account_id)
if not product_id:
taxes = account.tax_ids
unique_tax_ids = self.pool.get('account.fiscal.position').map_tax(cr, uid, fpos, taxes)
else:
product_change_result = self.product_id_change(cr, uid, ids, product_id, False, type=inv_type,
partner_id=partner_id, fposition_id=fposition_id,
company_id=account.company_id.id)
if product_change_result and 'value' in product_change_result and 'invoice_line_tax_id' in product_change_result['value']:
unique_tax_ids = product_change_result['value']['invoice_line_tax_id']
return {'value':{'invoice_line_tax_id': unique_tax_ids}}
account_invoice_line()
class account_invoice_tax(osv.osv):
_name = "account.invoice.tax"
_description = "Invoice Tax"
def _count_factor(self, cr, uid, ids, name, args, context=None):
res = {}
for invoice_tax in self.browse(cr, uid, ids, context=context):
res[invoice_tax.id] = {
'factor_base': 1.0,
'factor_tax': 1.0,
}
if invoice_tax.amount <> 0.0:
factor_tax = invoice_tax.tax_amount / invoice_tax.amount
res[invoice_tax.id]['factor_tax'] = factor_tax
if invoice_tax.base <> 0.0:
factor_base = invoice_tax.base_amount / invoice_tax.base
res[invoice_tax.id]['factor_base'] = factor_base
return res
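    # Illustrative reading (figures are made up): with amount == 100.0 and
    # tax_amount == 130.0, _count_factor() yields factor_tax == 1.3; the base and
    # base_amount pair drives factor_base the same way. Both factors default to 1.0
    # whenever the corresponding amount is zero.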
_columns = {
'invoice_id': fields.many2one('account.invoice', 'Invoice Line', ondelete='cascade', select=True),
'name': fields.char('Tax Description', size=64, required=True),
'account_id': fields.many2one('account.account', 'Tax Account', required=True, domain=[('type','<>','view'),('type','<>','income'), ('type', '<>', 'closed')]),
'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic account'),
'base': fields.float('Base', digits_compute=dp.get_precision('Account')),
'amount': fields.float('Amount', digits_compute=dp.get_precision('Account')),
'manual': fields.boolean('Manual'),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of invoice tax."),
'base_code_id': fields.many2one('account.tax.code', 'Base Code', help="The account basis of the tax declaration."),
'base_amount': fields.float('Base Code Amount', digits_compute=dp.get_precision('Account')),
'tax_code_id': fields.many2one('account.tax.code', 'Tax Code', help="The tax basis of the tax declaration."),
'tax_amount': fields.float('Tax Code Amount', digits_compute=dp.get_precision('Account')),
'company_id': fields.related('account_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
        'factor_base': fields.function(_count_factor, string='Multiplication factor for Base code', type='float', multi="all"),
        'factor_tax': fields.function(_count_factor, string='Multiplication factor for Tax code', type='float', multi="all")
}
def base_change(self, cr, uid, ids, base, currency_id=False, company_id=False, date_invoice=False):
cur_obj = self.pool.get('res.currency')
company_obj = self.pool.get('res.company')
company_currency = False
factor = 1
if ids:
factor = self.read(cr, uid, ids[0], ['factor_base'])['factor_base']
if company_id:
company_currency = company_obj.read(cr, uid, [company_id], ['currency_id'])[0]['currency_id'][0]
if currency_id and company_currency:
base = cur_obj.compute(cr, uid, currency_id, company_currency, base*factor, context={'date': date_invoice or fields.date.context_today(self, cr, uid)}, round=False)
return {'value': {'base_amount':base}}
def amount_change(self, cr, uid, ids, amount, currency_id=False, company_id=False, date_invoice=False):
cur_obj = self.pool.get('res.currency')
company_obj = self.pool.get('res.company')
company_currency = False
factor = 1
if ids:
factor = self.read(cr, uid, ids[0], ['factor_tax'])['factor_tax']
if company_id:
company_currency = company_obj.read(cr, uid, [company_id], ['currency_id'])[0]['currency_id'][0]
if currency_id and company_currency:
amount = cur_obj.compute(cr, uid, currency_id, company_currency, amount*factor, context={'date': date_invoice or fields.date.context_today(self, cr, uid)}, round=False)
return {'value': {'tax_amount': amount}}
_order = 'sequence'
_defaults = {
'manual': 1,
'base_amount': 0.0,
'tax_amount': 0.0,
}
def compute(self, cr, uid, invoice_id, context=None):
tax_grouped = {}
tax_obj = self.pool.get('account.tax')
cur_obj = self.pool.get('res.currency')
inv = self.pool.get('account.invoice').browse(cr, uid, invoice_id, context=context)
cur = inv.currency_id
company_currency = self.pool['res.company'].browse(cr, uid, inv.company_id.id).currency_id.id
for line in inv.invoice_line:
for tax in tax_obj.compute_all(cr, uid, line.invoice_line_tax_id, (line.price_unit* (1-(line.discount or 0.0)/100.0)), line.quantity, line.product_id, inv.partner_id)['taxes']:
val={}
val['invoice_id'] = inv.id
val['name'] = tax['name']
val['amount'] = tax['amount']
val['manual'] = False
val['sequence'] = tax['sequence']
val['base'] = cur_obj.round(cr, uid, cur, tax['price_unit'] * line['quantity'])
if inv.type in ('out_invoice','in_invoice'):
val['base_code_id'] = tax['base_code_id']
val['tax_code_id'] = tax['tax_code_id']
val['base_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['base'] * tax['base_sign'], context={'date': inv.date_invoice or fields.date.context_today(self, cr, uid, context=context)}, round=False)
val['tax_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['amount'] * tax['tax_sign'], context={'date': inv.date_invoice or fields.date.context_today(self, cr, uid, context=context)}, round=False)
val['account_id'] = tax['account_collected_id'] or line.account_id.id
val['account_analytic_id'] = tax['account_analytic_collected_id']
else:
val['base_code_id'] = tax['ref_base_code_id']
val['tax_code_id'] = tax['ref_tax_code_id']
val['base_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['base'] * tax['ref_base_sign'], context={'date': inv.date_invoice or fields.date.context_today(self, cr, uid, context=context)}, round=False)
val['tax_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['amount'] * tax['ref_tax_sign'], context={'date': inv.date_invoice or fields.date.context_today(self, cr, uid, context=context)}, round=False)
val['account_id'] = tax['account_paid_id'] or line.account_id.id
val['account_analytic_id'] = tax['account_analytic_paid_id']
key = (val['tax_code_id'], val['base_code_id'], val['account_id'], val['account_analytic_id'])
if not key in tax_grouped:
tax_grouped[key] = val
else:
tax_grouped[key]['amount'] += val['amount']
tax_grouped[key]['base'] += val['base']
tax_grouped[key]['base_amount'] += val['base_amount']
tax_grouped[key]['tax_amount'] += val['tax_amount']
for t in tax_grouped.values():
t['base'] = cur_obj.round(cr, uid, cur, t['base'])
t['amount'] = cur_obj.round(cr, uid, cur, t['amount'])
t['base_amount'] = cur_obj.round(cr, uid, cur, t['base_amount'])
t['tax_amount'] = cur_obj.round(cr, uid, cur, t['tax_amount'])
return tax_grouped
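    # Editorial note: compute() groups taxes by the (tax_code, base_code, account,
    # analytic account) key, so invoice lines sharing the same tax and accounts
    # collapse into a single account.invoice.tax entry whose amounts are summed and
    # then rounded in the invoice currency.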
def move_line_get(self, cr, uid, invoice_id):
res = []
cr.execute('SELECT * FROM account_invoice_tax WHERE invoice_id=%s', (invoice_id,))
for t in cr.dictfetchall():
if not t['amount'] \
and not t['tax_code_id'] \
and not t['tax_amount']:
continue
res.append({
'type':'tax',
'name':t['name'],
'price_unit': t['amount'],
'quantity': 1,
'price': t['amount'] or 0.0,
'account_id': t['account_id'],
'tax_code_id': t['tax_code_id'],
'tax_amount': t['tax_amount'],
'account_analytic_id': t['account_analytic_id'],
})
return res
class res_partner(osv.osv):
""" Inherits partner and adds invoice information in the partner form """
_inherit = 'res.partner'
_columns = {
'invoice_ids': fields.one2many('account.invoice.line', 'partner_id', 'Invoices', readonly=True),
}
def _find_accounting_partner(self, partner):
'''
Find the partner for which the accounting entries will be created
'''
# FIXME: after 7.0, to replace by function field partner.commercial_partner_id
#if the chosen partner is not a company and has a parent company, use the parent for the journal entries
#because you want to invoice 'Agrolait, accounting department' but the journal items are for 'Agrolait'
while not partner.is_company and partner.parent_id:
partner = partner.parent_id
return partner
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
default.update({'invoice_ids' : []})
return super(res_partner, self).copy(cr, uid, id, default, context)
class mail_compose_message(osv.Model):
_inherit = 'mail.compose.message'
def send_mail(self, cr, uid, ids, context=None):
context = context or {}
if context.get('default_model') == 'account.invoice' and context.get('default_res_id') and context.get('mark_invoice_as_sent'):
context = dict(context, mail_post_autofollow=True)
self.pool.get('account.invoice').write(cr, uid, [context['default_res_id']], {'sent': True}, context=context)
return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| 12,661,014,359,507,784 | 52.474033 | 307 | 0.560245 | false |
Svalorzen/AI-Toolbox
|
.ycm_extra_conf.py
|
1
|
4719
|
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
'-DUSE_CLANG_COMPLETER',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++2a',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-DAI_LOGGING_ENABLED',
'-Iinclude/',
'-isystem/usr/include/eigen3/',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
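# Example (hypothetical path -- adjust to your own build tree): pointing this at a
# CMake build directory configured with -DCMAKE_EXPORT_COMPILE_COMMANDS=ON would look
# roughly like:
#   compilation_database_folder = '/home/user/AI-Toolbox/build'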
compilation_database_folder = ''
if compilation_database_folder:
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def FlagsForFile( filename ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = database.GetCompilationInfoForFile( filename )
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% YOU NEED IT.
try:
final_flags.remove( '-stdlib=libc++' )
except ValueError:
pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
|
gpl-3.0
| -5,098,795,547,491,970,000 | 34.216418 | 80 | 0.712015 | false |
minrk/sympy
|
sympy/polys/polyclasses.py
|
1
|
46239
|
"""OO layer for several polynomial representations. """
from sympy.core.compatibility import cmp
class GenericPoly(object):
"""Base class for low-level polynomial representations. """
def ground_to_ring(f):
"""Make the ground domain a ring. """
return f.set_domain(f.dom.get_ring())
def ground_to_field(f):
"""Make the ground domain a field. """
return f.set_domain(f.dom.get_field())
def ground_to_exact(f):
"""Make the ground domain exact. """
return f.set_domain(f.dom.get_exact())
@classmethod
def _perify_factors(per, result, include):
if include:
coeff, factors = result
else:
coeff = result
factors = [ (per(g), k) for g, k in factors ]
if include:
return coeff, factors
else:
return factors
from sympy.utilities import any, all
from sympy.polys.densebasic import (
dmp_validate,
dup_normal, dmp_normal,
dup_convert, dmp_convert,
dup_from_sympy, dmp_from_sympy,
dup_strip, dmp_strip,
dup_degree, dmp_degree_in,
dmp_degree_list,
dmp_negative_p, dmp_positive_p,
dup_LC, dmp_ground_LC,
dup_TC, dmp_ground_TC,
dup_nth, dmp_ground_nth,
dmp_zero, dmp_one, dmp_ground,
dmp_zero_p, dmp_one_p, dmp_ground_p,
dup_from_dict, dmp_from_dict,
dup_to_raw_dict, dmp_to_dict,
dup_deflate, dmp_deflate,
dmp_inject, dmp_eject,
dup_terms_gcd, dmp_terms_gcd,
dmp_list_terms, dmp_exclude,
dmp_slice_in, dmp_permute,
dmp_to_tuple,)
from sympy.polys.densearith import (
dup_add_term, dmp_add_term,
dup_sub_term, dmp_sub_term,
dup_mul_term, dmp_mul_term,
dup_add_ground, dmp_add_ground,
dup_sub_ground, dmp_sub_ground,
dup_mul_ground, dmp_mul_ground,
dup_quo_ground, dmp_quo_ground,
dup_exquo_ground, dmp_exquo_ground,
dup_abs, dmp_abs,
dup_neg, dmp_neg,
dup_add, dmp_add,
dup_sub, dmp_sub,
dup_mul, dmp_mul,
dup_sqr, dmp_sqr,
dup_pow, dmp_pow,
dup_pdiv, dmp_pdiv,
dup_prem, dmp_prem,
dup_pquo, dmp_pquo,
dup_pexquo, dmp_pexquo,
dup_div, dmp_div,
dup_rem, dmp_rem,
dup_quo, dmp_quo,
dup_exquo, dmp_exquo,
dmp_add_mul, dmp_sub_mul,
dup_max_norm, dmp_max_norm,
dup_l1_norm, dmp_l1_norm)
from sympy.polys.densetools import (
dup_clear_denoms, dmp_clear_denoms,
dup_integrate, dmp_integrate_in,
dup_diff, dmp_diff_in,
dup_eval, dmp_eval_in,
dup_revert,
dup_trunc, dmp_ground_trunc,
dup_content, dmp_ground_content,
dup_primitive, dmp_ground_primitive,
dup_monic, dmp_ground_monic,
dup_compose, dmp_compose,
dup_decompose,
dup_shift,
dmp_lift)
from sympy.polys.euclidtools import (
dup_half_gcdex, dup_gcdex, dup_invert,
dup_subresultants, dmp_subresultants,
dup_resultant, dmp_resultant,
dup_discriminant, dmp_discriminant,
dup_inner_gcd, dmp_inner_gcd,
dup_gcd, dmp_gcd,
dup_lcm, dmp_lcm,
dup_cancel, dmp_cancel)
from sympy.polys.sqfreetools import (
dup_gff_list,
dup_sqf_p, dmp_sqf_p,
dup_sqf_norm, dmp_sqf_norm,
dup_sqf_part, dmp_sqf_part,
dup_sqf_list, dup_sqf_list_include,
dmp_sqf_list, dmp_sqf_list_include)
from sympy.polys.factortools import (
dup_zz_cyclotomic_p,
dup_factor_list, dup_factor_list_include,
dmp_factor_list, dmp_factor_list_include)
from sympy.polys.rootisolation import (
dup_isolate_real_roots_sqf,
dup_isolate_real_roots,
dup_isolate_all_roots_sqf,
dup_isolate_all_roots,
dup_refine_real_root,
dup_count_real_roots,
dup_count_complex_roots,
dup_sturm)
from sympy.polys.polyerrors import (
UnificationFailed,
PolynomialError,
DomainError)
def init_normal_DMP(rep, lev, dom):
return DMP(dmp_normal(rep, lev, dom), dom, lev)
class DMP(object):
"""Dense Multivariate Polynomials over `K`. """
__slots__ = ['rep', 'lev', 'dom']
def __init__(self, rep, dom, lev=None):
if lev is not None:
if type(rep) is dict:
rep = dmp_from_dict(rep, lev, dom)
elif type(rep) is not list:
rep = dmp_ground(dom.convert(rep), lev)
else:
rep, lev = dmp_validate(rep)
self.rep = rep
self.lev = lev
self.dom = dom
def __repr__(f):
return "%s(%s, %s)" % (f.__class__.__name__, f.rep, f.dom)
def __hash__(f):
return hash((f.__class__.__name__, f.to_tuple(), f.lev, f.dom))
def __getstate__(self):
return (self.rep, self.lev, self.dom)
def __getnewargs__(self):
return (self.rep, self.lev, self.dom)
def unify(f, g):
"""Unify representations of two multivariate polynomials. """
if not isinstance(g, DMP) or f.lev != g.lev:
raise UnificationFailed("can't unify %s with %s" % (f, g))
if f.dom == g.dom:
return f.lev, f.dom, f.per, f.rep, g.rep
else:
lev, dom = f.lev, f.dom.unify(g.dom)
F = dmp_convert(f.rep, lev, f.dom, dom)
G = dmp_convert(g.rep, lev, g.dom, dom)
def per(rep, dom=dom, lev=lev, kill=False):
if kill:
if not lev:
return rep
else:
lev -= 1
return DMP(rep, dom, lev)
return lev, dom, per, F, G
def per(f, rep, dom=None, kill=False):
"""Create a DMP out of the given representation. """
lev = f.lev
if kill:
if not lev:
return rep
else:
lev -= 1
if dom is None:
dom = f.dom
return DMP(rep, dom, lev)
@classmethod
def zero(cls, lev, dom):
return DMP(0, dom, lev)
@classmethod
def one(cls, lev, dom):
return DMP(1, dom, lev)
@classmethod
def from_list(cls, rep, lev, dom):
"""Create an instance of `cls` given a list of native coefficients. """
return cls(dmp_convert(rep, lev, None, dom), dom, lev)
@classmethod
def from_sympy_list(cls, rep, lev, dom):
"""Create an instance of `cls` given a list of SymPy coefficients. """
return cls(dmp_from_sympy(rep, lev, dom), dom, lev)
def to_dict(f, zero=False):
"""Convert `f` to a dict representation with native coefficients. """
return dmp_to_dict(f.rep, f.lev, f.dom, zero=zero)
def to_sympy_dict(f, zero=False):
"""Convert `f` to a dict representation with SymPy coefficients. """
rep = dmp_to_dict(f.rep, f.lev, f.dom, zero=zero)
for k, v in rep.iteritems():
rep[k] = f.dom.to_sympy(v)
return rep
def to_tuple(f):
"""
Convert `f` to a tuple representation with native coefficients.
This is needed for hashing.
"""
return dmp_to_tuple(f.rep, f.lev)
@classmethod
def from_dict(cls, rep, lev, dom):
"""Construct and instance of ``cls`` from a ``dict`` representation. """
return cls(dmp_from_dict(rep, lev, dom), dom, lev)
@classmethod
def from_monoms_coeffs(cls, monoms, coeffs, lev, dom):
return DMP(dict(zip(monoms, coeffs)), dom, lev)
def to_ring(f):
"""Make the ground domain a field. """
return f.convert(f.dom.get_ring())
def to_field(f):
"""Make the ground domain a field. """
return f.convert(f.dom.get_field())
def to_exact(f):
"""Make the ground domain exact. """
return f.convert(f.dom.get_exact())
def convert(f, dom):
"""Convert the ground domain of `f`. """
if f.dom == dom:
return f
else:
return DMP(dmp_convert(f.rep, f.lev, f.dom, dom), dom, f.lev)
def slice(f, m, n, j=0):
"""Take a continuous subsequence of terms of `f`. """
return f.per(dmp_slice_in(f.rep, m, n, j, f.lev, f.dom))
def coeffs(f, order=None):
"""Returns all non-zero coefficients from `f` in lex order. """
return [ c for _, c in dmp_list_terms(f.rep, f.lev, f.dom, order=order) ]
def monoms(f, order=None):
"""Returns all non-zero monomials from `f` in lex order. """
return [ m for m, _ in dmp_list_terms(f.rep, f.lev, f.dom, order=order) ]
def terms(f, order=None):
"""Returns all non-zero terms from `f` in lex order. """
return dmp_list_terms(f.rep, f.lev, f.dom, order=order)
def all_coeffs(f):
"""Returns all coefficients from `f`. """
if not f.lev:
if not f:
return [f.dom.zero]
else:
return [ c for c in f.rep ]
else:
raise PolynomialError('multivariate polynomials not supported')
def all_monoms(f):
"""Returns all monomials from `f`. """
if not f.lev:
n = dup_degree(f.rep)
if n < 0:
return [(0,)]
else:
return [ (n-i,) for i, c in enumerate(f.rep) ]
else:
raise PolynomialError('multivariate polynomials not supported')
def all_terms(f):
"""Returns all terms from a `f`. """
if not f.lev:
n = dup_degree(f.rep)
if n < 0:
return [((0,), f.dom.zero)]
else:
return [ ((n-i,), c) for i, c in enumerate(f.rep) ]
else:
raise PolynomialError('multivariate polynomials not supported')
def lift(f):
"""Convert algebraic coefficients to rationals. """
return f.per(dmp_lift(f.rep, f.lev, f.dom), dom=f.dom.dom)
def deflate(f):
"""Reduce degree of `f` by mapping `x_i**m` to `y_i`. """
J, F = dmp_deflate(f.rep, f.lev, f.dom)
return J, f.per(F)
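    # e.g. (illustrative): deflating x**4 + x**2 yields the exponent map J == (2,)
    # together with the deflated polynomial y**2 + y.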
def inject(f, front=False):
"""Inject ground domain generators into ``f``. """
F, lev = dmp_inject(f.rep, f.lev, f.dom, front=front)
return f.__class__(F, f.dom.dom, lev)
def eject(f, dom, front=False):
"""Eject selected generators into the ground domain. """
F = dmp_eject(f.rep, f.lev, dom, front=front)
return f.__class__(F, dom, f.lev - len(dom.gens))
def exclude(f):
r"""
Remove useless generators from ``f``.
Returns the removed generators and the new excluded ``f``.
**Example**
>>> from sympy.polys.polyclasses import DMP
>>> from sympy.polys.domains import ZZ
>>> DMP([[[ZZ(1)]], [[ZZ(1)], [ZZ(2)]]], ZZ).exclude()
([2], DMP([[1], [1, 2]], ZZ))
"""
J, F, u = dmp_exclude(f.rep, f.lev, f.dom)
return J, f.__class__(F, f.dom, u)
def permute(f, P):
r"""
Returns a polynomial in ``K[x_{P(1)}, ..., x_{P(n)}]``.
**Example**
>>> from sympy.polys.polyclasses import DMP
>>> from sympy.polys.domains import ZZ
>>> DMP([[[ZZ(2)], [ZZ(1), ZZ(0)]], [[]]], ZZ).permute([1, 0, 2])
DMP([[[2], []], [[1, 0], []]], ZZ)
>>> DMP([[[ZZ(2)], [ZZ(1), ZZ(0)]], [[]]], ZZ).permute([1, 2, 0])
DMP([[[1], []], [[2, 0], []]], ZZ)
"""
return f.per(dmp_permute(f.rep, P, f.lev, f.dom))
def terms_gcd(f):
"""Remove GCD of terms from the polynomial `f`. """
J, F = dmp_terms_gcd(f.rep, f.lev, f.dom)
return J, f.per(F)
def add_ground(f, c):
"""Add an element of the ground domain to ``f``. """
return f.per(dmp_add_ground(f.rep, f.dom.convert(c), f.lev, f.dom))
def sub_ground(f, c):
"""Subtract an element of the ground domain from ``f``. """
return f.per(dmp_sub_ground(f.rep, f.dom.convert(c), f.lev, f.dom))
def mul_ground(f, c):
"""Multiply ``f`` by a an element of the ground domain. """
return f.per(dmp_mul_ground(f.rep, f.dom.convert(c), f.lev, f.dom))
def quo_ground(f, c):
"""Quotient of ``f`` by a an element of the ground domain. """
return f.per(dmp_quo_ground(f.rep, f.dom.convert(c), f.lev, f.dom))
def exquo_ground(f, c):
"""Exact quotient of ``f`` by a an element of the ground domain. """
return f.per(dmp_exquo_ground(f.rep, f.dom.convert(c), f.lev, f.dom))
def abs(f):
"""Make all coefficients in `f` positive. """
return f.per(dmp_abs(f.rep, f.lev, f.dom))
def neg(f):
"""Negate all cefficients in `f`. """
return f.per(dmp_neg(f.rep, f.lev, f.dom))
def add(f, g):
"""Add two multivariate polynomials `f` and `g`. """
lev, dom, per, F, G = f.unify(g)
return per(dmp_add(F, G, lev, dom))
def sub(f, g):
"""Subtract two multivariate polynomials `f` and `g`. """
lev, dom, per, F, G = f.unify(g)
return per(dmp_sub(F, G, lev, dom))
def mul(f, g):
"""Multiply two multivariate polynomials `f` and `g`. """
lev, dom, per, F, G = f.unify(g)
return per(dmp_mul(F, G, lev, dom))
def sqr(f):
"""Square a multivariate polynomial `f`. """
return f.per(dmp_sqr(f.rep, f.lev, f.dom))
def pow(f, n):
"""Raise `f` to a non-negative power `n`. """
if isinstance(n, int):
return f.per(dmp_pow(f.rep, n, f.lev, f.dom))
else:
raise TypeError("`int` expected, got %s" % type(n))
def pdiv(f, g):
"""Polynomial pseudo-division of `f` and `g`. """
lev, dom, per, F, G = f.unify(g)
q, r = dmp_pdiv(F, G, lev, dom)
return per(q), per(r)
def prem(f, g):
"""Polynomial pseudo-remainder of `f` and `g`. """
lev, dom, per, F, G = f.unify(g)
return per(dmp_prem(F, G, lev, dom))
def pquo(f, g):
"""Polynomial pseudo-quotient of `f` and `g`. """
lev, dom, per, F, G = f.unify(g)
return per(dmp_pquo(F, G, lev, dom))
def pexquo(f, g):
"""Polynomial exact pseudo-quotient of `f` and `g`. """
lev, dom, per, F, G = f.unify(g)
return per(dmp_pexquo(F, G, lev, dom))
def div(f, g):
"""Polynomial division with remainder of `f` and `g`. """
lev, dom, per, F, G = f.unify(g)
q, r = dmp_div(F, G, lev, dom)
return per(q), per(r)
def rem(f, g):
"""Computes polynomial remainder of `f` and `g`. """
lev, dom, per, F, G = f.unify(g)
return per(dmp_rem(F, G, lev, dom))
def quo(f, g):
"""Computes polynomial quotient of `f` and `g`. """
lev, dom, per, F, G = f.unify(g)
return per(dmp_quo(F, G, lev, dom))
def exquo(f, g):
"""Computes polynomial exact quotient of `f` and `g`. """
lev, dom, per, F, G = f.unify(g)
return per(dmp_exquo(F, G, lev, dom))
def degree(f, j=0):
"""Returns the leading degree of `f` in `x_j`. """
if isinstance(j, int):
return dmp_degree_in(f.rep, j, f.lev)
else:
raise TypeError("`int` expected, got %s" % type(j))
def degree_list(f):
"""Returns a list of degrees of `f`. """
return dmp_degree_list(f.rep, f.lev)
def total_degree(f):
"""Returns the total degree of `f`. """
return sum(dmp_degree_list(f.rep, f.lev))
def LC(f):
"""Returns the leading coefficent of `f`. """
return dmp_ground_LC(f.rep, f.lev, f.dom)
def TC(f):
"""Returns the trailing coefficent of `f`. """
return dmp_ground_TC(f.rep, f.lev, f.dom)
def nth(f, *N):
"""Returns the `n`-th coefficient of `f`. """
if all(isinstance(n, int) for n in N):
return dmp_ground_nth(f.rep, N, f.lev, f.dom)
else:
raise TypeError("a sequence of integers expected")
def max_norm(f):
"""Returns maximum norm of `f`. """
return dmp_max_norm(f.rep, f.lev, f.dom)
def l1_norm(f):
"""Returns l1 norm of `f`. """
return dmp_l1_norm(f.rep, f.lev, f.dom)
def clear_denoms(f):
"""Clear denominators, but keep the ground domain. """
coeff, F = dmp_clear_denoms(f.rep, f.lev, f.dom)
return coeff, f.per(F)
def integrate(f, m=1, j=0):
"""Computes indefinite integral of `f`. """
if not isinstance(m, int):
raise TypeError("`int` expected, got %s" % type(m))
if not isinstance(j, int):
raise TypeError("`int` expected, got %s" % type(j))
return f.per(dmp_integrate_in(f.rep, m, j, f.lev, f.dom))
def diff(f, m=1, j=0):
"""Computes `m`-th order derivative of `f` in `x_j`. """
if not isinstance(m, int):
raise TypeError("`int` expected, got %s" % type(m))
if not isinstance(j, int):
raise TypeError("`int` expected, got %s" % type(j))
return f.per(dmp_diff_in(f.rep, m, j, f.lev, f.dom))
def eval(f, a, j=0):
"""Evaluates `f` at the given point `a` in `x_j`. """
if not isinstance(j, int):
raise TypeError("`int` expected, got %s" % type(j))
return f.per(dmp_eval_in(f.rep,
f.dom.convert(a), j, f.lev, f.dom), kill=True)
def half_gcdex(f, g):
"""Half extended Euclidean algorithm, if univariate. """
lev, dom, per, F, G = f.unify(g)
if not lev:
s, h = dup_half_gcdex(F, G, dom)
return per(s), per(h)
else:
raise ValueError('univariate polynomial expected')
def gcdex(f, g):
"""Extended Euclidean algorithm, if univariate. """
lev, dom, per, F, G = f.unify(g)
if not lev:
s, t, h = dup_gcdex(F, G, dom)
return per(s), per(t), per(h)
else:
raise ValueError('univariate polynomial expected')
def invert(f, g):
"""Invert `f` modulo `g`, if possible. """
lev, dom, per, F, G = f.unify(g)
if not lev:
return per(dup_invert(F, G, dom))
else:
raise ValueError('univariate polynomial expected')
def revert(f, n):
"""Compute `f**(-1)` mod `x**n`. """
if not f.lev:
return f.per(dup_revert(f.rep, n, f.dom))
else:
raise ValueError('univariate polynomial expected')
def subresultants(f, g):
"""Computes subresultant PRS sequence of `f` and `g`. """
lev, dom, per, F, G = f.unify(g)
R = dmp_subresultants(F, G, lev, dom)
return map(per, R)
def resultant(f, g):
"""Computes resultant of `f` and `g` via PRS. """
lev, dom, per, F, G = f.unify(g)
return per(dmp_resultant(F, G, lev, dom), kill=True)
def discriminant(f):
"""Computes discriminant of `f`. """
return f.per(dmp_discriminant(f.rep, f.lev, f.dom), kill=True)
def cofactors(f, g):
"""Returns GCD of `f` and `g` and their cofactors. """
lev, dom, per, F, G = f.unify(g)
h, cff, cfg = dmp_inner_gcd(F, G, lev, dom)
return per(h), per(cff), per(cfg)
def gcd(f, g):
"""Returns polynomial GCD of `f` and `g`. """
lev, dom, per, F, G = f.unify(g)
return per(dmp_gcd(F, G, lev, dom))
def lcm(f, g):
"""Returns polynomial LCM of `f` and `g`. """
lev, dom, per, F, G = f.unify(g)
return per(dmp_lcm(F, G, lev, dom))
def cancel(f, g, include=True):
"""Cancel common factors in a rational function ``f/g``. """
lev, dom, per, F, G = f.unify(g)
if include:
F, G = dmp_cancel(F, G, lev, dom, include=True)
else:
cF, cG, F, G = dmp_cancel(F, G, lev, dom, include=False)
F, G = per(F), per(G)
if include:
return F, G
else:
return cF, cG, F, G
def trunc(f, p):
"""Reduce `f` modulo a constant `p`. """
return f.per(dmp_ground_trunc(f.rep, f.dom.convert(p), f.lev, f.dom))
def monic(f):
"""Divides all coefficients by `LC(f)`. """
return f.per(dmp_ground_monic(f.rep, f.lev, f.dom))
def content(f):
"""Returns GCD of polynomial coefficients. """
return dmp_ground_content(f.rep, f.lev, f.dom)
def primitive(f):
"""Returns content and a primitive form of `f`. """
cont, F = dmp_ground_primitive(f.rep, f.lev, f.dom)
return cont, f.per(F)
def compose(f, g):
"""Computes functional composition of `f` and `g`. """
lev, dom, per, F, G = f.unify(g)
return per(dmp_compose(F, G, lev, dom))
def decompose(f):
"""Computes functional decomposition of `f`. """
if not f.lev:
return map(f.per, dup_decompose(f.rep, f.dom))
else:
raise ValueError('univariate polynomial expected')
def shift(f, a):
"""Efficiently compute Taylor shift ``f(x + a)``. """
if not f.lev:
return f.per(dup_shift(f.rep, f.dom.convert(a), f.dom))
else:
raise ValueError('univariate polynomial expected')
def sturm(f):
"""Computes the Sturm sequence of `f`. """
if not f.lev:
return map(f.per, dup_sturm(f.rep, f.dom))
else:
raise ValueError('univariate polynomial expected')
def gff_list(f):
"""Computes greatest factorial factorization of `f`. """
if not f.lev:
return [ (f.per(g), k) for g, k in dup_gff_list(f.rep, f.dom) ]
else:
raise ValueError('univariate polynomial expected')
def sqf_norm(f):
"""Computes square-free norm of `f`. """
s, g, r = dmp_sqf_norm(f.rep, f.lev, f.dom)
return s, f.per(g), f.per(r, dom=f.dom.dom)
def sqf_part(f):
"""Computes square-free part of `f`. """
return f.per(dmp_sqf_part(f.rep, f.lev, f.dom))
def sqf_list(f, all=False):
"""Returns a list of square-free factors of `f`. """
coeff, factors = dmp_sqf_list(f.rep, f.lev, f.dom, all)
return coeff, [ (f.per(g), k) for g, k in factors ]
def sqf_list_include(f, all=False):
"""Returns a list of square-free factors of `f`. """
factors = dmp_sqf_list_include(f.rep, f.lev, f.dom, all)
return [ (f.per(g), k) for g, k in factors ]
def factor_list(f):
"""Returns a list of irreducible factors of `f`. """
coeff, factors = dmp_factor_list(f.rep, f.lev, f.dom)
return coeff, [ (f.per(g), k) for g, k in factors ]
def factor_list_include(f):
"""Returns a list of irreducible factors of `f`. """
factors = dmp_factor_list_include(f.rep, f.lev, f.dom)
return [ (f.per(g), k) for g, k in factors ]
def intervals(f, all=False, eps=None, inf=None, sup=None, fast=False, sqf=False):
"""Compute isolating intervals for roots of `f`. """
if not f.lev:
if not all:
if not sqf:
return dup_isolate_real_roots(f.rep, f.dom, eps=eps, inf=inf, sup=sup, fast=fast)
else:
return dup_isolate_real_roots_sqf(f.rep, f.dom, eps=eps, inf=inf, sup=sup, fast=fast)
else:
if not sqf:
return dup_isolate_all_roots(f.rep, f.dom, eps=eps, inf=inf, sup=sup, fast=fast)
else:
return dup_isolate_all_roots_sqf(f.rep, f.dom, eps=eps, inf=inf, sup=sup, fast=fast)
else:
raise PolynomialError("can't isolate roots of a multivariate polynomial")
def refine_root(f, s, t, eps=None, steps=None, fast=False):
"""Refine an isolating interval to the given precision. """
if not f.lev:
return dup_refine_real_root(f.rep, s, t, f.dom, eps=eps, steps=steps, fast=fast)
else:
raise PolynomialError("can't refine a root of a multivariate polynomial")
def count_real_roots(f, inf=None, sup=None):
"""Return the number of real roots of ``f`` in ``[inf, sup]``. """
return dup_count_real_roots(f.rep, f.dom, inf=inf, sup=sup)
def count_complex_roots(f, inf=None, sup=None):
"""Return the number of complex roots of ``f`` in ``[inf, sup]``. """
return dup_count_complex_roots(f.rep, f.dom, inf=inf, sup=sup)
@property
def is_zero(f):
"""Returns `True` if `f` is a zero polynomial. """
return dmp_zero_p(f.rep, f.lev)
@property
def is_one(f):
"""Returns `True` if `f` is a unit polynomial. """
return dmp_one_p(f.rep, f.lev, f.dom)
@property
def is_ground(f):
"""Returns `True` if `f` is an element of the ground domain. """
return dmp_ground_p(f.rep, None, f.lev)
@property
def is_sqf(f):
"""Returns `True` if `f` is a square-free polynomial. """
return dmp_sqf_p(f.rep, f.lev, f.dom)
@property
def is_monic(f):
"""Returns `True` if the leading coefficient of `f` is one. """
return f.dom.is_one(dmp_ground_LC(f.rep, f.lev, f.dom))
@property
def is_primitive(f):
"""Returns `True` if GCD of coefficients of `f` is one. """
return f.dom.is_one(dmp_ground_content(f.rep, f.lev, f.dom))
@property
def is_linear(f):
"""Returns `True` if `f` is linear in all its variables. """
return all([ sum(monom) <= 1 for monom in dmp_to_dict(f.rep, f.lev, f.dom).keys() ])
@property
def is_quadratic(f):
"""Returns `True` if `f` is quadratic in all its variables. """
return all([ sum(monom) <= 2 for monom in dmp_to_dict(f.rep, f.lev, f.dom).keys() ])
@property
def is_monomial(f):
"""Returns `True` if `f` is zero or has only one term. """
return len(f.to_dict()) <= 1
@property
def is_homogeneous(f):
"""Returns `True` if `f` has zero trailing coefficient. """
return f.dom.is_zero(dmp_ground_TC(f.rep, f.lev, f.dom))
@property
def is_cyclotomic(f):
"""Returns ``True`` if ``f`` is a cyclotomic polnomial. """
if not f.lev:
return dup_zz_cyclotomic_p(f.rep, f.dom)
else:
return False
def __abs__(f):
return f.abs()
def __neg__(f):
return f.neg()
def __add__(f, g):
if not isinstance(g, DMP):
try:
g = f.per(dmp_ground(f.dom.convert(g), f.lev))
except TypeError:
return NotImplemented
return f.add(g)
def __radd__(f, g):
return f.__add__(g)
def __sub__(f, g):
if not isinstance(g, DMP):
try:
g = f.per(dmp_ground(f.dom.convert(g), f.lev))
except TypeError:
return NotImplemented
return f.sub(g)
def __rsub__(f, g):
return (-f).__add__(g)
def __mul__(f, g):
if isinstance(g, DMP):
return f.mul(g)
else:
try:
return f.mul_ground(g)
except TypeError:
return NotImplemented
def __rmul__(f, g):
return f.__mul__(g)
def __pow__(f, n):
return f.pow(n)
def __divmod__(f, g):
return f.div(g)
def __mod__(f, g):
return f.rem(g)
def __floordiv__(f, g):
if isinstance(g, DMP):
return f.quo(g)
else:
try:
return f.quo_ground(g)
except TypeError:
return NotImplemented
def __eq__(f, g):
try:
_, _, _, F, G = f.unify(g)
if f.lev == g.lev:
return F == G
except UnificationFailed:
pass
return False
def __ne__(f, g):
try:
_, _, _, F, G = f.unify(g)
if f.lev == g.lev:
return F != G
except UnificationFailed:
pass
return True
def __lt__(f, g):
_, _, _, F, G = f.unify(g)
return F.__lt__(G)
def __le__(f, g):
_, _, _, F, G = f.unify(g)
return F.__le__(G)
def __gt__(f, g):
_, _, _, F, G = f.unify(g)
return F.__gt__(G)
def __ge__(f, g):
_, _, _, F, G = f.unify(g)
return F.__ge__(G)
def __nonzero__(f):
return not dmp_zero_p(f.rep, f.lev)
def init_normal_DMF(num, den, lev, dom):
return DMF(dmp_normal(num, lev, dom),
dmp_normal(den, lev, dom), dom, lev)
class DMF(object):
"""Dense Multivariate Fractions over `K`. """
__slots__ = ['num', 'den', 'lev', 'dom']
def __init__(self, rep, dom, lev=None):
num, den, lev = self._parse(rep, dom, lev)
num, den = dmp_cancel(num, den, lev, dom)
self.num = num
self.den = den
self.lev = lev
self.dom = dom
@classmethod
def new(cls, rep, dom, lev=None):
num, den, lev = cls._parse(rep, dom, lev)
obj = object.__new__(cls)
obj.num = num
obj.den = den
obj.lev = lev
obj.dom = dom
return obj
@classmethod
def _parse(cls, rep, dom, lev=None):
if type(rep) is tuple:
num, den = rep
if lev is not None:
if type(num) is dict:
num = dmp_from_dict(num, lev, dom)
if type(den) is dict:
den = dmp_from_dict(den, lev, dom)
else:
num, num_lev = dmp_validate(num)
den, den_lev = dmp_validate(den)
if num_lev == den_lev:
lev = num_lev
else:
raise ValueError('inconsistent number of levels')
if dmp_zero_p(den, lev):
raise ZeroDivisionError('fraction denominator')
if dmp_zero_p(num, lev):
den = dmp_one(lev, dom)
else:
if dmp_negative_p(den, lev, dom):
num = dmp_neg(num, lev, dom)
den = dmp_neg(den, lev, dom)
else:
num = rep
if lev is not None:
if type(num) is dict:
num = dmp_from_dict(num, lev, dom)
elif type(num) is not list:
num = dmp_ground(dom.convert(num), lev)
else:
num, lev = dmp_validate(num)
den = dmp_one(lev, dom)
return num, den, lev
def __repr__(f):
return "%s((%s, %s), %s)" % (f.__class__.__name__, f.num, f.den, f.dom)
def __hash__(f):
return hash((f.__class__.__name__, dmp_to_tuple(f.num, f.lev),
dmp_to_tuple(f.den, f.lev), f.lev, f.dom))
def __getstate__(self):
return (self.num, self.den, self.lev, self.dom)
def __getnewargs__(self):
return (self.num, self.den, self.lev, self.dom)
def poly_unify(f, g):
"""Unify a multivariate fraction and a polynomial. """
if not isinstance(g, DMP) or f.lev != g.lev:
raise UnificationFailed("can't unify %s with %s" % (f, g))
if f.dom == g.dom:
return (f.lev, f.dom, f.per, (f.num, f.den), g.rep)
else:
lev, dom = f.lev, f.dom.unify(g.dom)
F = (dmp_convert(f.num, lev, f.dom, dom),
dmp_convert(f.den, lev, f.dom, dom))
G = dmp_convert(g.rep, lev, g.dom, dom)
            def per(num, den, cancel=True, kill=False, lev=lev):
if kill:
if not lev:
return num/den
else:
lev = lev - 1
if cancel:
num, den = dmp_cancel(num, den, lev, dom)
return f.__class__.new((num, den), dom, lev)
return lev, dom, per, F, G
def frac_unify(f, g):
"""Unify representations of two multivariate fractions. """
if not isinstance(g, DMF) or f.lev != g.lev:
raise UnificationFailed("can't unify %s with %s" % (f, g))
if f.dom == g.dom:
return (f.lev, f.dom, f.per, (f.num, f.den),
(g.num, g.den))
else:
lev, dom = f.lev, f.dom.unify(g.dom)
F = (dmp_convert(f.num, lev, f.dom, dom),
dmp_convert(f.den, lev, f.dom, dom))
G = (dmp_convert(g.num, lev, g.dom, dom),
dmp_convert(g.den, lev, g.dom, dom))
            def per(num, den, cancel=True, kill=False, lev=lev):
if kill:
if not lev:
return num/den
else:
lev = lev - 1
if cancel:
num, den = dmp_cancel(num, den, lev, dom)
return f.__class__.new((num, den), dom, lev)
return lev, dom, per, F, G
def per(f, num, den, cancel=True, kill=False):
"""Create a DMF out of the given representation. """
lev, dom = f.lev, f.dom
if kill:
if not lev:
return num/den
else:
lev -= 1
if cancel:
num, den = dmp_cancel(num, den, lev, dom)
return f.__class__.new((num, den), dom, lev)
def half_per(f, rep, kill=False):
"""Create a DMP out of the given representation. """
lev = f.lev
if kill:
if not lev:
return rep
else:
lev -= 1
return DMP(rep, f.dom, lev)
@classmethod
def zero(cls, lev, dom):
return cls.new(0, dom, lev)
@classmethod
def one(cls, lev, dom):
return cls.new(1, dom, lev)
def numer(f):
"""Returns numerator of `f`. """
return f.half_per(f.num)
def denom(f):
"""Returns denominator of `f`. """
return f.half_per(f.den)
def cancel(f):
"""Remove common factors from `f.num` and `f.den`. """
return f.per(f.num, f.den)
def neg(f):
"""Negate all cefficients in `f`. """
return f.per(dmp_neg(f.num, f.lev, f.dom), f.den, cancel=False)
def add(f, g):
"""Add two multivariate fractions `f` and `g`. """
if isinstance(g, DMP):
lev, dom, per, (F_num, F_den), G = f.poly_unify(g)
num, den = dmp_add_mul(F_num, F_den, G, lev, dom), F_den
else:
lev, dom, per, F, G = f.frac_unify(g)
(F_num, F_den), (G_num, G_den) = F, G
num = dmp_add(dmp_mul(F_num, G_den, lev, dom),
dmp_mul(F_den, G_num, lev, dom), lev, dom)
den = dmp_mul(F_den, G_den, lev, dom)
return per(num, den)
def sub(f, g):
"""Subtract two multivariate fractions `f` and `g`. """
if isinstance(g, DMP):
lev, dom, per, (F_num, F_den), G = f.poly_unify(g)
num, den = dmp_sub_mul(F_num, F_den, G, lev, dom), F_den
else:
lev, dom, per, F, G = f.frac_unify(g)
(F_num, F_den), (G_num, G_den) = F, G
num = dmp_sub(dmp_mul(F_num, G_den, lev, dom),
dmp_mul(F_den, G_num, lev, dom), lev, dom)
den = dmp_mul(F_den, G_den, lev, dom)
return per(num, den)
def mul(f, g):
"""Multiply two multivariate fractions `f` and `g`. """
if isinstance(g, DMP):
lev, dom, per, (F_num, F_den), G = f.poly_unify(g)
num, den = dmp_mul(F_num, G, lev, dom), F_den
else:
lev, dom, per, F, G = f.frac_unify(g)
(F_num, F_den), (G_num, G_den) = F, G
num = dmp_mul(F_num, G_num, lev, dom)
den = dmp_mul(F_den, G_den, lev, dom)
return per(num, den)
def pow(f, n):
"""Raise `f` to a non-negative power `n`. """
if isinstance(n, int):
return f.per(dmp_pow(f.num, n, f.lev, f.dom),
dmp_pow(f.den, n, f.lev, f.dom), cancel=False)
else:
raise TypeError("`int` expected, got %s" % type(n))
def quo(f, g):
"""Computes quotient of fractions `f` and `g`. """
if isinstance(g, DMP):
lev, dom, per, (F_num, F_den), G = f.poly_unify(g)
num, den = F_num, dmp_mul(F_den, G, lev, dom)
else:
lev, dom, per, F, G = f.frac_unify(g)
(F_num, F_den), (G_num, G_den) = F, G
num = dmp_mul(F_num, G_den, lev, dom)
den = dmp_mul(F_den, G_num, lev, dom)
return per(num, den)
exquo = quo
def invert(f):
"""Computes inverse of a fraction `f`. """
return f.per(f.den, f.num, cancel=False)
@property
def is_zero(f):
"""Returns `True` if `f` is a zero fraction. """
return dmp_zero_p(f.num, f.lev)
@property
def is_one(f):
"""Returns `True` if `f` is a unit fraction. """
return dmp_one_p(f.num, f.lev, f.dom) and \
dmp_one_p(f.den, f.lev, f.dom)
def __neg__(f):
return f.neg()
def __add__(f, g):
if isinstance(g, (DMP, DMF)):
return f.add(g)
try:
return f.add(f.half_per(g))
except TypeError:
return NotImplemented
def __radd__(f, g):
return f.__add__(g)
def __sub__(f, g):
if isinstance(g, (DMP, DMF)):
return f.sub(g)
try:
return f.sub(f.half_per(g))
except TypeError:
return NotImplemented
def __rsub__(f, g):
return (-f).__add__(g)
def __mul__(f, g):
if isinstance(g, (DMP, DMF)):
return f.mul(g)
try:
return f.mul(f.half_per(g))
except TypeError:
return NotImplemented
def __rmul__(f, g):
return f.__mul__(g)
def __pow__(f, n):
return f.pow(n)
def __div__(f, g):
if isinstance(g, (DMP, DMF)):
return f.quo(g)
try:
return f.quo(f.half_per(g))
except TypeError:
return NotImplemented
__truediv__ = __div__
def __eq__(f, g):
try:
if isinstance(g, DMP):
_, _, _, (F_num, F_den), G = f.poly_unify(g)
if f.lev == g.lev:
return dmp_one_p(F_den, f.lev, f.dom) and F_num == G
else:
_, _, _, F, G = f.frac_unify(g)
if f.lev == g.lev:
return F == G
except UnificationFailed:
pass
return False
def __ne__(f, g):
try:
if isinstance(g, DMP):
_, _, _, (F_num, F_den), G = f.poly_unify(g)
if f.lev == g.lev:
return not (dmp_one_p(F_den, f.lev, f.dom) and F_num == G)
else:
_, _, _, F, G = f.frac_unify(g)
if f.lev == g.lev:
return F != G
except UnificationFailed:
pass
return True
def __lt__(f, g):
_, _, _, F, G = f.frac_unify(g)
return F.__lt__(G)
def __le__(f, g):
_, _, _, F, G = f.frac_unify(g)
return F.__le__(G)
def __gt__(f, g):
_, _, _, F, G = f.frac_unify(g)
return F.__gt__(G)
def __ge__(f, g):
_, _, _, F, G = f.frac_unify(g)
return F.__ge__(G)
def __nonzero__(f):
return not dmp_zero_p(f.num, f.lev)
def init_normal_ANP(rep, mod, dom):
return ANP(dup_normal(rep, dom),
dup_normal(mod, dom), dom)
class ANP(object):
"""Dense Algebraic Number Polynomials over a field. """
__slots__ = ['rep', 'mod', 'dom']
def __init__(self, rep, mod, dom):
if type(rep) is dict:
self.rep = dup_from_dict(rep, dom)
else:
if type(rep) is not list:
rep = [dom.convert(rep)]
self.rep = dup_strip(rep)
if isinstance(mod, DMP):
self.mod = mod.rep
else:
if type(mod) is dict:
self.mod = dup_from_dict(mod, dom)
else:
self.mod = dup_strip(mod)
self.dom = dom
def __repr__(f):
return "%s(%s, %s, %s)" % (f.__class__.__name__, f.rep, f.mod, f.dom)
def __hash__(f):
return hash((f.__class__.__name__, f.to_tuple(), dmp_to_tuple(f.mod, 0), f.dom))
def __getstate__(self):
return (self.rep, self.mod, self.dom)
def __getnewargs__(self):
return (self.rep, self.mod, self.dom)
def unify(f, g):
"""Unify representations of two algebraic numbers. """
if not isinstance(g, ANP) or f.mod != g.mod:
raise UnificationFailed("can't unify %s with %s" % (f, g))
if f.dom == g.dom:
return f.dom, f.per, f.rep, g.rep, f.mod
else:
dom = f.dom.unify(g.dom)
F = dup_convert(f.rep, f.dom, dom)
G = dup_convert(g.rep, g.dom, dom)
if dom != f.dom and dom != g.dom:
mod = dup_convert(f.mod, f.dom, dom)
else:
                if dom == f.dom:
                    mod = f.mod
                else:
                    mod = g.mod
per = lambda rep: ANP(rep, mod, dom)
return dom, per, F, G, mod
def per(f, rep, mod=None, dom=None):
return ANP(rep, mod or f.mod, dom or f.dom)
@classmethod
def zero(cls, mod, dom):
return ANP(0, mod, dom)
@classmethod
def one(cls, mod, dom):
return ANP(1, mod, dom)
def to_dict(f):
"""Convert `f` to a dict representation with native coefficients. """
return dmp_to_dict(f.rep, 0, f.dom)
def to_sympy_dict(f):
"""Convert `f` to a dict representation with SymPy coefficients. """
rep = dmp_to_dict(f.rep, 0, f.dom)
for k, v in rep.iteritems():
rep[k] = f.dom.to_sympy(v)
return rep
def to_list(f):
"""Convert `f` to a list representation with native coefficients. """
return f.rep
def to_sympy_list(f):
"""Convert `f` to a list representation with SymPy coefficients. """
return [ f.dom.to_sympy(c) for c in f.rep ]
def to_tuple(f):
"""
Convert `f` to a tuple representation with native coefficients.
This is needed for hashing.
"""
return dmp_to_tuple(f.rep, 0)
@classmethod
def from_list(cls, rep, mod, dom):
return ANP(dup_strip(map(dom.convert, rep)), mod, dom)
def neg(f):
return f.per(dup_neg(f.rep, f.dom))
def add(f, g):
dom, per, F, G, mod = f.unify(g)
return per(dup_add(F, G, dom))
def sub(f, g):
dom, per, F, G, mod = f.unify(g)
return per(dup_sub(F, G, dom))
def mul(f, g):
dom, per, F, G, mod = f.unify(g)
return per(dup_rem(dup_mul(F, G, dom), mod, dom))
def pow(f, n):
"""Raise `f` to a non-negative power `n`. """
if isinstance(n, int):
if n < 0:
F, n = dup_invert(f.rep, f.mod, f.dom), -n
else:
F = f.rep
return f.per(dup_rem(dup_pow(F, n, f.dom), f.mod, f.dom))
else:
raise TypeError("`int` expected, got %s" % type(n))
def div(f, g):
dom, per, F, G, mod = f.unify(g)
        return (per(dup_rem(dup_mul(F, dup_invert(G, mod, dom), dom), mod, dom)), f.zero(mod, dom))
def rem(f, g):
dom, _, _, _, mod = f.unify(g)
        return f.zero(mod, dom)
def quo(f, g):
dom, per, F, G, mod = f.unify(g)
return per(dup_rem(dup_mul(F, dup_invert(G, mod, dom), dom), mod, dom))
exquo = quo
def LC(f):
"""Returns the leading coefficent of `f`. """
return dup_LC(f.rep, f.dom)
def TC(f):
"""Returns the trailing coefficent of `f`. """
return dup_TC(f.rep, f.dom)
@property
def is_zero(f):
"""Returns `True` if `f` is a zero algebraic number. """
return not f
@property
def is_one(f):
"""Returns `True` if `f` is a unit algebraic number. """
return f.rep == [f.dom.one]
@property
def is_ground(f):
"""Returns `True` if `f` is an element of the ground domain. """
return not f.rep or len(f.rep) == 1
def __neg__(f):
return f.neg()
def __add__(f, g):
if isinstance(g, ANP):
return f.add(g)
else:
try:
return f.add(f.per(g))
except TypeError:
return NotImplemented
def __radd__(f, g):
return f.__add__(g)
def __sub__(f, g):
if isinstance(g, ANP):
return f.sub(g)
else:
try:
return f.sub(f.per(g))
except TypeError:
return NotImplemented
def __rsub__(f, g):
return (-f).__add__(g)
def __mul__(f, g):
if isinstance(g, ANP):
return f.mul(g)
else:
try:
return f.mul(f.per(g))
except TypeError:
return NotImplemented
def __rmul__(f, g):
return f.__mul__(g)
def __pow__(f, n):
return f.pow(n)
def __divmod__(f, g):
return f.div(g)
def __mod__(f, g):
return f.rem(g)
def __div__(f, g):
if isinstance(g, ANP):
return f.quo(g)
else:
try:
return f.quo(f.per(g))
except TypeError:
return NotImplemented
__truediv__ = __div__
def __eq__(f, g):
try:
_, _, F, G, _ = f.unify(g)
return F == G
except UnificationFailed:
return False
def __ne__(f, g):
try:
_, _, F, G, _ = f.unify(g)
return F != G
except UnificationFailed:
return True
def __lt__(f, g):
_, _, F, G, _ = f.unify(g)
return F.__lt__(G)
def __le__(f, g):
_, _, F, G, _ = f.unify(g)
return F.__le__(G)
def __gt__(f, g):
_, _, F, G, _ = f.unify(g)
return F.__gt__(G)
def __ge__(f, g):
_, _, F, G, _ = f.unify(g)
return F.__ge__(G)
def __nonzero__(f):
return bool(f.rep)
|
bsd-3-clause
| -899,980,643,077,124,200 | 28.583493 | 105 | 0.50239 | false |
scalyr/scalyr-agent-2
|
scalyr_agent/line_matcher.py
|
1
|
20484
|
# Copyright 2015 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
#
# Line Matching abstraction to allow for single and multi-line processing
# of log files.
# See here for a more detailed description of each log type:
# https://www.scalyr.com/help/parsing-logs#multiline
#
# author: Imron Alston <imron@scalyr.com>
from __future__ import unicode_literals
from __future__ import absolute_import
__author__ = "imron@scalyr.com"
import re
import six
class LineMatcher(object):
"""An abstraction for a Line Matcher.
Reads an entire 'line' from a file like object (e.g. anything implementing Python's
file interface such as StringIO). By default it reads just a single line terminated
by a newline, however subclasses can override the _readline method to provide their
own definition of what a 'line' is.
This class also handles partial lines and timeouts between reading a full line.
"""
@staticmethod
def create_line_matchers(log_config, max_line_length, line_completion_wait_time):
"""Creates line matchers based on the config passed in
see: https://www.scalyr.com/help/parsing-logs#multiline for more info
If no lineGroupers attribute is found, then it defaults to a single line matcher
@param log_config: A JsonObject containing the log config
@param max_line_length: The maximum amount to read before returning a new line
@param line_completion_wait_time: The maximum amount of time to wait
if only a partial line is ready
@return - a line matcher object based on the config
"""
line_groupers = log_config["lineGroupers"]
# return a single line matcher if line_groupers is empty or None
if not line_groupers:
return LineMatcher(max_line_length, line_completion_wait_time)
# build a line matcher collection
result = LineMatcherCollection(max_line_length, line_completion_wait_time)
for grouper in line_groupers:
if "start" in grouper:
if "continueThrough" in grouper:
matcher = ContinueThrough(
grouper["start"],
grouper["continueThrough"],
max_line_length,
line_completion_wait_time,
)
result.add_matcher(matcher)
elif "continuePast" in grouper:
matcher = ContinuePast(
grouper["start"],
grouper["continuePast"],
max_line_length,
line_completion_wait_time,
)
result.add_matcher(matcher)
elif "haltBefore" in grouper:
matcher = HaltBefore(
grouper["start"],
grouper["haltBefore"],
max_line_length,
line_completion_wait_time,
)
result.add_matcher(matcher)
elif "haltWith" in grouper:
matcher = HaltWith(
grouper["start"],
grouper["haltWith"],
max_line_length,
line_completion_wait_time,
)
result.add_matcher(matcher)
else:
raise Exception(
"Error, no continuation pattern found for line grouper: %s"
% six.text_type(grouper)
)
else:
raise Exception(
"Error, no start pattern found for line grouper: %s"
% six.text_type(grouper)
)
return result
def __init__(self, max_line_length=5 * 1024, line_completion_wait_time=5 * 60):
self.max_line_length = max_line_length
self.__line_completion_wait_time = line_completion_wait_time
self.__partial_line_time = None
def readline(self, file_like, current_time):
# save the original position
original_offset = file_like.tell()
# read a line, and whether or not this is a partial line from the file_like object
line, partial = self._readline(file_like)
if len(line) == 0:
self.__partial_line_time = None
return line
# If we have a partial line then we should only
# return it if sufficient time has passed.
if partial and len(line) < self.max_line_length:
if self.__partial_line_time is None:
self.__partial_line_time = current_time
if (
current_time - self.__partial_line_time
< self.__line_completion_wait_time
):
# We aren't going to return it so reset buffer back to the original spot.
file_like.seek(original_offset)
return b""
else:
self.__partial_line_time = None
return line
def _readline(self, file_like, max_length=0):
"""Takes a file_like object (e.g. anything conforming to python's file interface
and returns either a full line, or a partial line.
@param file_like: a file like object (e.g. StringIO)
@param max_length: the maximum length to read from the file_like object
@returns - (string,bool) the content read from the file_like object and whether
this is a partial line or not.
"""
if max_length == 0:
max_length = self.max_line_length
line = file_like.readline(max_length)
if len(line) == 0:
return line, False
# 2->TODO use slicing to get bytes string on both versions.
last_char = line[-1:]
partial = last_char != b"\n" and last_char != b"\r"
return line, partial
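# --- Editor's usage sketch (not part of the original Scalyr module) ---
# Minimal illustration of how LineMatcher.readline consumes newline-terminated
# input from a file-like object. The buffer contents and timestamps below are
# invented for the example; only the LineMatcher class defined above is assumed.
def _example_single_line_matching():
    import io
    matcher = LineMatcher(max_line_length=1024, line_completion_wait_time=60)
    buf = io.BytesIO(b"complete line\npartial line without newline")
    first = matcher.readline(buf, current_time=0)  # b"complete line\n"
    second = matcher.readline(buf, current_time=0)  # partial line withheld -> b""
    third = matcher.readline(buf, current_time=120)  # wait time exceeded -> partial line returned
    return first, second, third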
class LineMatcherCollection(LineMatcher):
"""A group of line matchers.
Returns the line from the first line matcher that returns a non-empty line,
or a single newline terminated line if no matches are found.
"""
def __init__(self, max_line_length=5 * 1024, line_completion_wait_time=5 * 60):
LineMatcher.__init__(self, max_line_length, line_completion_wait_time)
self.__matchers = []
def add_matcher(self, matcher):
self.__matchers.append(matcher)
def _readline(self, file_like, max_length=0):
"""Takes a file_like object (e.g. anything conforming to python's file interface
and checks all self.__matchers to see if any of them match. If so then return the
first matching line, otherwise return a single newline terminated line from the input.
@param file_like: a file like object (e.g. StringIO)
@param max_length: the maximum length to read from the file_like object
@returns - (string,bool) the content read from the file_like object and whether
this is a partial line or not.
"""
# default to our own max_line_length if no length has been specified
if max_length == 0:
max_length = self.max_line_length
line = None
partial = False
for matcher in self.__matchers:
offset = file_like.tell()
# if we have a line, then break out of the loop
line, partial = matcher._readline(file_like, max_length)
if line:
break
# no match, so reset back to the original offset to prepare for the next line
file_like.seek(offset)
# if we didn't match any of self.__matchers, then check to see if there is a
# single line waiting
if not line:
line, partial = LineMatcher._readline(self, file_like, max_length)
return line, partial
class LineGrouper(LineMatcher):
"""An abstraction for a LineMatcher that groups multiple lines as a single line.
Most of the complexity of multiline matching is handled by this class, and subclasses
are expected to override methods determining when a multiline begins, and whether the
multiline should continue.
"""
# Value for the "errors" argument which is passed to the bytes.decode() function call.
    # This should really only be set to something other than ignore / replace inside the tests.
DECODE_ERRORS_VALUE = "replace"
def __init__(
self,
start_pattern,
continuation_pattern,
max_line_length=5 * 1024,
line_completion_wait_time=5 * 60,
):
LineMatcher.__init__(self, max_line_length, line_completion_wait_time)
self._start_pattern = re.compile(start_pattern)
self._continuation_pattern = re.compile(continuation_pattern)
def _readline(self, file_like, max_length=0):
"""Takes a file_like object (e.g. anything conforming to python's file interface
and returns either a full line, or a partial line.
@param file_like: a file like object (e.g. StringIO)
@param max_length: the maximum length to read from the file_like object
@returns - (string,bool) the content read from the file_like object and whether
this is a partial line or not. This function always returns either an empty string ''
or a complete multi-line string.
"""
# default to our own max_line_length if no max_length is specified
if max_length == 0:
max_length = self.max_line_length
start_offset = file_like.tell()
line = ""
partial = False
# read a single line of input from the file_like object
start_line, partial = LineMatcher._readline(self, file_like, max_length)
# early exit if is a partial line
if partial:
return start_line, partial
# NOTE: When decoding line data we simply ignore any invalid or partial unicode sequences
# This way we still ingest rest of the data even if part of it is malformed or corrupted.
# check to see if this line starts a multiline
start_line_decoded = start_line.decode("utf-8", self.DECODE_ERRORS_VALUE)
start = self._start_line(start_line_decoded)
if start:
max_length -= len(start_line)
next_offset = file_like.tell()
# read the next single line of input
next_line, next_partial = LineMatcher._readline(self, file_like, max_length)
if next_line and max_length - len(next_line) <= 0:
line = start_line + next_line
partial = False
elif next_line:
# see if we are continuing the line
next_line_decoded = next_line.decode("utf-8", self.DECODE_ERRORS_VALUE)
cont = self._continue_line(next_line_decoded)
if cont:
line = start_line
# build up a multiline string by looping over all lines for as long as
# the multiline continues, there is still more input in the file and we still have space in our buffer
line_max_reached = False
while cont and next_line and max_length > 0:
line += next_line
max_length -= len(next_line)
next_offset = file_like.tell()
if max_length > 0:
next_line, partial = LineMatcher._readline(
self, file_like, max_length
)
# Only check if this line is a continuation if we got the full line.
next_line_decoded = next_line.decode(
"utf-8", self.DECODE_ERRORS_VALUE
)
cont = not partial and self._continue_line(
next_line_decoded
)
line_max_reached = max_length - len(next_line) <= 0
if line_max_reached:
# Have to cover a very particular case. If we reached line max, then no matter what, we add
# in the partial line because returning a string with len equal to max_length signals to the
# higher level code we hit the max.
line += next_line
next_offset = file_like.tell()
# the previous loop potentially needs to read one line past the end of a multi-line
# so reset the file position to the start of that line for future calls.
file_like.seek(next_offset)
# if we are here and cont is true, it means that we are in the middle of a multiline
# but there is no further input, so we have a partial line.
# partial = not line_max_reached and (partial or cont)
partial = partial or cont
# first line matched, but the second line failed to continue the multiline
else:
# check if we can match a single line
if self._match_single_line():
line = start_line
# otherwise reset the file position and return an empty line
else:
file_like.seek(start_offset)
line = b""
# first line started a multiline and now we are waiting for the next line of
# input, so return a partial line
else:
line = start_line
partial = True
# the line didn't start a multiline, so reset the file position and return an empty line
else:
file_like.seek(start_offset)
line, partial = b"", False
return line, partial
def _match_single_line(self):
"""Returns whether or not the grouper can match a single line based on the start_pattern.
Defaults to false
"""
return False
def _continue_line(self, line):
"""Returns whether or not the grouper should continue matching a multiline. Defaults to false
@param line - the next line of input
"""
return False
def _start_line(self, line):
"""Returns whether or not the grouper should start matching a multiline.
@param line - the next line of input
@return bool - whether or not the start pattern finds a match in the input line
"""
return self._start_pattern.search(line) is not None
class ContinueThrough(LineGrouper):
"""A ContinueThrough multiline grouper.
If the start_pattern matches, then all consecutive lines matching the continuation pattern are included in the line.
This is useful in cases such as a Java stack trace, where some indicator in the line (such as leading whitespace)
    indicates that it is an extension of the preceding line.
"""
def _continue_line(self, line):
"""
@param line - the next line of input
@return bool - True if the line is empty or if the contination_pattern finds a match in the input line
"""
if not line:
return True
return self._continuation_pattern.search(line) is not None
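# --- Editor's usage sketch (not part of the original Scalyr module) ---
# Hypothetical illustration of ContinueThrough: a line matching the start
# pattern opens a group and indented continuation lines (as in a Java stack
# trace) are folded into it. The patterns and log content are invented for
# this example; only the ContinueThrough class above is assumed.
def _example_continue_through():
    import io
    grouper = ContinueThrough(
        start_pattern=r"^[^\s]",
        continuation_pattern=r"^\s",
        max_line_length=4096,
        line_completion_wait_time=60,
    )
    buf = io.BytesIO(
        b"Exception in thread main\n"
        b"    at Foo.bar(Foo.java:10)\n"
        b"    at Foo.baz(Foo.java:20)\n"
        b"NextMessage starts here\n"
    )
    # The exception line and both stack frames come back as one multi-line "line".
    return grouper.readline(buf, current_time=0)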
class ContinuePast(LineGrouper):
"""A ContinuePast multiline grouper.
    If the start pattern matches, then all consecutive lines matching the continuation pattern, plus one additional line,
are included in the line.
This is useful in cases where a log message ends with a continuation marker, such as a backslash, indicating
that the following line is part of the same message.
"""
def __init__(
self,
start_pattern,
continuation_pattern,
max_line_length=5 * 1024,
line_completion_wait_time=5 * 60,
):
LineGrouper.__init__(
self,
start_pattern,
continuation_pattern,
max_line_length,
line_completion_wait_time,
)
self.__last_line = False
self.__grouping = False
def _start_line(self, line):
result = LineGrouper._start_line(self, line)
self.__grouping = result
self.__last_line = False
return result
def _continue_line(self, line):
# if the previous call saw the last line of the pattern, then the next call should always return False
if self.__last_line:
self.__last_line = False
return False
# see if we match the continuation pattern
match = self._continuation_pattern.search(line)
result = True
# if we are grouping lines and we don't have a match, then we need to flag that the next
# line will always end the line continuation
if self.__grouping:
if not match:
self.__grouping = False
self.__last_line = True
else:
# we aren't grouping lines so if we don't have a match then the input doesn't match this
# line grouping pattern, so return False
if not match:
result = False
return result
class HaltBefore(LineGrouper):
"""A HaltBefore line grouper.
    If the start pattern matches, then all consecutive lines not matching the continuation pattern are included in the line.
This is useful where a log line contains a marker indicating that it begins a new message.
"""
def __init__(
self,
start_pattern,
continuation_pattern,
max_line_length=5 * 1024,
line_completion_wait_time=5 * 60,
):
LineGrouper.__init__(
self,
start_pattern,
continuation_pattern,
max_line_length,
line_completion_wait_time,
)
self.__match_single = False
def _start_line(self, line):
self.__match_single = LineGrouper._start_line(self, line)
return self.__match_single
def _continue_line(self, line):
return self._continuation_pattern.search(line) is None
def _match_single_line(self):
        # HaltBefore can potentially match a single line
return self.__match_single
class HaltWith(LineGrouper):
"""A HaltWith line grouper.
    If the start pattern matches, all consecutive lines, up to and including the first line matching the continuation pattern,
are included in the line. This is useful where a log line ends with a termination marker, such as a semicolon.
"""
def __init__(
self,
start_pattern,
continuation_pattern,
max_line_length=5 * 1024,
line_completion_wait_time=5 * 60,
):
LineGrouper.__init__(
self,
start_pattern,
continuation_pattern,
max_line_length,
line_completion_wait_time,
)
self.__last_line = False
self.__grouping = False
def _start_line(self, line):
result = LineGrouper._start_line(self, line)
self.__last_line = False
return result
def _continue_line(self, line):
# if we have previously been flagged that the last line has been reached, then return False to stop
# the line from continuing
if self.__last_line:
self.__last_line = False
return False
# see if the continuation pattern matches
cont = self._continuation_pattern.search(line) is None
        # if it doesn't, then we still continue the line for this input, but we have reached the end of
# the pattern so the next line should end the group
if not cont:
self.__last_line = True
cont = True
return cont
|
apache-2.0
| -3,389,244,523,084,116,000 | 38.697674 | 124 | 0.586555 | false |
paveldedik/thesis
|
models/models.py
|
1
|
30072
|
# -*- coding: utf-8 -*-
"""
Evaluation Models
=================
"""
from __future__ import division
from copy import copy
from itertools import izip
from collections import defaultdict
import numpy as np
import pandas as pd
import tools
__all__ = (
'DummyPriorModel',
'EloModel',
'EloResponseTime',
'PFAModel',
'PFAResponseTime',
'PFAExt',
'PFAExtTiming',
'PFAExtStaircase',
'PFAExtSpacing',
'PFAGong',
'PFAGongTiming',
'PFATiming',
)
#: Dictionary of the most commonly used time effect functions in this thesis.
time_effect_funcs = {}
def register_time_effect(name):
"""Registers new time effect functions."""
def register(time_effect):
time_effect_funcs[name] = time_effect
return register
@register_time_effect('log')
def time_effect_log(t, a=1.8, c=0.123):
return a - c * np.log(t)
@register_time_effect('pow')
def time_effect_div(t, a=2, c=0.2):
return a / (t+1) ** c
@register_time_effect('exp')
def time_effect_exp(t, a=1.6, c=0.01):
return a * np.exp(-c * np.sqrt(t))
def init_time_effect(obj, name, parameters=('a', 'c')):
"""Prepares time effect function based on name. Initializes
the given object with default parameters `a` and `c`.
:param obj: Object to initialize with time effect function.
:param name: Name of the time effect function.
"""
time_effect_fun = time_effect_funcs[name]
defaults = time_effect_fun.func_defaults
a, c = parameters
if getattr(obj, a, None) is None:
setattr(obj, a, defaults[0])
if getattr(obj, c, None) is None:
setattr(obj, c, defaults[1])
def time_effect(t):
a_val, c_val = getattr(obj, a), getattr(obj, c)
return time_effect_fun(t, a_val, c_val)
return time_effect
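# --- Editor's illustration (not part of the original module) ---
# Sketch of how the registry above is meant to be used: a holder object either
# supplies parameters `a` and `c` or inherits the registered defaults, and
# init_time_effect binds them to one of the registered decay curves. The
# _TimeEffectHolder class and the sample times are invented for this example.
class _TimeEffectHolder(object):
    a = None  # fall back to the registered function's defaults
    c = None
def _example_time_effect():
    effect = init_time_effect(_TimeEffectHolder(), 'log')
    # Decay of the memory trace with seconds elapsed since the last practice.
    return [effect(t) for t in (60, 3600, 86400)]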
class Question(object):
"""Representation of a question."""
def __init__(self, **kwargs):
self.id = kwargs.pop('id')
self.user_id = kwargs.pop('user_id')
self.place_id = kwargs.pop('place_id')
self.type = kwargs.pop('type')
self.inserted = kwargs.pop('inserted')
self.options = kwargs.pop('options')
class Answer(Question):
"""Answer to a question."""
def __init__(self, **kwargs):
super(Answer, self).__init__(**kwargs)
self.place_answered = kwargs.pop('place_answered')
self.response_time = kwargs.pop('response_time')
self.is_correct = kwargs.pop('is_correct')
class User(object):
"""Returns a user with given ID.
:param user_id: ID of the user.
:type user_id: int
"""
def __init__(self, user_id):
self.id = user_id
self.skill_increments = []
@property
def skill(self):
"""Skill of the user."""
return sum(self.skill_increments)
@property
def answers_count(self):
"""Number of answer of the user (equal to the number of
skill increments.
"""
return len(self.skill_increments)
def inc_skill(self, increment):
"""Increments the skill of the user.
:param increment: Increment (or decrement) of the skill.
:type increment: int
"""
self.skill_increments += [increment]
class Place(object):
"""Returns a place with given ID.
:param place_id: ID of the place.
:type place_id: int
"""
def __init__(self, place_id):
self.id = place_id
self.difficulty_increments = []
@property
def difficulty(self):
"""Difficulty of the place."""
return sum(self.difficulty_increments)
@property
def answers_count(self):
"""Number of answer for the place (equal to the number of
difficulty increments.
"""
return len(self.difficulty_increments)
def inc_difficulty(self, increment):
"""Increments the difficulty of the place.
:param increment: Increment (or decrement) of the difficulty.
:type increment: int
"""
self.difficulty_increments += [increment]
class Item(object):
"""Item representation.
:param prior: Prior skills of users and difficulties of places.
:type prior: dictionary
:param user_id: ID of the user.
:type user_id: int
:param place_id: ID of the place.
:type place_id: int
"""
def __init__(self, prior, user_id, place_id):
self.prior = prior
self.user_id = user_id
self.place_id = place_id
self.practices = []
self.knowledge_increments = []
@property
def user(self):
"""User answering the item."""
return self.prior.users[self.user_id]
@property
def place(self):
"""Place of the item being asked."""
return self.prior.places[self.place_id]
@property
def knowledge(self):
"""Knowledge of the item by the user."""
return (
(self.user.skill - self.place.difficulty)
+ sum(self.knowledge_increments)
)
@property
def correct(self):
"""List of correct answers."""
return [ans for ans in self.practices if ans.is_correct]
@property
def incorrect(self):
"""List of incorrect answers."""
return [ans for ans in self.practices if not ans.is_correct]
@property
def last_inserted(self):
"""Returns the time of the last answer for this item
or :obj:`None` if the item was never answered before.
"""
if self.practices:
return self.practices[-1].inserted
@property
def any_incorrect(self):
""":obj:`True` if at least one of the practiced item
was answered incorrectly, otherwise :obj:`False`.
"""
return any(not answer.is_correct for answer in self.practices)
def get_diffs(self, current):
"""Returns list of previous practices expresed as the number
of seconds that passed between *current* practice and all
the *previous* practices.
:param current: Datetime of the current practice.
        :type current: string
"""
return [
tools.time_diff(current, prior.inserted)
for prior in self.practices
]
def inc_knowledge(self, increment):
"""Increments the knowledge of the user of the item.
:param increment: Increment (or decrement) of the knowledge.
:type increment: int
"""
self.knowledge_increments += [increment]
def add_practice(self, answer):
"""Registers new practice of the item.
:param answer: Information about the answer.
:type answer: :class:`pandas.Series` or :class:`Answer`
"""
if isinstance(answer, pd.Series):
self.practices += [Answer(**answer.to_dict())]
else:
self.practices += [copy(answer)]
class Model(object):
"""Abstract model class."""
ABBR = None
def respect_guess(self, prediction, options):
"""Updates prediction with respect to guessing paramter.
:param prediction: Prediction calculated so far.
:type prediction: float
:param options: Number of options in the multiple-choice question.
:type options: int
"""
if options:
val = 1 / len(options)
return val + (1 - val) * prediction
else:
return prediction
def predict(self, question):
"""Returns probability of correct answer for given question.
:param question: Asked question.
:type question: :class:`pandas.Series` or :class:`Question`
"""
raise NotImplementedError()
def update(self, answer):
"""Performes an update of skills, difficulties or knowledge.
:param answer: Asked question.
:type answer: :class:`pandas.Series` or :class:`Answer`
"""
raise NotImplementedError()
def train(self, data):
"""Trains the model on given data set.
:param data: Data set on which to train the model.
:type data: :class:`pandas.DataFrame`
"""
raise NotImplementedError()
@classmethod
def split_data(cls, data, ratio=0.7):
"""Classmethod that splits data into training set and test set.
:param data: The object containing data.
:type data: :class:`pandas.DataFrame`.
:param ratio: What portion of data to include in the training set
and the test set. :obj:`0.5` means that the data will be
            distributed equally.
:type ratio: float
"""
raise NotImplementedError()
class DummyPriorModel(Model):
"""Dummy model that sets all skills of users and difficulties
of places to zero.
"""
class _User(object):
"""Returns a user with given ID."""
def __init__(self, skill):
self.skill = skill
class _Place(object):
"""Returns a place with given ID."""
def __init__(self, difficulty):
self.difficulty = difficulty
def __init__(self, skill=0.0, difficulty=0.0):
self.users = defaultdict(lambda: self._User(skill))
self.places = defaultdict(lambda: self._Place(difficulty))
def update(self, answer):
pass
def train(self, data):
pass
class EloModel(Model):
"""Predicts correctness of answers using Elo Rating System.
The model is parametrized with `alpha` and `beta`. These parameters
affect the uncertainty function.
"""
ABBR = 'Elo'
def __init__(self, alpha=1, beta=0.05):
self.alpha = alpha
self.beta = beta
self.init_model()
def init_model(self):
"""Initializes two attributes of the model. Both attributes are
dataframes. The first attribute represents difficulties of countries.
The second attribute represents global knowledge of students.
"""
self.places = tools.keydefaultdict(Place)
self.users = tools.keydefaultdict(User)
self.predictions = {}
def uncertainty(self, n):
"""Uncertainty function. The purpose is to make each update on
the model trained with sequence of `n` answers less and less
significant as the number of prior answers is bigger.
:param n: Number of user's answers or total answers to a place.
:type n: int
"""
return self.alpha / (1 + self.beta * n)
def predict(self, question):
"""Returns probability of correct answer for given question.
:param question: Asked question.
:type question: :class:`pandas.Series` or :class:`Question`
"""
user = self.users[question.user_id]
place = self.places[question.place_id]
prediction = tools.sigmoid(user.skill - place.difficulty)
return self.respect_guess(prediction, question.options)
def update(self, answer):
"""Updates skills of users and difficulties of places according
to given answer.
:param answer: Answer to a question.
:type answer: :class:`pandas.Series`
"""
user = self.users[answer.user_id]
place = self.places[answer.place_id]
prediction = self.predict(answer)
shift = answer.is_correct - prediction
user.inc_skill(self.uncertainty(user.answers_count) * shift)
place.inc_difficulty(-(self.uncertainty(place.answers_count) * shift))
self.predictions[answer.id] = prediction
def train(self, data):
"""Trains the model on given data set.
:param data: Data set on which to train the model.
:type data: :class:`pandas.DataFrame`
"""
self.init_model()
data = tools.first_answers(data)
data.sort(['inserted']).apply(self.update, axis=1)
@classmethod
def split_data(cls, data, ratio=0.7):
"""Classmethod that splits data into training set and test set.
:param data: The object containing data.
:type data: :class:`pandas.DataFrame`.
:param ratio: What portion of data to include in the training set
and the test set. :obj:`0.5` means that the data will be
            distributed equally.
:type ratio: float
"""
data = tools.first_answers(data)
return tools.split_data(data, ratio=ratio)
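# --- Editor's worked example (not part of the original module) ---
# Numeric sketch of a single Elo update with invented values. With equal skill
# and difficulty the prediction is 0.5; a correct answer gives a shift of +0.5,
# scaled by the uncertainty function alpha / (1 + beta * n), so early answers
# move the estimates the most. The inline logistic function is assumed to match
# what tools.sigmoid computes.
def _example_elo_update_step(skill=0.0, difficulty=0.0, is_correct=1,
                             n_answers=0, alpha=1.0, beta=0.05):
    import math
    prediction = 1.0 / (1.0 + math.exp(-(skill - difficulty)))
    shift = is_correct - prediction
    uncertainty = alpha / (1 + beta * n_answers)
    # Returns (0.5, -0.5) for the default arguments.
    return skill + uncertainty * shift, difficulty - uncertainty * shift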
class EloResponseTime(EloModel):
"""Extension of the Elo model that takes response time of user
into account.
"""
ABBR = 'Elo/RT'
def __init__(self, *args, **kwargs):
self.zeta = kwargs.pop('zeta', 3)
super(EloResponseTime, self).__init__(*args, **kwargs)
def update(self, answer):
"""Updates skills of users and difficulties of places according
to given answer.
:param answer: Answer to a question.
:type answer: :class:`pandas.Series` or :class:`Answer`
"""
user = self.users[answer.user_id]
place = self.places[answer.place_id]
prediction = self.predict(answer)
level = tools.automaticity_level(answer.response_time)
prob = (prediction * self.zeta + level) / (self.zeta + 1)
shift = answer.is_correct - prob
user.inc_skill(self.uncertainty(user.answers_count) * shift)
place.inc_difficulty(-(self.uncertainty(place.answers_count) * shift))
self.predictions[answer.id] = prediction
class PFAModel(Model):
"""Standard Performance Factor Analysis.
:param gamma: The significance of the update when the student
answered correctly.
:type gamma: float
:param delta: The significance of the update when the student
answered incorrectly.
:type delta: float
"""
ABBR = 'PFA'
def __init__(self, prior=None, gamma=3.4, delta=-0.3):
super(PFAModel, self).__init__()
self.prior = prior or DummyPriorModel()
self.gamma = gamma
self.delta = delta
self.init_model()
def init_model(self):
"""Initializes attribute of the model that stores current
knowledge of places for all students.
"""
self.items = tools.keydefaultdict(
lambda *args: Item(self.prior, *args)
)
self.predictions = {}
def predict(self, question):
"""Returns probability of correct answer for given question.
:param question: Asked question.
:type question: :class:`pandas.Series` or :class:`Question`
"""
item = self.items[question.user_id, question.place_id]
knowledge = (
item.knowledge +
self.gamma * len(item.correct) +
self.delta * len(item.incorrect)
)
return tools.sigmoid(knowledge)
def update(self, answer):
"""Performes update of current knowledge of a user based on the
given answer.
:param answer: Answer to a question.
:type answer: :class:`pandas.Series` or :class:`Answer`
"""
item = self.items[answer.user_id, answer.place_id]
if not item.practices:
self.prior.update(answer)
prediction = self.predict(answer)
self.predictions[answer.id] = prediction
item.add_practice(answer)
def train(self, data):
"""Trains the model on given data set.
:param data: Data set on which to train the model.
:type data: :class:`pandas.DataFrame`
"""
self.init_model()
data.sort(['inserted']).apply(self.update, axis=1)
@classmethod
def split_data(self, data):
"""Classmethod that splits data into training set and test set.
:param data: The object containing data.
:type data: :class:`pandas.DataFrame`.
"""
test_set = tools.last_answers(data)
train_set = data[~data['id'].isin(test_set['id'])]
return train_set, test_set
class PFAExt(PFAModel):
"""PFA model for estimation of current knowledge.
:param gamma: The significance of the update when the student
answered correctly.
:type gamma: float
:param delta: The significance of the update when the student
answered incorrectly.
:type delta: float
"""
ABBR = 'PFA/E'
def predict(self, question):
"""Returns probability of correct answer for given question.
:param question: Asked question.
:type question: :class:`pandas.Series` or :class:`Question`
"""
item = self.items[question.user_id, question.place_id]
prediction = tools.sigmoid(item.knowledge)
return self.respect_guess(prediction, question.options)
def update(self, answer):
"""Performes update of current knowledge of a user based on the
given answer.
:param answer: Answer to a question.
:type answer: :class:`pandas.Series` or :class:`Answer`
"""
item = self.items[answer.user_id, answer.place_id]
if not item.practices:
self.prior.update(answer)
prediction = self.predict(answer)
self.predictions[answer.id] = prediction
item.add_practice(answer)
if answer.is_correct:
item.inc_knowledge(self.gamma * (1 - prediction))
else:
item.inc_knowledge(self.delta * prediction)
class PFAResponseTime(PFAExt):
"""An extended version of the PFAExt model which alters student's
knowledge by respecting past response times.
:param gamma: The significance of the update when the student
answered correctly.
:type gamma: float
:param delta: The significance of the update when the student
answered incorrectly.
:type delta: float
:param zeta: The significance of response times.
:type zeta: float
"""
ABBR = 'PFA/E/RT'
def __init__(self, *args, **kwargs):
kwargs.setdefault('gamma', 1.5)
kwargs.setdefault('delta', -1.4)
self.zeta = kwargs.pop('zeta', 1.9)
super(PFAResponseTime, self).__init__(*args, **kwargs)
def update(self, answer):
"""Performes update of current knowledge of a user based on the
given answer.
:param answer: Answer to a question.
:type answer: :class:`pandas.Series` or :class:`Answer`
"""
item = self.items[answer.user_id, answer.place_id]
if not item.practices:
self.prior.update(answer)
prediction = self.predict(answer)
self.predictions[answer.id] = prediction
item.add_practice(answer)
level = tools.automaticity_level(answer.response_time) / self.zeta
if answer.is_correct:
item.inc_knowledge(self.gamma * (1 - prediction) + level)
else:
item.inc_knowledge(self.delta * prediction + level)
class PFAExtTiming(PFAExt):
"""Alternative version of :class:`PFAExtSpacing` which ignores
spacing effect. Only forgetting is considered.
:param gamma: The significance of the update when the student
answered correctly.
:type gamma: float
:param delta: The significance of the update when the student
answered incorrectly.
:type delta: float
:param time_effect_fun: Time effect function.
:type time_effect_fun: callable or string
"""
ABBR = 'PFA/E/T'
def __init__(self, *args, **kwargs):
kwargs.setdefault('gamma', 2.3)
kwargs.setdefault('delta', -0.9)
        time_effect = kwargs.pop('time_effect_fun', 'pow')
if isinstance(time_effect, basestring):
self.a, self.c = kwargs.pop('a', None), kwargs.pop('c', None)
self.time_effect = init_time_effect(self, time_effect)
else:
self.time_effect = time_effect
super(PFAExtTiming, self).__init__(*args, **kwargs)
def predict(self, question):
"""Returns probability of correct answer for given question.
:param question: Asked question.
:type question: :class:`pandas.Series` or :class:`Question`
"""
item = self.items[question.user_id, question.place_id]
if item.practices:
seconds = tools.time_diff(question.inserted, item.last_inserted)
time_effect = self.time_effect(seconds)
else:
time_effect = 0
prediction = tools.sigmoid(item.knowledge + time_effect)
return self.respect_guess(prediction, question.options)
class PFAExtStaircase(PFAExtTiming):
"""Alternative version of :class:`PFAESpacing` which ignores
spacing effect. Only forgetting is considered given by staircase
fucntion.
:param gamma: The significance of the update when the student
answered correctly.
:type gamma: float
:param delta: The significance of the update when the student
answered incorrectly.
:type delta: float
:param time_effect_fun: Values for staircase function.
:type time_effect_fun: dict (tuples as keys)
"""
ABBR = 'PFA/E/T staircase'
def __init__(self, *args, **kwargs):
kwargs.setdefault('gamma', 2.5)
kwargs.setdefault('delta', -0.8)
self.staircase = tools.intervaldict(kwargs.pop('staircase'))
self.time_effect = lambda k: self.staircase[k]
super(PFAExtTiming, self).__init__(*args, **kwargs)
class PFAExtSpacing(PFAExtTiming):
"""Extended version of PFA that takes into account the effect of
forgetting and spacing.
:param gamma: The significance of the update when the student
answers correctly.
:type gamma: float
:param delta: The significance of the update when the student
answers incorrectly.
:type delta: float
:param spacing_rate: The significance of the spacing effect. Lower
values make the effect less significant. If the spacing rate
is set to zero, the model is unaware of the spacing effect.
:type spacing_rate: float
:param decay_rate: The significance of the forgetting effect. Higher
values of decay rate make the students forget the item faster
and vice versa.
:type decay_rate: float
"""
ABBR = 'PFA/E/S'
def __init__(self, *args, **kwargs):
kwargs.setdefault('gamma', 2.8)
kwargs.setdefault('delta', -0.7)
self.spacing_rate = kwargs.pop('spacing_rate', 0)
self.decay_rate = kwargs.pop('decay_rate', 0.18)
self.iota = kwargs.pop('iota', 1.5)
super(PFAExtSpacing, self).__init__(*args, **kwargs)
def memory_strength(self, question):
"""Estimates memory strength of an item.
:param question: Asked question.
:type question: :class:`pandas.Series`
"""
item = self.items[question.user_id, question.place_id]
practices = item.get_diffs(question.inserted)
if len(practices) > 0:
return self.iota + tools.memory_strength(
filter(lambda x: x > 0, practices),
spacing_rate=self.spacing_rate,
decay_rate=self.decay_rate,
)
def predict(self, question):
"""Returns probability of correct answer for given question.
:param question: Asked question.
:type question: :class:`pandas.Series` or :class:`Question`
"""
item = self.items[question.user_id, question.place_id]
if item.any_incorrect:
strength = self.memory_strength(question)
else:
strength = 0
prediction = tools.sigmoid(item.knowledge + strength)
return self.respect_guess(prediction, question.options)
class PFAGong(PFAModel):
"""Yue Gong's extended Performance Factor Analysis.
:param gamma: The significance of the update when the student
answers correctly.
:type gamma: float
:param delta: The significance of the update when the student
answers incorrectly.
:type delta: float
:param decay: Decay rate of answers.
:type decay: float
"""
ABBR = 'PFA/G'
def __init__(self, *args, **kwargs):
kwargs.setdefault('gamma', 2.1)
kwargs.setdefault('delta', -0.8)
self.decay = kwargs.pop('decay', 0.8)
super(PFAGong, self).__init__(*args, **kwargs)
def get_weights(self, item, question):
"""Returns weights of previous answers to the given item.
:param item: *Item* (i.e. practiced place by a user).
:type item: :class:`Item`
:param question: Asked question.
:type question: :class:`pandas.Series` or :class:`Question`
"""
correct_weights = [
ans.is_correct * self.decay ** k for k, ans
in tools.reverse_enumerate(item.practices)
]
incorrect_weights = [
(1 - ans.is_correct) * self.decay ** k for k, ans
in tools.reverse_enumerate(item.practices)
]
return sum(correct_weights), sum(incorrect_weights)
def predict(self, question):
"""Returns probability of correct answer for given question.
:param question: Asked question.
:type question: :class:`pandas.Series` or :class:`Question`
"""
item = self.items[question.user_id, question.place_id]
correct_weight, incorrect_weight = self.get_weights(item, question)
knowledge = (
item.knowledge +
self.gamma * correct_weight +
self.delta * incorrect_weight
)
prediction = tools.sigmoid(knowledge)
return self.respect_guess(prediction, question.options)
def update(self, answer):
"""Performes update of current knowledge of a user based on the
given answer.
:param answer: Answer to a question.
:type answer: :class:`pandas.Series` or :class:`Answer`
"""
item = self.items[answer.user_id, answer.place_id]
if not item.practices:
self.prior.update(answer)
prediction = self.predict(answer)
self.predictions[answer.id] = prediction
item.add_practice(answer)
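# --- Editor's worked example (not part of the original module) ---
# Sketch of the decay weighting used by PFAGong.get_weights above, under the
# assumption that the most recent answer receives weight decay**0 and each
# older answer one extra factor of decay. The answer history below (oldest
# first) is invented for the example.
def _example_gong_weights(history=(True, True, False), decay=0.8):
    correct = sum(int(ok) * decay ** k
                  for k, ok in enumerate(reversed(history)))
    incorrect = sum((1 - int(ok)) * decay ** k
                    for k, ok in enumerate(reversed(history)))
    # Returns (0.8 + 0.64, 1.0) for the default history.
    return correct, incorrect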
class PFAGongTiming(PFAGong):
"""Performance Factor Analysis combining some aspects of both
    Yue Gong's PFA and the ACT-R model.
:param gamma: The significance of the update when the student
answers correctly.
:type gamma: float
:param delta: The significance of the update when the student
answers incorrectly.
:type delta: float
:param time_effect_fun: Time effect function.
:type time_effect_fun: callable or string
"""
ABBR = 'PFA/G/T old'
def __init__(self, *args, **kwargs):
kwargs.setdefault('gamma', 1.7)
kwargs.setdefault('delta', 0.5)
time_effect = kwargs.pop('time_effect_fun', 'pow')
if isinstance(time_effect, basestring):
self.a, self.c = kwargs.pop('a', None), kwargs.pop('c', None)
self.time_effect = init_time_effect(self, time_effect)
else:
self.time_effect = time_effect
super(PFAGong, self).__init__(*args, **kwargs)
def get_weights(self, item, question):
"""Returns weights of previous answers to the given item.
:param item: *Item* (i.e. practiced place by a user).
:type item: :class:`Item`
:param question: Asked question.
:type question: :class:`pandas.Series` or :class:`Question`
"""
correct_weights = [
max(ans.is_correct * self.time_effect(diff), 0) for ans, diff
in izip(item.practices, item.get_diffs(question.inserted))
]
incorrect_weights = [
(1 - ans.is_correct) * self.time_effect(diff) for ans, diff
in izip(item.practices, item.get_diffs(question.inserted))
]
return sum(correct_weights), sum(incorrect_weights)
class PFATiming(PFAGong):
"""Performance Factor Analysis combining some aspects of both
    Yue Gong's PFA and the ACT-R model.
:param gamma: The significance of the update when the student
answers correctly.
:type gamma: float
:param delta: The significance of the update when the student
answers incorrectly.
:type delta: float
:param time_effect_good: Time effect function for correct answers.
:type time_effect_good: callable or string
:param time_effect_bad: Time effect function for wrong answers.
:type time_effect_bad: callable or string
"""
ABBR = 'PFA/G/T'
def __init__(self, *args, **kwargs):
kwargs.setdefault('gamma', 1) # these parameters should not be
kwargs.setdefault('delta', 1) # modified, i.e. kept equal to 1
time_effect_good = kwargs.pop('time_effect_good', 'pow')
time_effect_bad = kwargs.pop('time_effect_bad', 'pow')
if isinstance(time_effect_good, basestring):
self.a, self.c = kwargs.pop('a', None), kwargs.pop('c', None)
self.time_effect_good = init_time_effect(
self, time_effect_good, parameters=('a', 'c'))
else:
self.time_effect_good = time_effect_good
if isinstance(time_effect_bad, basestring):
self.b, self.d = kwargs.pop('b', None), kwargs.pop('d', None)
self.time_effect_bad = init_time_effect(
self, time_effect_bad, parameters=('b', 'd'))
else:
self.time_effect_bad = time_effect_bad
super(PFAGong, self).__init__(*args, **kwargs)
def get_weights(self, item, question):
"""Returns weights of previous answers to the given item.
:param item: *Item* (i.e. practiced place by a user).
:type item: :class:`Item`
:param question: Asked question.
:type question: :class:`pandas.Series` or :class:`Question`
"""
correct_weights = [
ans.is_correct * self.time_effect_good(diff) for ans, diff
in izip(item.practices, item.get_diffs(question.inserted))
]
incorrect_weights = [
(1 - ans.is_correct) * self.time_effect_bad(diff) for ans, diff
in izip(item.practices, item.get_diffs(question.inserted))
]
return sum(correct_weights), sum(incorrect_weights)
|
mit
| 3,786,921,495,421,080,000 | 30.098242 | 78 | 0.614758 | false |
jarnoln/cvdb
|
viewcv/cv.py
|
1
|
4641
|
import logging
import weasyprint
from django.core.exceptions import PermissionDenied
from django.urls import reverse, reverse_lazy
from django.views.generic import DetailView, ListView, RedirectView
from django.views.generic.edit import UpdateView, DeleteView
from django.http import Http404, HttpResponse
from django.contrib import auth
from django.shortcuts import get_object_or_404
from django.template.loader import render_to_string
from .models import Cv
class CvOwnList(ListView):
model = Cv
def get_queryset(self):
# Only list own CVs
return Cv.objects.filter(user=self.request.user)
def get_context_data(self, **kwargs):
context = super(CvOwnList, self).get_context_data(**kwargs)
context['messages'] = self.request.GET.get('message', '')
return context
class CvPublicList(ListView):
model = Cv
template_name = 'viewcv/cv_public_list.html'
def get_queryset(self):
# Only list public CVs
return Cv.objects.filter(public=True)
def get_context_data(self, **kwargs):
context = super(CvPublicList, self).get_context_data(**kwargs)
context['messages'] = self.request.GET.get('message', '')
return context
class CvDetail(DetailView):
model = Cv
def get_object(self, queryset=None):
# logger = logging.getLogger(__name__)
if 'slug' in self.kwargs:
username = self.kwargs['slug']
# logger.debug('CvDetail:username={}'.format(username))
user = get_object_or_404(auth.get_user_model(), username=username)
cv = Cv.objects.filter(user=user, public=True, primary=True)
cv_count = cv.count()
if cv_count == 0:
raise Http404
else:
return cv.first()
else:
return super(CvDetail, self).get_object()
def get_context_data(self, **kwargs):
context = super(CvDetail, self).get_context_data(**kwargs)
context['messages'] = self.request.GET.get('message', '')
context['display'] = self.request.GET.get('display', '')
context['format'] = self.request.GET.get('format', '')
return context
def render_to_response(self, context, **response_kwargs):
print_format = context['format']
if print_format:
template = 'viewcv/cv_print.html'
html_string = render_to_string(template, context=context)
if context['format'] == 'pdf':
file_name = '{}.pdf'.format(self.object.user.username)
# html_string = render_to_string(self.get_template_names()[0], context=context)
html = weasyprint.HTML(string=html_string)
html.write_pdf(file_name)
# fs = FileSystemStorage('/tmp')
with open(file_name, 'rb') as pdf:
response = HttpResponse(pdf, content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="{}"'.format(file_name)
return response
else:
return HttpResponse(html_string)
return super(CvDetail, self).render_to_response(context, **response_kwargs)
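# --- Editor's sketch (not part of the original view code) ---
# The PDF branch of CvDetail.render_to_response boils down to rendering a
# template into an HTML string and handing it to WeasyPrint. A minimal
# standalone equivalent with invented content and output path:
def _example_html_to_pdf(html_string='<h1>Example CV</h1>',
                         out_path='/tmp/example-cv.pdf'):
    weasyprint.HTML(string=html_string).write_pdf(out_path)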
class CvUpdate(UpdateView):
model = Cv
fields = ['name', 'title', 'summary', 'public', 'primary', 'css', 'css_url']
def get_object(self):
cv = super(CvUpdate, self).get_object()
if cv.can_edit(self.request.user):
return cv
# Todo: Smarter way to handle this
raise Http404
def get_context_data(self, **kwargs):
context = super(CvUpdate, self).get_context_data(**kwargs)
context['message'] = self.request.GET.get('message', '')
return context
def get_success_url(self):
if self.object:
return reverse_lazy('cv', args=[self.object.id])
else:
return reverse('cv_list')
class CvSetAsPrimary(RedirectView):
def get_redirect_url(self, *args, **kwargs):
cv = get_object_or_404(Cv, pk=kwargs['pk'])
if cv.can_edit(self.request.user):
cv.set_as_primary()
return reverse('cv_list')
class CvDelete(DeleteView):
model = Cv
success_url = reverse_lazy('cv_list')
def get_object(self):
cv = super(CvDelete, self).get_object()
if cv.can_edit(self.request.user):
return cv
# Todo: Smarter way to handle this
# raise PermissionDenied
raise Http404
def render_to_response(self, context, **response_kwargs):
return super(CvDelete, self).render_to_response(context, **response_kwargs)
|
mit
| -3,008,300,420,551,048,000 | 33.634328 | 99 | 0.613876 | false |
51x/IP_Blocker-Flask_Iptables
|
flask_iptables.py
|
1
|
1578
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Block using iptables through Flask
import subprocess # Run iptables
from IPy import IP # Validate IP
import re # Make sure to validate
from flask import Flask # Run Flask
from flask import request # For url arguments
app = Flask(__name__)
# wget "127.0.0.1:9000/block?ip=8.8.8.8"
# wget -qO- "127.0.0.1:9000/block?ip=8.8.8.8&lock=xxxx"
@app.route('/block',methods=["GET"])
def blocker_def():
block_ip = request.args.get('ip')
# If used for localhost and not through proxy
lock = request.args.get('lock')
if lock == "xxxx":
pass
else:
return "0"
    # Mark the value invalid if it is missing or contains letters, otherwise validate the IP
    noalpha = re.search('[a-zA-Z]', block_ip) if block_ip else True
    if noalpha is None:
try:
IP(block_ip)
except:
block_ip = "Invalid"
print "Invalid input has been sent to us!!"
if block_ip != "Invalid":
if block_ip and len(block_ip) <= 15:
                pass  # length check passed; keep the address as-is
else:
block_ip = "Invalid"
print "Invalid input has been sent to us!!"
else:
block_ip = "Invalid"
print "Invalid input has been sent to us!!"
if block_ip != "Invalid":
subprocess.call(['/sbin/iptables', '-I', 'INPUT', '-s', block_ip, '-j', 'DROP'])
# Alternatively: iptables -I INPUT -s 8.8.8.8 -m time --utc --datestart 2017-08-23T00:00 --datestop 2099-08-25T00:00 -j DROP
else:
pass
return "0"
if __name__ == '__main__':
app.run('127.0.0.1',9000)
|
gpl-3.0
| 1,360,077,447,781,619,500 | 28.222222 | 128 | 0.569075 | false |
mahabs/nitro
|
nssrc/com/citrix/netscaler/nitro/resource/config/ssl/sslglobal_sslpolicy_binding.py
|
1
|
12604
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class sslglobal_sslpolicy_binding(base_resource) :
""" Binding class showing the sslpolicy that can be bound to sslglobal.
"""
def __init__(self) :
self._policyname = ""
self._type = ""
self._priority = 0
self._gotopriorityexpression = ""
self._invoke = False
self._labeltype = ""
self._labelname = ""
self.___count = 0
@property
def priority(self) :
"""The priority of the policy binding.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
"""The priority of the policy binding.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def policyname(self) :
"""The name for the SSL policy.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
"""The name for the SSL policy.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def labelname(self) :
"""Name of the virtual server or user-defined policy label to invoke if the policy evaluates to TRUE.
"""
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
"""Name of the virtual server or user-defined policy label to invoke if the policy evaluates to TRUE.
"""
try :
self._labelname = labelname
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
"""Expression or other value specifying the next policy to be evaluated if the current policy evaluates to TRUE. Specify one of the following values:
* NEXT - Evaluate the policy with the next higher priority number.
* END - End policy evaluation.
* USE_INVOCATION_RESULT - Applicable if this policy invokes another policy label. If the final goto in the invoked policy label has a value of END, the evaluation stops. If the final goto is anything other than END, the current policy label performs a NEXT.
* A default syntax expression that evaluates to a number.
If you specify an expression, the number to which it evaluates determines the next policy to evaluate, as follows:
* If the expression evaluates to a higher numbered priority, the policy with that priority is evaluated next.
* If the expression evaluates to the priority of the current policy, the policy with the next higher numbered priority is evaluated next.
* If the expression evaluates to a number that is larger than the largest numbered priority, policy evaluation ends.
An UNDEF event is triggered if:
* The expression is invalid.
* The expression evaluates to a priority number that is numerically lower than the current policy's priority.
* The expression evaluates to a priority number that is between the current policy's priority number (say, 30) and the highest priority number (say, 100), but does not match any configured priority number (for example, the expression evaluates to the number 85). This example assumes that the priority number increments by 10 for every successive policy, and therefore a priority number of 85 does not exist in the policy label.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
"""Expression or other value specifying the next policy to be evaluated if the current policy evaluates to TRUE. Specify one of the following values:
* NEXT - Evaluate the policy with the next higher priority number.
* END - End policy evaluation.
* USE_INVOCATION_RESULT - Applicable if this policy invokes another policy label. If the final goto in the invoked policy label has a value of END, the evaluation stops. If the final goto is anything other than END, the current policy label performs a NEXT.
* A default syntax expression that evaluates to a number.
If you specify an expression, the number to which it evaluates determines the next policy to evaluate, as follows:
* If the expression evaluates to a higher numbered priority, the policy with that priority is evaluated next.
* If the expression evaluates to the priority of the current policy, the policy with the next higher numbered priority is evaluated next.
* If the expression evaluates to a number that is larger than the largest numbered priority, policy evaluation ends.
An UNDEF event is triggered if:
* The expression is invalid.
* The expression evaluates to a priority number that is numerically lower than the current policy's priority.
* The expression evaluates to a priority number that is between the current policy's priority number (say, 30) and the highest priority number (say, 100), but does not match any configured priority number (for example, the expression evaluates to the number 85). This example assumes that the priority number increments by 10 for every successive policy, and therefore a priority number of 85 does not exist in the policy label.
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def invoke(self) :
"""Invoke policies bound to a virtual server, service, or policy label. After the invoked policies are evaluated, the flow returns to the policy with the next priority.
"""
try :
return self._invoke
except Exception as e:
raise e
@invoke.setter
def invoke(self, invoke) :
"""Invoke policies bound to a virtual server, service, or policy label. After the invoked policies are evaluated, the flow returns to the policy with the next priority.
"""
try :
self._invoke = invoke
except Exception as e:
raise e
@property
def type(self) :
"""Global bind point to which the policy is bound.<br/>Possible values = CONTROL_OVERRIDE, CONTROL_DEFAULT, DATA_OVERRIDE, DATA_DEFAULT.
"""
try :
return self._type
except Exception as e:
raise e
@type.setter
def type(self, type) :
"""Global bind point to which the policy is bound.<br/>Possible values = CONTROL_OVERRIDE, CONTROL_DEFAULT, DATA_OVERRIDE, DATA_DEFAULT
"""
try :
self._type = type
except Exception as e:
raise e
@property
def labeltype(self) :
"""Type of policy label to invoke. Specify virtual server for a policy label associated with a virtual server, or policy label for a user-defined policy label.
"""
try :
return self._labeltype
except Exception as e:
raise e
@labeltype.setter
def labeltype(self, labeltype) :
"""Type of policy label to invoke. Specify virtual server for a policy label associated with a virtual server, or policy label for a user-defined policy label.
"""
try :
self._labeltype = labeltype
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(sslglobal_sslpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.sslglobal_sslpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = sslglobal_sslpolicy_binding()
updateresource.policyname = resource.policyname
updateresource.gotopriorityexpression = resource.gotopriorityexpression
updateresource.type = resource.type
updateresource.invoke = resource.invoke
updateresource.labeltype = resource.labeltype
updateresource.labelname = resource.labelname
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [sslglobal_sslpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].policyname = resource[i].policyname
updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
updateresources[i].type = resource[i].type
updateresources[i].invoke = resource[i].invoke
updateresources[i].labeltype = resource[i].labeltype
updateresources[i].labelname = resource[i].labelname
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = sslglobal_sslpolicy_binding()
deleteresource.policyname = resource.policyname
deleteresource.type = resource.type
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [sslglobal_sslpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].policyname = resource[i].policyname
deleteresources[i].type = resource[i].type
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service) :
""" Use this API to fetch a sslglobal_sslpolicy_binding resources.
"""
try :
obj = sslglobal_sslpolicy_binding()
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, filter_) :
""" Use this API to fetch filtered set of sslglobal_sslpolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = sslglobal_sslpolicy_binding()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service) :
""" Use this API to count sslglobal_sslpolicy_binding resources configued on NetScaler.
"""
try :
obj = sslglobal_sslpolicy_binding()
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, filter_) :
""" Use this API to count the filtered set of sslglobal_sslpolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = sslglobal_sslpolicy_binding()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Type:
CONTROL_OVERRIDE = "CONTROL_OVERRIDE"
CONTROL_DEFAULT = "CONTROL_DEFAULT"
DATA_OVERRIDE = "DATA_OVERRIDE"
DATA_DEFAULT = "DATA_DEFAULT"
class Labeltype:
vserver = "vserver"
service = "service"
policylabel = "policylabel"
class sslglobal_sslpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.sslglobal_sslpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.sslglobal_sslpolicy_binding = [sslglobal_sslpolicy_binding() for _ in range(length)]
|
apache-2.0
| -3,906,446,395,814,470,000 | 36.736527 | 430 | 0.730482 | false |
numirias/firefed
|
firefed/feature/bookmarks.py
|
1
|
2683
|
from attr import attrib, attrs
from firefed.feature import Feature, formatter
from firefed.output import csv_writer, good, out
from firefed.util import moz_to_unix_timestamp
DIRECTORY_TYPE = 2
@attrs
class Bookmark:
id = attrib()
parent = attrib()
type = attrib()
title = attrib()
guid = attrib()
added = attrib(converter=moz_to_unix_timestamp)
last_modified = attrib(converter=moz_to_unix_timestamp)
url = attrib()
@attrs
class Bookmarks(Feature):
"""List bookmarks."""
def prepare(self):
bmarks = self.load_sqlite(
db='places.sqlite',
query='''SELECT b.id, b.parent, b.type, b.title, b.guid, b.dateAdded,
b.lastModified, p.url FROM moz_bookmarks b LEFT JOIN moz_places p
ON b.fk = p.id
''',
cls=Bookmark,
column_map={
'lastModified': 'last_modified',
'dateAdded': 'added'
},
)
# Remove pseudo-bookmarks from list
bmarks = (b for b in bmarks if not str(b.url).startswith('place:'))
self.bmarks = bmarks
def summarize(self):
out('%d bookmarks found.' % len(list(self.bmarks)))
def run(self):
self.build_format()
@formatter('tree', default=True)
def tree(self):
bmarks = list(self.bmarks)
bmark_map = {b.id: b for b in bmarks}
def walk(node, depth=0):
if node.type == DIRECTORY_TYPE:
text = good('[%s]') % node.title
out('%s%s' % (depth * 4 * ' ', text))
else:
out('%s* %s' % (depth * 4 * ' ', node.title))
out('%s%s' % ((depth + 1) * 4 * ' ', node.url))
children = [n for n in bmarks if n.parent == node.id]
for child in children:
walk(child, depth + 1)
for bmark in bmarks:
try:
parent_guid = bmark_map[bmark.parent].guid
except KeyError:
continue
if bmark.title == '':
continue
if parent_guid != 'root________':
continue
walk(bmark)
@formatter('list')
def list(self):
for bmark in self.bmarks:
if not bmark.url:
continue
out('%s\n %s' % (bmark.title, bmark.url))
@formatter('csv')
def csv(self):
writer = csv_writer()
writer.writerow(('title', 'url', 'added', 'last_modified'))
for b in self.bmarks:
if not b.url:
continue
writer.writerow((b.title, b.url, b.added,
b.last_modified))
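# Illustrative CLI usage (flag names assumed from firefed's conventions; the
# profile path is a placeholder):
#
#   firefed --profile ~/.mozilla/firefox/xyz.default bookmarks --format csv
#
# The format value maps to one of the @formatter registrations above
# ('tree' by default, or 'list'/'csv').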
|
mit
| 7,441,449,831,997,626,000 | 28.483516 | 81 | 0.51025 | false |
plaidml/plaidml
|
demos/TransferLearning/TransferLearningDemo.py
|
1
|
10313
|
import argparse
import os
import warnings
import numpy as np
import tensorflow as tf
from keras.applications.resnet50 import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from tensorflow import keras
from tensorflow.keras.applications import MobileNetV2, ResNet50
from tensorflow.keras.models import Sequential
from tensorflow.python.keras import optimizers
warnings.simplefilter('ignore')
class Demo:
# Cats & Dogs classes
NUM_CLASSES = 2
# RGB
CHANNELS = 3
RESNET50_POOLING_AVERAGE = 'avg'
DENSE_LAYER_ACTIVATION = 'softmax'
OBJECTIVE_FUNCTION = 'categorical_crossentropy'
    # Common accuracy metric for all outputs, but different metrics can be used for different outputs
LOSS_METRICS = ['accuracy']
SGD = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
IMAGE_SIZE = 224
IMAGE_SHAPE = (IMAGE_SIZE, IMAGE_SIZE, 3)
def __init__(self,
training=0,
warmup=0,
predict=0,
epochs=5,
batch_size=16,
model_name='ResNet50',
backend='CPU',
workers=1,
verbose=1,
callbacks=None):
self.epochs = epochs
self.batch_size = batch_size
self.workers = workers
self.verbose = verbose
# Images
self.train_generator = None
self.validation_generator = None
self.test_generator = None
self.test_class_indices = []
self.init_images()
self.setup_ngraph_bridge(backend=backend)
self.compile_model(model_name)
if training:
if warmup:
self.train(epochs=1, callbacks=callbacks)
h = self.train(epochs=self.epochs, callbacks=callbacks)
if predict:
p = self.predict(callbacks)
def init_images(self):
zip_file = tf.keras.utils.get_file(
origin=
"https://github.com/plaidml/depot/raw/master/datasets/cats_and_dogs_filtered.zip",
fname="cats_and_dogs_filtered.zip",
extract=True)
base_dir, _ = os.path.splitext(zip_file)
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
self.test_dir = os.path.join(base_dir, 'test')
# Directory with our training cat pictures
train_cats_dir = os.path.join(train_dir, 'cats')
# Directory with our training dog pictures
train_dogs_dir = os.path.join(train_dir, 'dogs')
# Directory with our validation cat pictures
validation_cats_dir = os.path.join(validation_dir, 'cats')
# Directory with our validation dog pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
# Directory with our test cat pictures
test_cats_dir = os.path.join(self.test_dir, 'cats')
# Directory with our test dog pictures
test_dogs_dir = os.path.join(self.test_dir, 'dogs')
# Preprocess images
train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
validation_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
#with self.out_stats:
# Flow training images in batches of 20 using train_datagen generator
self.train_generator = train_datagen.flow_from_directory(
train_dir, # Source directory for the training images
target_size=(self.IMAGE_SIZE, self.IMAGE_SIZE),
batch_size=self.batch_size,
class_mode='categorical')
# Flow validation images in batches of 20 using test_datagen generator
self.validation_generator = validation_datagen.flow_from_directory(
validation_dir, # Source directory for the validation images
target_size=(self.IMAGE_SIZE, self.IMAGE_SIZE),
batch_size=self.batch_size,
class_mode='categorical')
# Flow validation images in batches of 20 using test_datagen generator
self.test_generator = validation_datagen.flow_from_directory(
self.test_dir, # Source directory for the test images
target_size=(self.IMAGE_SIZE, self.IMAGE_SIZE),
batch_size=self.batch_size,
class_mode=None,
shuffle=False,
seed=42)
# Test Correct Values (0 Cat, 1 Dog)
for file in self.test_generator.filenames:
if "cat" in file:
self.test_class_indices.append(0)
elif "dog" in file:
self.test_class_indices.append(1)
else:
print("Error, unclassifiable image " + file)
def setup_ngraph_bridge(self, backend):
        # Environment variables
os.environ['PLAIDML_USE_STRIPE'] = '1'
if self.workers < 1:
            os.environ['OMP_NUM_THREADS'] = '1'  # environment variable values must be strings
else:
# Use default
if os.getenv('OMP_NUM_THREADS') is not None:
del os.environ['OMP_NUM_THREADS']
import ngraph_bridge
if backend == 'DISABLED' or backend == 'TF':
ngraph_bridge.disable()
elif backend == 'CPU':
ngraph_bridge.set_backend('CPU')
ngraph_bridge.enable()
elif backend == 'PLAIDML':
ngraph_bridge.set_backend('PLAIDML')
ngraph_bridge.enable()
else:
print("ERROR: Unsupported backend " + backend + " selected.")
def compile_model(self, model_name, fine=0):
if model_name == 'ResNet50':
self.base_model = ResNet50(pooling=self.RESNET50_POOLING_AVERAGE,
include_top=False,
weights='imagenet')
self.model = tf.keras.Sequential([
self.base_model,
keras.layers.Dense(self.NUM_CLASSES, activation=self.DENSE_LAYER_ACTIVATION)
])
elif model_name == 'MobileNet v2':
self.base_model = MobileNetV2(input_shape=self.IMAGE_SHAPE,
include_top=False,
weights='imagenet')
self.model = tf.keras.Sequential([
self.base_model,
keras.layers.GlobalAveragePooling2D(),
keras.layers.Dense(self.NUM_CLASSES, activation=self.DENSE_LAYER_ACTIVATION)
])
# Fine Tuning
if fine:
self.base_model.trainable = True
# Fine tune from this layer onwards
self.fine_tune_at = fine
# Freeze all the layers before the `fine_tune_at` layer
for layer in self.base_model.layers[:self.fine_tune_at]:
layer.trainable = False
else:
self.base_model.trainable = False
self.model.compile(optimizer=self.SGD,
loss=self.OBJECTIVE_FUNCTION,
metrics=self.LOSS_METRICS)
def train(self, epochs, callbacks=None):
steps_per_epoch = self.train_generator.n // self.batch_size
validation_steps = self.validation_generator.n // self.batch_size
history = self.model.fit_generator(self.train_generator,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
workers=self.workers,
validation_data=self.validation_generator,
validation_steps=validation_steps,
verbose=self.verbose,
callbacks=callbacks)
return history
def predict(self, callbacks=None):
probabilities = self.model.predict_generator(self.test_generator,
verbose=self.verbose,
workers=self.workers,
callbacks=callbacks)
return probabilities
def positive_int(v):
value = int(v)
if value < 0:
msg = '%r is not a positive value' % value
raise argparse.ArgumentTypeError(msg)
return value
def positive_nonzero_int(v):
value = int(v)
if value <= 0:
msg = '%r is not a positive non-zero value' % value
raise argparse.ArgumentTypeError(msg)
return value
if __name__ == "__main__":
parser = argparse.ArgumentParser(prog='TransferLearningDemo')
parser.add_argument('--training',
help='performs the training phase of the demo',
action='store_true')
parser.add_argument('--predict',
help='performs the inference phase of the demo',
action='store_true')
parser.add_argument(
'--network_type',
        help='selects the network used for training/classification [ResNet50]/MobileNet v2',
default='ResNet50')
parser.add_argument(
'--backend',
        help='selects the backend used for training/classification [PLAIDML]/CPU/TF',
default='PLAIDML')
parser.add_argument('--quiet', help='disables most logging', action='store_false')
parser.add_argument('--epochs',
help='number of epochs to train',
type=positive_nonzero_int,
default=5)
parser.add_argument('--batch_size',
help='specify batch size for training',
type=positive_nonzero_int,
default=16)
parser.add_argument(
'--workers',
help='specify number of workers for threading; zero specifies main thread only',
type=positive_int,
default=1)
parser.add_argument('--warmup', help='warmup run for training', action='store_true')
args = parser.parse_args()
Demo(training=args.training,
warmup=args.warmup,
predict=args.predict,
epochs=args.epochs,
batch_size=args.batch_size,
model_name=args.network_type,
backend=args.backend,
workers=args.workers,
verbose=args.quiet)
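# Example invocation (arguments as defined by the parser above; values are
# placeholders to adjust per run):
#
#   python TransferLearningDemo.py --training --predict --epochs 5 \
#       --batch_size 16 --network_type ResNet50 --backend PLAIDML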
|
apache-2.0
| -2,606,503,370,377,058,300 | 36.501818 | 96 | 0.569669 | false |
DongjunLee/kino-bot
|
kino/utils/state.py
|
1
|
2110
|
# -*- coding: utf-8 -*-
import arrow
from .arrow import ArrowUtil
from .data_handler import DataHandler
class State(object):
FLOW = "flow"
MEMORY = "memory"
ACTION = "action"
SLEEP = "sleep"
REST = "rest"
def __init__(self):
self.data_handler = DataHandler()
self.fname = "state.json"
self.current = None
def check(self):
self.current = self.data_handler.read_file(self.fname)
def save(self, key, value):
self.check()
self.current[key] = value
self.data_handler.write_file(self.fname, self.current)
def flow_start(self, class_name, func_name):
data = {"class": class_name, "def": func_name, "step": 1}
self.save(self.FLOW, data)
def flow_next_step(self, num=1):
self.check()
current_flow = self.current[self.FLOW]
step_num = current_flow["step"] + num
current_flow["step"] = step_num
self.data_handler.write_file(self.fname, self.current)
def flow_complete(self):
self.save(self.FLOW, {})
def memory_skill(self, text, func_name, params):
data = {"text": text, "class": "Functions", "def": func_name, "params": params}
self.save(self.MEMORY, data)
def do_action(self, event):
time = ArrowUtil.get_action_time(event["time"])
data = {"action": event["action"], "time": str(time)}
self.save(self.ACTION, data)
def presence_log(self, user, presence):
data = {"user": user, "presence": presence, "time": str(arrow.now())}
self.save(self.SLEEP, data)
def advice_rest(self, diff_min):
rest_mins = 0
if diff_min > 100:
rest_mins = 20
elif diff_min > 60:
rest_mins = 6 + diff_min // 10
now = arrow.now()
advice = now.shift(minutes=rest_mins)
data = {"time": str(advice), "try": False}
self.save(self.REST, data)
def advice_check(self):
self.check()
rest_state = self.current.get(self.REST)
rest_state["try"] = True
self.save(self.REST, rest_state)
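# Illustrative usage sketch; assumes a writable "state.json" managed by
# DataHandler, and the class/function names passed in are placeholders.
#
#   state = State()
#   state.flow_start("Functions", "send_message")  # begin a multi-step flow at step 1
#   state.flow_next_step()                         # advance to step 2
#   state.flow_complete()                          # clear the flow marker
#   state.advice_rest(diff_min=75)                 # schedules a rest reminder ~13 minutes out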
|
mit
| 4,102,606,326,711,703,000 | 26.763158 | 87 | 0.575355 | false |
ReproducibleBuilds/diffoscope
|
tests/comparators/test_gnumeric.py
|
1
|
1739
|
# -*- coding: utf-8 -*-
#
# diffoscope: in-depth comparison of files, archives, and directories
#
# Copyright © 2018 Chris Lamb <lamby@debian.org>
#
# diffoscope is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# diffoscope is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with diffoscope. If not, see <https://www.gnu.org/licenses/>.
import pytest
from diffoscope.comparators.gnumeric import GnumericFile
from ..utils.data import load_fixture, get_data
from ..utils.tools import skip_unless_tools_exist
from ..utils.nonexisting import assert_non_existing
gnumeric1 = load_fixture('test1.gnumeric')
gnumeric2 = load_fixture('test2.gnumeric')
def test_identification(gnumeric1):
assert isinstance(gnumeric1, GnumericFile)
def test_no_differences(gnumeric1):
difference = gnumeric1.compare(gnumeric1)
assert difference is None
@pytest.fixture
def differences(gnumeric1, gnumeric2):
return gnumeric1.compare(gnumeric2).details
@skip_unless_tools_exist('ssconvert')
def test_diff(differences):
expected_diff = get_data('gnumeric_expected_diff')
assert differences[0].unified_diff == expected_diff
@skip_unless_tools_exist('ssconvert')
def test_compare_non_existing(monkeypatch, gnumeric1):
assert_non_existing(monkeypatch, gnumeric1, has_null_source=False)
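# To run just this module (the diff tests are skipped unless 'ssconvert' is
# installed, per the decorators above):
#
#   pytest tests/comparators/test_gnumeric.py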
|
gpl-3.0
| 4,844,339,935,804,039,000 | 31.185185 | 70 | 0.765823 | false |