Dataset schema: repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
melodous/designate | designate/backend/impl_powerdns/__init__.py | 1 | 17862 |
# Copyright 2012 Hewlett-Packard Development Company, L.P. All Rights Reserved.
# Copyright 2012 Managed I.T.
#
# Author: Patrick Galbraith <patg@hp.com>
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import threading
from oslo.config import cfg
from oslo.db import options
from sqlalchemy.sql import select
from designate.openstack.common import excutils
from designate.openstack.common import log as logging
from designate.i18n import _LC
from designate import exceptions
from designate.backend import base
from designate.backend.impl_powerdns import tables
from designate.sqlalchemy import session
from designate.sqlalchemy.expressions import InsertFromSelect
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
TSIG_SUPPORTED_ALGORITHMS = ['hmac-md5']
CONF.register_group(cfg.OptGroup(
name='backend:powerdns', title="Configuration for Powerdns Backend"
))
CONF.register_opts([
cfg.StrOpt('domain-type', default='NATIVE', help='PowerDNS Domain Type'),
cfg.ListOpt('also-notify', default=[], help='List of additional IPs to '
'send NOTIFYs to'),
] + options.database_opts, group='backend:powerdns')
# Override the default DB connection registered above, to avoid name conflicts
# between the Designate and PowerDNS databases.
CONF.set_default('connection', 'sqlite:///$state_path/powerdns.sqlite',
group='backend:powerdns')
def _map_col(keys, col):
return dict([(keys[i], col[i]) for i in range(len(keys))])
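# --- Editor's illustrative sketch (not part of the original module) ---
# _map_col() above pairs a query's column names with a fetched row, index by
# index, producing a plain dict. The names and values below are hypothetical.
def _example_map_col_usage():
    keys = ['id', 'designate_id', 'name']
    row = (1, 'a1b2c3', 'example.org')
    # -> {'id': 1, 'designate_id': 'a1b2c3', 'name': 'example.org'}
    return _map_col(keys, row)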
class PowerDNSBackend(base.Backend):
__plugin_name__ = 'powerdns'
def __init__(self, *args, **kwargs):
super(PowerDNSBackend, self).__init__(*args, **kwargs)
self.local_store = threading.local()
def start(self):
super(PowerDNSBackend, self).start()
@property
def session(self):
# NOTE: This uses a thread local store, allowing each greenthread to
# have its own session stored correctly. Without this, each
# greenthread may end up using a single global session, which
# leads to bad things happening.
if not hasattr(self.local_store, 'session'):
self.local_store.session = session.get_session(self.name)
return self.local_store.session
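# NOTE (editor's illustration, hypothetical, not part of the original file):
# the thread-local pattern used by the property above boils down to this
# standalone sketch, where each thread lazily builds and caches its own
# session the first time it asks for one:
#
#     import threading
#     _store = threading.local()
#
#     def get_session(factory):
#         if not hasattr(_store, 'session'):
#             _store.session = factory()
#         return _store.session
#
#     # Repeated calls within one thread return the same object;
#     # each new thread gets a fresh one.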
def _create(self, table, values):
query = table.insert()
resultproxy = self.session.execute(query, values)
# Refetch the row, for generated columns etc
query = select([table])\
.where(table.c.id == resultproxy.inserted_primary_key[0])
resultproxy = self.session.execute(query)
return _map_col(query.columns.keys(), resultproxy.fetchone())
def _update(self, table, values, exc_notfound, id_col=None):
if id_col is None:
id_col = table.c.id
query = table.update()\
.where(id_col == values[id_col.name])\
.values(**values)
resultproxy = self.session.execute(query)
if resultproxy.rowcount != 1:
raise exc_notfound()
# Refetch the row, for generated columns etc
query = select([table])\
.where(id_col == values[id_col.name])
resultproxy = self.session.execute(query)
return _map_col(query.columns.keys(), resultproxy.fetchone())
def _get(self, table, id_, exc_notfound, id_col=None):
if id_col is None:
id_col = table.c.id
query = select([table])\
.where(id_col == id_)
resultproxy = self.session.execute(query)
results = resultproxy.fetchall()
if len(results) != 1:
raise exc_notfound()
# Map col keys to values in result
return _map_col(query.columns.keys(), results[0])
def _delete(self, table, id_, exc_notfound, id_col=None):
if id_col is None:
id_col = table.c.id
query = table.delete()\
.where(id_col == id_)
resultproxy = self.session.execute(query)
if resultproxy.rowcount != 1:
raise exc_notfound()
# TSIG Key Methods
def create_tsigkey(self, context, tsigkey):
"""Create a TSIG Key"""
if tsigkey['algorithm'] not in TSIG_SUPPORTED_ALGORITHMS:
raise exceptions.NotImplemented('Unsupported algorithm')
values = {
'designate_id': tsigkey['id'],
'name': tsigkey['name'],
'algorithm': tsigkey['algorithm'],
'secret': base64.b64encode(tsigkey['secret'])
}
self._create(tables.tsigkeys, values)
# NOTE(kiall): Prepare and execute query to install this TSIG Key on
# every domain. We use a manual query here since anything
# else would be impossibly slow.
query_select = select([
tables.domains.c.id,
"'TSIG-ALLOW-AXFR'",
"'%s'" % tsigkey['name']]
)
columns = [
tables.domain_metadata.c.domain_id,
tables.domain_metadata.c.kind,
tables.domain_metadata.c.content,
]
query = InsertFromSelect(tables.domain_metadata, query_select,
columns)
# NOTE(kiall): A TX is required for, at the least, SQLite.
self.session.begin()
self.session.execute(query)
self.session.commit()
def update_tsigkey(self, context, tsigkey):
"""Update a TSIG Key"""
values = self._get(
tables.tsigkeys,
tsigkey['id'],
exceptions.TsigKeyNotFound,
id_col=tables.tsigkeys.c.designate_id)
# Store a copy of the original name..
original_name = values['name']
values.update({
'name': tsigkey['name'],
'algorithm': tsigkey['algorithm'],
'secret': base64.b64encode(tsigkey['secret'])
})
self._update(tables.tsigkeys, values,
id_col=tables.tsigkeys.c.designate_id,
exc_notfound=exceptions.TsigKeyNotFound)
# If the name changed, update the necessary DomainMetadata records
if original_name != tsigkey['name']:
query = tables.domain_metadata.update()\
.where(tables.domain_metadata.c.kind == 'TSIG-ALLOW-AXFR')\
.where(tables.domain_metadata.c.content == original_name)
query = query.values(content=tsigkey['name'])
self.session.execute(query)
def delete_tsigkey(self, context, tsigkey):
"""Delete a TSIG Key"""
try:
# Delete this TSIG Key itself
self._delete(
tables.tsigkeys, tsigkey['id'],
exceptions.TsigKeyNotFound,
id_col=tables.tsigkeys.c.designate_id)
except exceptions.TsigKeyNotFound:
# If the TSIG Key is already gone, that's ok. We're deleting it
# anyway, so just log and continue.
LOG.critical(_LC('Attempted to delete a TSIG key which is '
'not present in the backend. ID: %s') %
tsigkey['id'])
return
query = tables.domain_metadata.delete()\
.where(tables.domain_metadata.c.kind == 'TSIG-ALLOW-AXFR')\
.where(tables.domain_metadata.c.content == tsigkey['name'])
self.session.execute(query)
# Domain Methods
def create_domain(self, context, domain):
try:
self.session.begin()
servers = self.central_service.find_servers(self.admin_context)
domain_values = {
'designate_id': domain['id'],
'name': domain['name'].rstrip('.'),
'master': servers[0]['name'].rstrip('.'),
'type': CONF['backend:powerdns'].domain_type,
'account': context.tenant
}
domain_ref = self._create(tables.domains, domain_values)
# Install all TSIG Keys on this domain
query = select([tables.tsigkeys.c.name])
resultproxy = self.session.execute(query)
values = [i for i in resultproxy.fetchall()]
self._update_domainmetadata(domain_ref['id'], 'TSIG-ALLOW-AXFR',
values)
# Install all Also Notify's on this domain
self._update_domainmetadata(domain_ref['id'], 'ALSO-NOTIFY',
CONF['backend:powerdns'].also_notify)
except Exception:
with excutils.save_and_reraise_exception():
self.session.rollback()
else:
self.session.commit()
def update_domain(self, context, domain):
domain_ref = self._get(tables.domains, domain['id'],
exceptions.DomainNotFound,
id_col=tables.domains.c.designate_id)
try:
self.session.begin()
# Update the Records TTLs where necessary
query = tables.records.update()\
.where(tables.records.c.domain_id == domain_ref['id'])
query = query.where(tables.records.c.inherit_ttl == True)  # noqa
query = query.values(ttl=domain['ttl'])
self.session.execute(query)
except Exception:
with excutils.save_and_reraise_exception():
self.session.rollback()
else:
self.session.commit()
def delete_domain(self, context, domain):
try:
domain_ref = self._get(tables.domains, domain['id'],
exceptions.DomainNotFound,
id_col=tables.domains.c.designate_id)
except exceptions.DomainNotFound:
# If the Domain is already gone, that's ok. We're deleting it
# anyway, so just log and continue.
LOG.critical(_LC('Attempted to delete a domain which is '
'not present in the backend. ID: %s') %
domain['id'])
return
self._delete(tables.domains, domain['id'],
exceptions.DomainNotFound,
id_col=tables.domains.c.designate_id)
# Ensure the records are deleted
query = tables.records.delete()\
.where(tables.records.c.domain_id == domain_ref['id'])
self.session.execute(query)
# Ensure domainmetadata is deleted
query = tables.domain_metadata.delete()\
.where(tables.domain_metadata.c.domain_id == domain_ref['id'])
self.session.execute(query)
# RecordSet Methods
def create_recordset(self, context, domain, recordset):
try:
self.session.begin(subtransactions=True)
# Create all the records..
for record in recordset.records:
self.create_record(context, domain, recordset, record)
except Exception:
with excutils.save_and_reraise_exception():
self.session.rollback()
else:
self.session.commit()
def update_recordset(self, context, domain, recordset):
# TODO(kiall): This is a total kludge. Intended as the simplest
# possible fix for the issue. This needs to be
# re-implemented correctly.
try:
self.session.begin(subtransactions=True)
self.delete_recordset(context, domain, recordset)
self.create_recordset(context, domain, recordset)
except Exception:
with excutils.save_and_reraise_exception():
self.session.rollback()
else:
self.session.commit()
def delete_recordset(self, context, domain, recordset):
# Ensure records are deleted
query = tables.records.delete()\
.where(tables.records.c.designate_recordset_id == recordset['id'])
self.session.execute(query)
# Record Methods
def create_record(self, context, domain, recordset, record):
domain_ref = self._get(tables.domains, domain['id'],
exceptions.DomainNotFound,
id_col=tables.domains.c.designate_id)
content = self._sanitize_content(recordset['type'], record['data'])
ttl = domain['ttl'] if recordset['ttl'] is None else recordset['ttl']
record_values = {
'designate_id': record['id'],
'designate_recordset_id': record['recordset_id'],
'domain_id': domain_ref['id'],
'name': recordset['name'].rstrip('.'),
'type': recordset['type'],
'content': content,
'ttl': ttl,
'inherit_ttl': True if recordset['ttl'] is None else False,
'prio': record['priority'],
'auth': self._is_authoritative(domain, recordset, record)
}
self._create(tables.records, record_values)
def update_record(self, context, domain, recordset, record):
record_ref = self._get_record(record['id'])
content = self._sanitize_content(recordset['type'], record['data'])
ttl = domain['ttl'] if recordset['ttl'] is None else recordset['ttl']
record_ref.update({
'content': content,
'ttl': ttl,
'inherit_ttl': True if recordset['ttl'] is None else False,
'prio': record['priority'],
'auth': self._is_authoritative(domain, recordset, record)
})
self._update(tables.records, record_ref,
exc_notfound=exceptions.RecordNotFound)
def delete_record(self, context, domain, recordset, record):
try:
record_ref = self._get(tables.records, record['id'],
exceptions.RecordNotFound,
id_col=tables.records.c.designate_id)
except exceptions.RecordNotFound:
# If the Record is already gone, that's ok. We're deleting it
# anyway, so just log and continue.
LOG.critical(_LC('Attempted to delete a record which is '
'not present in the backend. ID: %s') %
record['id'])
else:
self._delete(tables.records, record_ref['id'],
exceptions.RecordNotFound)
# Internal Methods
def _update_domainmetadata(self, domain_id, kind, values=None,
delete=True):
"""Updates a domain's metadata with new values"""
# Fetch all current metadata of the specified kind
values = values or []
query = select([tables.domain_metadata.c.content])\
.where(tables.domain_metadata.c.domain_id == domain_id)\
.where(tables.domain_metadata.c.kind == kind)
resultproxy = self.session.execute(query)
results = resultproxy.fetchall()
for metadata_id, content in results:
if content not in values:
if delete:
LOG.debug('Deleting stale domain metadata: %r' %
([domain_id, kind, content],))
# Delete no longer necessary values
# We should never get a notfound here, so UnknownFailure is
# a reasonable choice.
self._delete(tables.domain_metadata, metadata_id,
exceptions.UnknownFailure)
else:
# Remove pre-existing values from the list of values to insert
values.remove(content)
# Insert new values
for value in values:
LOG.debug('Inserting new domain metadata: %r' %
([domain_id, kind, value],))
self._create(
tables.domain_metadata,
{
"domain_id": domain_id,
"kind": kind,
"content": value
})
def _is_authoritative(self, domain, recordset, record):
# NOTE(kiall): See http://doc.powerdns.com/dnssec-modes.html
if recordset['type'] == 'NS' and recordset['name'] != domain['name']:
return False
else:
return True
def _sanitize_content(self, type, content):
if type in ('CNAME', 'MX', 'SRV', 'NS', 'PTR'):
return content.rstrip('.')
if type in ('TXT', 'SPF'):
return '"%s"' % content.replace('"', '\\"')
return content
def _get_record(self, record_id=None, domain=None, type_=None):
query = select([tables.records])
if record_id:
query = query.where(tables.records.c.designate_id == record_id)
if type_:
query = query.where(tables.records.c.type == type_)
if domain:
query = query.where(tables.records.c.domain_id == domain['id'])
resultproxy = self.session.execute(query)
results = resultproxy.fetchall()
if len(results) < 1:
raise exceptions.RecordNotFound('No record found')
elif len(results) > 1:
raise exceptions.RecordNotFound('Too many records found')
else:
return _map_col(query.columns.keys(), results[0])
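# --- Editor's illustrative sketch (not part of the original module) ---
# _sanitize_content() above strips the trailing dot from hostname-valued
# records and quotes TXT/SPF data before it is handed to PowerDNS. The calls
# below are hypothetical and assume `backend` is a PowerDNSBackend instance.
#
#     backend._sanitize_content('CNAME', 'www.example.org.')  # 'www.example.org'
#     backend._sanitize_content('TXT', 'hello world')         # '"hello world"'
#     backend._sanitize_content('A', '192.0.2.1')             # '192.0.2.1'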
| apache-2.0 | -5,963,925,626,582,785,000 | 36.2125 | 79 | 0.573396 | false |
scottdangelo/RemoveVolumeMangerLocks | cinder/volume/drivers/netapp/dataontap/nfs_7mode.py | 1 | 8344 |
# Copyright (c) 2012 NetApp, Inc. All rights reserved.
# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
# Copyright (c) 2014 Navneet Singh. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Bob Callaway. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for NetApp NFS storage.
"""
import os
from oslo_log import log as logging
import six
from cinder import exception
from cinder.i18n import _
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import client_7mode
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
LOG = logging.getLogger(__name__)
@six.add_metaclass(utils.TraceWrapperWithABCMetaclass)
class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver):
"""NetApp NFS driver for Data ONTAP (7-mode)."""
def __init__(self, *args, **kwargs):
super(NetApp7modeNfsDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(na_opts.netapp_7mode_opts)
def do_setup(self, context):
"""Do the customized set up on client if any for 7 mode."""
super(NetApp7modeNfsDriver, self).do_setup(context)
self.zapi_client = client_7mode.Client(
transport_type=self.configuration.netapp_transport_type,
username=self.configuration.netapp_login,
password=self.configuration.netapp_password,
hostname=self.configuration.netapp_server_hostname,
port=self.configuration.netapp_server_port,
vfiler=self.configuration.netapp_vfiler)
self.ssc_enabled = False
def check_for_setup_error(self):
"""Checks if setup occurred properly."""
api_version = self.zapi_client.get_ontapi_version()
if api_version:
major, minor = api_version
if major == 1 and minor < 9:
msg = _("Unsupported Data ONTAP version."
" Data ONTAP version 7.3.1 and above is supported.")
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _("Data ONTAP API version could not be determined.")
raise exception.VolumeBackendAPIException(data=msg)
super(NetApp7modeNfsDriver, self).check_for_setup_error()
def _clone_backing_file_for_volume(self, volume_name, clone_name,
volume_id, share=None):
"""Clone backing file for Cinder volume."""
(_host_ip, export_path) = self._get_export_ip_path(volume_id, share)
storage_path = self.zapi_client.get_actual_path_for_export(export_path)
target_path = '%s/%s' % (storage_path, clone_name)
self.zapi_client.clone_file('%s/%s' % (storage_path, volume_name),
target_path)
def _update_volume_stats(self):
"""Retrieve stats info from vserver."""
self._ensure_shares_mounted()
LOG.debug('Updating volume stats')
data = {}
netapp_backend = 'NetApp_NFS_7mode_direct'
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or netapp_backend
data['vendor_name'] = 'NetApp'
data['driver_version'] = self.VERSION
data['storage_protocol'] = 'nfs'
data['pools'] = self._get_pool_stats()
self._spawn_clean_cache_job()
self.zapi_client.provide_ems(self, netapp_backend, self._app_version,
server_type="7mode")
self._stats = data
def _get_pool_stats(self):
"""Retrieve pool (i.e. NFS share) stats info from SSC volumes."""
pools = []
for nfs_share in self._mounted_shares:
capacity = self._get_share_capacity_info(nfs_share)
pool = dict()
pool['pool_name'] = nfs_share
pool['QoS_support'] = False
pool.update(capacity)
thick = not self.configuration.nfs_sparsed_volumes
pool['thick_provisioning_support'] = thick
pool['thin_provisioning_support'] = not thick
pools.append(pool)
return pools
def _shortlist_del_eligible_files(self, share, old_files):
"""Prepares list of eligible files to be deleted from cache."""
file_list = []
(_, export_path) = self._get_export_ip_path(share=share)
exported_volume = self.zapi_client.get_actual_path_for_export(
export_path)
for old_file in old_files:
path = os.path.join(exported_volume, old_file)
u_bytes = self.zapi_client.get_file_usage(path)
file_list.append((old_file, u_bytes))
LOG.debug('Shortlisted files eligible for deletion: %s', file_list)
return file_list
def _is_filer_ip(self, ip):
"""Checks whether ip is on the same filer."""
try:
ifconfig = self.zapi_client.get_ifconfig()
if_info = ifconfig.get_child_by_name('interface-config-info')
if if_info:
ifs = if_info.get_children()
for intf in ifs:
v4_addr = intf.get_child_by_name('v4-primary-address')
if v4_addr:
ip_info = v4_addr.get_child_by_name('ip-address-info')
if ip_info:
address = ip_info.get_child_content('address')
if ip == address:
return True
else:
continue
except Exception:
return False
return False
def _share_match_for_ip(self, ip, shares):
"""Returns the share that is served by ip.
Multiple shares can have same dir path but
can be served using different ips. It finds the
share which is served by ip on same nfs server.
"""
if self._is_filer_ip(ip) and shares:
for share in shares:
ip_sh = share.split(':')[0]
if self._is_filer_ip(ip_sh):
LOG.debug('Share match found for ip %s', ip)
return share
LOG.debug('No share match found for ip %s', ip)
return None
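# --- Editor's illustrative sketch (not part of the original module) ---
# Hypothetical example of the matching above: given
#     shares = ['10.0.0.1:/export/vol1', '10.0.0.2:/export/vol1']
# and ip = '10.0.0.1', _share_match_for_ip() returns the first share whose
# host address is an interface on the same filer as the requested ip; if no
# share's host is local to the filer, it returns None.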
def _is_share_clone_compatible(self, volume, share):
"""Checks if share is compatible with volume to host its clone."""
thin = self.configuration.nfs_sparsed_volumes
return self._share_has_space_for_clone(share, volume['size'], thin)
def _check_volume_type(self, volume, share, file_name, extra_specs):
"""Matches a volume type for share file."""
qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
if extra_specs else None
if qos_policy_group:
raise exception.ManageExistingVolumeTypeMismatch(
reason=(_("Setting file qos policy group is not supported"
" on this storage family and ontap version.")))
volume_type = na_utils.get_volume_type_from_volume(volume)
if volume_type and 'qos_spec_id' in volume_type:
raise exception.ManageExistingVolumeTypeMismatch(
reason=_("QoS specs are not supported"
" on this storage family and ONTAP version."))
def _do_qos_for_volume(self, volume, extra_specs, cleanup=False):
"""Set QoS policy on backend from volume type information."""
# 7-mode DOT does not support QoS.
return
| apache-2.0 | -3,751,682,040,180,921,300 | 40.512438 | 79 | 0.607742 | false |
openstack/networking-plumgrid | networking_plumgrid/neutronclient/policy/policy_tag.py | 1 | 5554 |
# Copyright 2016 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from networking_plumgrid._i18n import _
from neutronclient.common import extension
class PolicyTag(extension.NeutronClientExtension):
resource = 'policy_tag'
resource_plural = 'policy_tags'
path = 'policy-tags'
object_path = '/%s' % path
resource_path = '/%s/%%s' % path
versions = ['2.0']
def args2body(self, parsed_args):
try:
if parsed_args.name:
ptag_name = parsed_args.name
body = {'policy_tag': {'name': ptag_name}}
else:
body = {'policy_tag': {}}
if parsed_args.tag_type:
if (str(parsed_args.tag_type).lower() == 'fip' or
str(parsed_args.tag_type).lower() == 'dot1q' or
str(parsed_args.tag_type).lower() == 'nsh'):
body['policy_tag']['tag_type'] \
= parsed_args.tag_type
else:
raise Exception("Supported values for policy tag type are:"
" 'fip', 'dot1q', 'nsh'")
else:
raise Exception("Policy tag type is required to be specified. "
"Supported values for policy tag type are:"
" 'fip', 'dot1q', 'nsh'")
if parsed_args.tag_id:
body['policy_tag']['tag_id'] = parsed_args.tag_id
if parsed_args.router_id:
body['policy_tag']['router_id'] = parsed_args.router_id
if parsed_args.floatingip_id:
body['policy_tag']['floatingip_id'] = parsed_args.floatingip_id
if (parsed_args.tag_type and parsed_args.tag_type.lower() == 'fip'
and not parsed_args.floatingip_id):
raise Exception("Floating IP UUID must be specified when "
"using tag type=fip")
if (parsed_args.tag_type and (parsed_args.tag_type.lower() == 'dot1q'
or parsed_args.tag_type.lower() == 'nsh')
and not parsed_args.tag_id):
raise Exception("ID in range (257-2047) must be specified when "
"using tag type=dot1q or type=nsh")
if (parsed_args.router_id and parsed_args.tag_type.lower() != 'fip'):
raise Exception("Tag type='fip' must be specified when using "
"Router ID")
if (parsed_args.tag_type.lower() == 'fip' and parsed_args.tag_id):
raise Exception("Tag type=='fip' does not support tag id.")
if (parsed_args.floatingip_id and
parsed_args.tag_type.lower() != 'fip'):
raise Exception('Floating ip cannot be associated with tag type:'
+ parsed_args.tag_type.lower())
return body
except KeyError as err:
raise Exception("KeyError: " + str(err))
class PolicyTagCreate(extension.ClientExtensionCreate,
PolicyTag):
"""Create a Policy Tag."""
shell_command = 'policy-tag-create'
def add_known_arguments(self, parser):
parser.add_argument(
'name', metavar='<POLICY-TAG-NAME>',
help=_('Descriptive name for policy tag.'))
parser.add_argument('--type', dest='tag_type',
help=_('Type'
' of policy tag. Options:'
' fip, dot1q, nsh'))
parser.add_argument('--floating-ip', dest='floatingip_id',
help=_('UUID of Floating IP to associate '
' with the Policy Tag.'))
parser.add_argument('--tag-id', dest='tag_id',
help=_('ID in range 257-2047 '))
parser.add_argument('--router-id', dest='router_id',
help=_('Router ID to be specified in case '
'of multiple External Gateways, when '
'associating a Floating IP.'))
def args2body(self, parsed_args):
body = args2body(self, parsed_args)
if parsed_args.tenant_id:
(body['policy_tag']
['tenant_id']) = parsed_args.tenant_id
return body
class PolicyTagList(extension.ClientExtensionList,
PolicyTag):
"""List policy tags that belong to a given tenant."""
shell_command = 'policy-tag-list'
list_columns = ['id', 'name', 'tag_type', 'tag_id', 'floating_ip_address']
pagination_support = True
sorting_support = True
class PolicyTagShow(extension.ClientExtensionShow,
PolicyTag):
"""Show information of a given policy tag."""
shell_command = 'policy-tag-show'
class PolicyTagDelete(extension.ClientExtensionDelete,
PolicyTag):
"""Delete a given policy tag."""
shell_command = 'policy-tag-delete'
class PolicyTagUpdate(extension.ClientExtensionUpdate,
PolicyTag):
"""Update a given policy-tag."""
shell_command = 'policy-tag-update'
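# --- Editor's illustrative sketch (not part of the original module) ---
# For a hypothetical CLI invocation such as
#     neutron policy-tag-create mytag --type dot1q --tag-id 300
# args2body() above builds a request body shaped like:
#     {'policy_tag': {'name': 'mytag', 'tag_type': 'dot1q', 'tag_id': '300'}}
# and PolicyTagCreate additionally copies tenant_id into the body when the
# caller supplies one.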
| apache-2.0 | -8,675,215,597,151,297,000 | 38.956835 | 78 | 0.566619 | false |
friend0/tower | tower/map/space.py | 1 | 4589 |
# coding=utf-8
"""
Region will serve as an abstract base class (ABC) to implement a standard interface amongst both Map and Surface objects
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import abc
import collections
from future.utils import with_metaclass
from builtins import *
class Space(with_metaclass(abc.ABCMeta, object)):
"""
`Space` represents a base class for flat, three-dimensional space. Concrete implementations of Space, will
implement abstractions like curved space (geodetic map) while exposing only fundamental abstractions of flat space
to planning algorithms.
Space defines as attribute the notion of a Point. Concrete implementations of Space may extend this idea to
geographic coordinates, etc., for example by making `lat` a property of the class `Map`, which implements Space.
Eg.
Space has a class attribute `Point`, which provides the Cartesian idea of a point in a plane
For our purposes, we'd like to align Optitrack's origin with that of the Surface defined by height-field
or function. For Maps, we'd like to align the origin to some coordinate representing the center of the
geographic region covered by the Tower.
We take standard world coordinates as our convention. This means delta(y) is proportional to delta(lat)
and that delta(x) corresponds to delta(lon). The relations between these quantities is abstracted
"""
# todo: include auto conversion dictionary, i.e. enable user to request target unit conversion from base unit
@abc.abstractproperty
def units(self):
"""
A point should travel with its units, in case it needs to be converted
:return:
"""
pass
@abc.abstractproperty
def x(self):
"""
Define how we refer to the x axis in concrete implementation
:return: A string corresponding to x axis in concrete implementation. For example, a map
implementation could expose a point's longitude through the x variable by returning 'lon'
"""
pass
@abc.abstractproperty
def y(self):
"""
Define how we refer to the x axis in concrete implementation
:return: A string corresponding to y axis in concrete implementation. For example, a map
implementation could expose a point's longitude through the y variable by returning 'lat'
"""
pass
@abc.abstractproperty
def name(self):
"""
"""
pass
@abc.abstractmethod
def point(self):
"""
The point function is a named-tuple factory that wraps the underlying `point` abstraction of a space into
a universal container with x first, followed by y, and then the units. This gives users of the Space ABC
a way to define x and y once, then retrieve a custom named-tuple object that is universally indexed by
[x, y, units], allowing them to be passed around with well-defined compatibility criteria.
A Map implementation of a space might do:
Coord = Map.point('Coordinate')
With `x` and `y` defined appropriately as 'lon' and 'lat' respectively, we could do:
point_a = Coord(lon=-122.0264, lat=36.9741)
:param name: Provide a custom name for `point`, default is `Point`
:return: A named tuple with fields corresponding to x, y, and units for concrete implementation of Space
"""
return collections.namedtuple(self.name, [self.x, self.y, self.units])
@abc.abstractproperty
def origin(self):
"""
¯\_(ツ)_/¯
:return:
"""
pass
@abc.abstractmethod
def get_point_elevation(self):
pass
@abc.abstractmethod
def get_distance_between(self, point_a, point_b, *args, **kwargs):
"""
:return: the distance between two points
"""
pass
@abc.abstractmethod
def get_edge(self, from_, to):
"""
Sample data between two points
:return: An array of points
"""
pass
@abc.abstractmethod
def get_elevation_along_edge(self, from_, to):
"""
Take as input an edge, which is an iterable of points, and get a set of elevations corresponding to
the elevations at those points.
:return: An iterable of the same length as the input, where each output corresponds to the input coordinate given
in the sequence.
"""
pass
@abc.abstractmethod
def get_surrounding_elevation(self):
pass
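# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal, hypothetical concrete Space showing how the point() factory is
# intended to be used. Every remaining abstract method/property of Space must
# also be overridden before the class can be instantiated; they are elided
# with `...` for brevity.
#
#     class CartesianSpace(Space):
#         name = 'Point'
#         x = 'x'
#         y = 'y'
#         units = 'meters'
#         ...  # origin, get_point_elevation, get_distance_between, etc.
#
#         def point(self):
#             return collections.namedtuple(self.name, [self.x, self.y, self.units])
#
#     # Point = CartesianSpace().point()
#     # p = Point(x=1.0, y=2.0, meters='m')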
| isc | -5,526,841,282,349,837,000 | 29.364238 | 120 | 0.663468 | false |
tensorflow/models | official/nlp/data/create_xlnet_pretraining_data_test.py | 1 | 10930 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.data.create_xlnet_pretraining_data."""
import os
import tempfile
from typing import List
from absl import logging
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.data import create_xlnet_pretraining_data as cpd
_VOCAB_WORDS = ["vocab_1", "vocab_2"]
# pylint: disable=invalid-name
def _create_files(
temp_dir: str, file_contents: List[List[str]]) -> List[str]:
"""Writes arbitrary documents into files."""
root_dir = tempfile.mkdtemp(dir=temp_dir)
files = []
for i, file_content in enumerate(file_contents):
destination = os.path.join(root_dir, "%d.txt" % i)
with open(destination, "wb") as f:
for line in file_content:
f.write(line.encode("utf-8"))
files.append(destination)
return files
def _get_mock_tokenizer():
"""Creates a mock tokenizer."""
class MockSpieceModel:
"""Mock Spiece model for testing."""
def __init__(self):
self._special_piece_to_id = {
"<unk>": 0,
}
for piece in set(list('!"#$%&\"()*+,-./:;?@[\\]^_`{|}~')):
self._special_piece_to_id[piece] = 1
def EncodeAsPieces(self, inputs: str) -> List[str]:
return inputs
def SampleEncodeAsPieces(self,
inputs: str,
nbest_size: int,
theta: float) -> List[str]:
del nbest_size, theta
return inputs
def PieceToId(self, piece: str) -> int:
return ord(piece[0])
def IdToPiece(self, id_: int) -> str:
return chr(id_) * 3
class Tokenizer:
"""Mock Tokenizer for testing."""
def __init__(self):
self.sp_model = MockSpieceModel()
def convert_ids_to_tokens(self, ids: List[int]) -> List[str]:
return [self.sp_model.IdToPiece(id_) for id_ in ids]
return Tokenizer()
class PreprocessDataTest(tf.test.TestCase):
def test_remove_extraneous_space(self):
line = " abc "
output = cpd._preprocess_line(line)
self.assertEqual(output, "abc")
def test_symbol_replacements(self):
self.assertEqual(cpd._preprocess_line("``abc``"), "\"abc\"")
self.assertEqual(cpd._preprocess_line("''abc''"), "\"abc\"")
def test_accent_replacements(self):
self.assertEqual(cpd._preprocess_line("åbc"), "abc")
def test_lower_case(self):
self.assertEqual(cpd._preprocess_line("ABC", do_lower_case=True), "abc")
def test_end_to_end(self):
self.assertEqual(
cpd._preprocess_line("HelLo ``wórLd``", do_lower_case=True),
"hello \"world\"")
class PreprocessAndTokenizeFilesTest(tf.test.TestCase):
def test_basic_end_to_end(self):
documents = [
[
"This is sentence 1.\n",
"This is sentence 2.\n",
"Sentence 3 is what this is.\n",
],
[
"This is the second document.\n",
"This is the second line of the second document.\n"
],
]
input_files = _create_files(temp_dir=self.get_temp_dir(),
file_contents=documents)
all_data = cpd.preprocess_and_tokenize_input_files(
input_files=input_files,
tokenizer=_get_mock_tokenizer(),
log_example_freq=1)
self.assertEqual(len(all_data), len(documents))
for token_ids, sentence_ids in all_data:
self.assertEqual(len(token_ids), len(sentence_ids))
def test_basic_correctness(self):
documents = [["a\n", "b\n", "c\n"]]
input_files = _create_files(temp_dir=self.get_temp_dir(),
file_contents=documents)
all_data = cpd.preprocess_and_tokenize_input_files(
input_files=input_files,
tokenizer=_get_mock_tokenizer(),
log_example_freq=1)
token_ids, sentence_ids = all_data[0]
self.assertAllClose(token_ids, [97, 98, 99])
self.assertAllClose(sentence_ids, [True, False, True])
def test_correctness_with_spaces_and_accents(self):
documents = [[
" å \n",
"b \n",
" c \n",
]]
input_files = _create_files(temp_dir=self.get_temp_dir(),
file_contents=documents)
all_data = cpd.preprocess_and_tokenize_input_files(
input_files=input_files,
tokenizer=_get_mock_tokenizer(),
log_example_freq=1)
token_ids, sentence_ids = all_data[0]
self.assertAllClose(token_ids, [97, 98, 99])
self.assertAllClose(sentence_ids, [True, False, True])
class BatchReshapeTests(tf.test.TestCase):
def test_basic_functionality(self):
per_host_batch_size = 3
mock_shape = (20,)
# Should truncate and reshape.
expected_result_shape = (3, 6)
tokens = np.zeros(mock_shape)
sentence_ids = np.zeros(mock_shape)
reshaped_data = cpd._reshape_to_batch_dimensions(
tokens=tokens,
sentence_ids=sentence_ids,
per_host_batch_size=per_host_batch_size)
for values in reshaped_data:
self.assertEqual(len(values.flatten()) % per_host_batch_size, 0)
self.assertAllClose(values.shape, expected_result_shape)
class CreateSegmentsTest(tf.test.TestCase):
def test_basic_functionality(self):
data_length = 10
tokens = np.arange(data_length)
sentence_ids = np.concatenate([np.zeros(data_length // 2),
np.ones(data_length // 2)])
begin_index = 0
total_length = 8
a_data, b_data, label = cpd._create_a_and_b_segments(
tokens=tokens,
sentence_ids=sentence_ids,
begin_index=begin_index,
total_length=total_length,
no_cut_probability=0.)
self.assertAllClose(a_data, [0, 1, 2, 3])
self.assertAllClose(b_data, [5, 6, 7, 8])
self.assertEqual(label, 1)
def test_no_cut(self):
data_length = 10
tokens = np.arange(data_length)
sentence_ids = np.zeros(data_length)
begin_index = 0
total_length = 8
a_data, b_data, label = cpd._create_a_and_b_segments(
tokens=tokens,
sentence_ids=sentence_ids,
begin_index=begin_index,
total_length=total_length,
no_cut_probability=0.)
self.assertGreater(len(a_data), 0)
self.assertGreater(len(b_data), 0)
self.assertEqual(label, 0)
def test_no_cut_with_probability(self):
data_length = 10
tokens = np.arange(data_length)
sentence_ids = np.concatenate([np.zeros(data_length // 2),
np.ones(data_length // 2)])
begin_index = 0
total_length = 8
a_data, b_data, label = cpd._create_a_and_b_segments(
tokens=tokens,
sentence_ids=sentence_ids,
begin_index=begin_index,
total_length=total_length,
no_cut_probability=1.)
self.assertGreater(len(a_data), 0)
self.assertGreater(len(b_data), 0)
self.assertEqual(label, 0)
class CreateInstancesTest(tf.test.TestCase):
"""Tests conversions of Token/Sentence IDs to training instances."""
def test_basic(self):
data_length = 12
tokens = np.arange(data_length)
sentence_ids = np.zeros(data_length)
seq_length = 8
instances = cpd._convert_tokens_to_instances(
tokens=tokens,
sentence_ids=sentence_ids,
per_host_batch_size=2,
seq_length=seq_length,
reuse_length=4,
tokenizer=_get_mock_tokenizer(),
bi_data=False,
num_cores_per_host=1,
logging_frequency=1)
for instance in instances:
self.assertEqual(len(instance.data), seq_length)
self.assertEqual(len(instance.segment_ids), seq_length)
self.assertIsInstance(instance.label, int)
self.assertIsInstance(instance.boundary_indices, list)
class TFRecordPathTests(tf.test.TestCase):
def test_basic(self):
base_kwargs = dict(
per_host_batch_size=1,
num_cores_per_host=1,
seq_length=2,
reuse_length=1)
config1 = dict(
prefix="test",
suffix="",
bi_data=True,
use_eod_token=False,
do_lower_case=True)
config1.update(base_kwargs)
expectation1 = "test_seqlen-2_reuse-1_bs-1_cores-1_uncased_bi.tfrecord"
self.assertEqual(cpd.get_tfrecord_name(**config1), expectation1)
config2 = dict(
prefix="",
suffix="test",
bi_data=False,
use_eod_token=False,
do_lower_case=False)
config2.update(base_kwargs)
expectation2 = "seqlen-2_reuse-1_bs-1_cores-1_cased_uni_test.tfrecord"
self.assertEqual(cpd.get_tfrecord_name(**config2), expectation2)
config3 = dict(
prefix="",
suffix="",
use_eod_token=True,
bi_data=False,
do_lower_case=True)
config3.update(base_kwargs)
expectation3 = "seqlen-2_reuse-1_bs-1_cores-1_uncased_eod_uni.tfrecord"
self.assertEqual(cpd.get_tfrecord_name(**config3), expectation3)
class TestCreateTFRecords(parameterized.TestCase, tf.test.TestCase):
@parameterized.named_parameters(
("bi_data_only", True, False, False),
("eod_token_only", False, True, True),
("lower_case_only", False, False, True),
("all_enabled", True, True, True),
)
def test_end_to_end(self,
bi_data: bool,
use_eod_token: bool,
do_lower_case: bool):
tokenizer = _get_mock_tokenizer()
num_documents = 5
sentences_per_document = 10
document_length = 50
documents = [
["a " * document_length for _ in range(sentences_per_document)]
for _ in range(num_documents)]
save_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
files = _create_files(temp_dir=self.get_temp_dir(), file_contents=documents)
cpd.create_tfrecords(
tokenizer=tokenizer,
input_file_or_files=",".join(files),
use_eod_token=use_eod_token,
do_lower_case=do_lower_case,
per_host_batch_size=8,
seq_length=8,
reuse_length=4,
bi_data=bi_data,
num_cores_per_host=2,
save_dir=save_dir)
self.assertTrue(any(filter(lambda x: x.endswith(".json"),
os.listdir(save_dir))))
self.assertTrue(any(filter(lambda x: x.endswith(".tfrecord"),
os.listdir(save_dir))))
if __name__ == "__main__":
np.random.seed(0)
logging.set_verbosity(logging.INFO)
tf.test.main()
| apache-2.0 | 8,178,356,071,302,560,000 | 29.780282 | 80 | 0.617004 | false |
anselal/antminer-monitor | antminermonitor/app.py | 1 | 3074 |
from flask import Flask
from antminermonitor.blueprints.asicminer import antminer, antminer_json
from antminermonitor.blueprints.user import user
from antminermonitor.extensions import login_manager, migrate
from antminermonitor.blueprints.asicminer.models.miner import Miner
from antminermonitor.blueprints.asicminer.models.settings import Settings
from antminermonitor.blueprints.user.models import User
from antminermonitor.database import db_session, init_db
import logging
import os
basedir = os.path.abspath(os.path.dirname(__file__))
def create_app(script_info=None, settings_override=None):
"""
Create a Flask application using the app factory pattern.
:return: Flask app
"""
app = Flask(__name__, instance_relative_config=True)
app.config.from_object('config.settings')
app.config.from_pyfile('settings.py', silent=True)
if settings_override:
app.config.update(settings_override)
app.register_blueprint(antminer)
app.register_blueprint(antminer_json)
app.register_blueprint(user, url_prefix='/user')
authentication(app, User)
extensions(app)
@app.shell_context_processor
def make_shell_context():
return dict(app=app, db=db_session, Miner=Miner, Settings=Settings, User=User)
@app.teardown_appcontext
def shutdown_session(exception=None):
db_session.remove()
return app
def create_logger(app=None):
"""
"""
app = app or create_app()
gunicorn_error_logger = logging.getLogger('gunicorn.error')
app.logger.handlers.extend(gunicorn_error_logger.handlers)
app.logger.setLevel(app.config['LOG_LEVEL'])
# logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
# create a file handler
handler = logging.FileHandler(os.path.join(
basedir, 'logs/antminer_monitor.log'), mode='a') # mode 'a' is default
handler.setLevel(logging.WARNING)
# create a logging format
formatter = logging.Formatter(
'%(asctime)s | %(name)s | %(levelname)s | %(message)s')
handler.setFormatter(formatter)
# add handlers to the logger
logger.addHandler(handler)
return logger
def extensions(app):
"""
Register 0 or more extensions (mutates the app passed in).
:param app: Flask application instance
:return: None
"""
login_manager.init_app(app)
migrate.init_app(app, db_session)
return
def authentication(app, user_model):
"""
Initialize the Flask-Login extension (mutates the app passed in).
:param app: Flask application instance
:param user_model: Model that contains the authentication information
:type user_model: SQLAlchemy model
:return: None
"""
login_manager.login_view = 'user.login'
# login_manager.login_message = ''
login_manager.refresh_view = 'user.login'
login_manager.needs_refresh_message = ('You need to login again to access'
' this page!!!')
@login_manager.user_loader
def load_user(uid):
return user_model.query.get(uid)
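# --- Editor's illustrative sketch (not part of the original module) ---
# Typical use of the application factory above from a WSGI entry point; the
# module name `wsgi.py` and the run() arguments are assumptions.
#
#     # wsgi.py
#     from antminermonitor.app import create_app
#
#     application = create_app()
#
#     if __name__ == '__main__':
#         application.run(host='0.0.0.0', port=5000)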
| gpl-3.0 | 268,709,785,204,979,900 | 27.728972 | 79 | 0.704945 | false |
dschien/simple-MC | simplemc/__init__.py | 1 | 3544 |
'''
simple-MC: Main module
Copyright 2014, Dan Schien
Licensed under MIT.
'''
import importlib
import xlrd
__author__ = 'schien'
NAME = 'name'
TYPE = 'type'
PARAM_A = 'param_a'
PARAM_B = 'param_b'
PARAM_C = 'param_c'
MODULE = 'module'
LABEL = 'label'
UNIT = 'unit'
TABLE_STRUCT = {
NAME: 0,
MODULE: 1,
TYPE: 2,
PARAM_A: 3,
PARAM_B: 4,
PARAM_C: 5,
UNIT: 6,
LABEL: 7,
'comment': 8,
'description': 9
}
class ModelLoader(object):
def __init__(self, file, size=1):
self.wb = load_workbook(file)
self.size = size
def get_row(self, name):
i = [row[TABLE_STRUCT[NAME]] for row in self.wb].index(name)
return self.wb[i]
def get_val(self, name, args=None):
"""
Apply function to arguments from excel table
args: optional additional args
If no args are given, applies default size from constructor
"""
row = self.get_row(name)
f, p = build_distribution(row)
if args is not None:
ret = f(*p, **args)
assert ret.shape == (self.size,)
return ret
else:
ret = f(*p, size=self.size)
assert ret.shape == (self.size,)
return ret
def get_label(self, name):
try:
row = self.get_row(name)
except:
return name
return row[TABLE_STRUCT[LABEL]]
def get_property(self, name, prop):
try:
row = self.get_row(name)
except:
return name
return row[TABLE_STRUCT[prop]]
def __getitem__(self, name):
"""
Get the distribution for a item name from the table
Then execute and return the result array
"""
return self.get_val(name)
def build_distribution(row):
module = importlib.import_module(row[TABLE_STRUCT[MODULE]])
func = getattr(module, row[TABLE_STRUCT[TYPE]])
if row[TABLE_STRUCT[TYPE]] == 'choice':
cell = row[TABLE_STRUCT[PARAM_A]]
if type(cell) in [float, int]:
params = ([cell],)
else:
tokens = cell.split(',')
params = [float(token.strip()) for token in tokens]
params = (params, )
elif row[TABLE_STRUCT[TYPE]] == 'Distribution':
func = func()
params = tuple(row[TABLE_STRUCT[i]] for i in [PARAM_A, PARAM_B, PARAM_C] if row[TABLE_STRUCT[i]])
else:
params = tuple(row[TABLE_STRUCT[i]] for i in [PARAM_A, PARAM_B, PARAM_C] if row[TABLE_STRUCT[i]])
return func, params
def load_workbook(file):
wb = xlrd.open_workbook(file)
sh = wb.sheet_by_index(0)
var_column = sh.col_values(TABLE_STRUCT[NAME])
module_column = sh.col_values(TABLE_STRUCT[MODULE])
distribution_type_column = sh.col_values(TABLE_STRUCT[TYPE])
param_a_colum = sh.col_values(TABLE_STRUCT[PARAM_A])
param_b_colum = sh.col_values(TABLE_STRUCT[PARAM_B])
param_c_colum = sh.col_values(TABLE_STRUCT[PARAM_C])
unit_colum = sh.col_values(TABLE_STRUCT[UNIT])
label_colum = sh.col_values(TABLE_STRUCT[LABEL])
rows_es = zip(var_column, module_column, distribution_type_column, param_a_colum, param_b_colum, param_c_colum,
unit_colum, label_colum)
return rows_es
def main():
'''
Main function of the boilerplate code is the entry point of the 'simplemc' executable script (defined in setup.py).
Use doctests, those are very helpful.
>>> main()
Hello
>>> 2 + 2
4
'''
print("Hello")
| mit | -6,855,969,719,708,827,000 | 24.681159 | 119 | 0.579007 | false |
wakatime/wakatime | tests/test_dependencies.py | 1 | 16387 |
# -*- coding: utf-8 -*-
from wakatime.main import execute
from wakatime.packages import requests
import logging
import os
import time
import shutil
from testfixtures import log_capture
from wakatime.compat import is_py26, u
from wakatime.constants import SUCCESS
from wakatime.exceptions import NotYetImplemented
from wakatime.dependencies import DependencyParser, TokenParser
from wakatime.stats import get_lexer_by_name
from .utils import mock, ANY, CustomResponse, TemporaryDirectory, TestCase
if is_py26:
from wakatime.packages.py26.pygments.lexers import ClassNotFound, PythonLexer
else:
from wakatime.packages.py27.pygments.lexers import ClassNotFound, PythonLexer
class DependenciesTestCase(TestCase):
patch_these = [
'wakatime.packages.requests.adapters.HTTPAdapter.send',
'wakatime.offlinequeue.Queue.push',
['wakatime.offlinequeue.Queue.pop', None],
['wakatime.offlinequeue.Queue.connect', None],
'wakatime.session_cache.SessionCache.save',
'wakatime.session_cache.SessionCache.delete',
['wakatime.session_cache.SessionCache.get', requests.session],
['wakatime.session_cache.SessionCache.connect', None],
]
def shared(self, expected_dependencies=[], expected_language=ANY, expected_lines=ANY, entity='', config='good_config.cfg', extra_args=[]):
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
config = os.path.join('tests/samples/configs', config)
with TemporaryDirectory() as tempdir:
shutil.copy(os.path.join('tests/samples/codefiles', entity), os.path.join(tempdir, os.path.basename(entity)))
entity = os.path.realpath(os.path.join(tempdir, os.path.basename(entity)))
now = u(int(time.time()))
args = ['--file', entity, '--config', config, '--time', now] + extra_args
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertNothingPrinted()
heartbeat = {
'language': expected_language,
'lines': expected_lines,
'entity': os.path.realpath(entity),
'project': ANY,
'branch': ANY,
'dependencies': expected_dependencies,
'time': float(now),
'type': 'file',
'is_write': False,
'user_agent': ANY,
}
self.assertHeartbeatSent(heartbeat)
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsSynced()
self.assertSessionCacheSaved()
def test_token_parser(self):
with self.assertRaises(NotYetImplemented):
source_file = 'tests/samples/codefiles/c_only/non_empty.h'
parser = TokenParser(source_file)
parser.parse()
with mock.patch('wakatime.dependencies.TokenParser._extract_tokens') as mock_extract_tokens:
source_file = 'tests/samples/codefiles/see.h'
parser = TokenParser(source_file)
parser.tokens
mock_extract_tokens.assert_called_once_with()
parser = TokenParser(None)
parser.append('one.two.three', truncate=True, truncate_to=1)
parser.append('one.two.three', truncate=True, truncate_to=2)
parser.append('one.two.three', truncate=True, truncate_to=3)
parser.append('one.two.three', truncate=True, truncate_to=4)
expected = [
'one',
'one.two',
'one.two.three',
'one.two.three',
]
self.assertEquals(parser.dependencies, expected)
@log_capture()
def test_dependency_parser(self, logs):
logging.disable(logging.NOTSET)
lexer = PythonLexer
lexer.__class__.__name__ = 'FooClass'
parser = DependencyParser(None, lexer)
dependencies = parser.parse()
log_output = u("\n").join([u(' ').join(x) for x in logs.actual()])
self.assertEquals(log_output, '')
self.assertNothingPrinted()
expected = []
self.assertEquals(dependencies, expected)
@log_capture()
def test_missing_dependency_parser_in_debug_mode(self, logs):
logging.disable(logging.NOTSET)
# turn on debug mode
log = logging.getLogger('WakaTime')
log.setLevel(logging.DEBUG)
lexer = PythonLexer
lexer.__class__.__name__ = 'FooClass'
parser = DependencyParser(None, lexer)
# parse dependencies
dependencies = parser.parse()
log_output = u("\n").join([u(' ').join(x) for x in logs.actual()])
expected = 'WakaTime DEBUG Parsing dependencies not supported for python.FooClass'
self.assertEquals(log_output, expected)
self.assertNothingPrinted()
expected = []
self.assertEquals(dependencies, expected)
@log_capture()
def test_missing_dependency_parser_importerror_in_debug_mode(self, logs):
logging.disable(logging.NOTSET)
# turn on debug mode
log = logging.getLogger('WakaTime')
log.setLevel(logging.DEBUG)
with mock.patch('wakatime.dependencies.import_module') as mock_import:
mock_import.side_effect = ImportError('foo')
lexer = PythonLexer
lexer.__class__.__name__ = 'FooClass'
parser = DependencyParser(None, lexer)
# parse dependencies
dependencies = parser.parse()
log_output = u("\n").join([u(' ').join(x) for x in logs.actual()])
expected = 'WakaTime DEBUG Parsing dependencies not supported for python.FooClass'
self.assertEquals(log_output, expected)
self.assertNothingPrinted()
expected = []
self.assertEquals(dependencies, expected)
def test_io_error_suppressed_when_parsing_dependencies(self):
with mock.patch('wakatime.dependencies.open') as mock_open:
mock_open.side_effect = IOError('')
self.shared(
expected_dependencies=[],
expected_language='Python',
expected_lines=38,
entity='python.py',
)
def test_classnotfound_error_raised_when_passing_none_to_pygments(self):
with self.assertRaises(ClassNotFound):
get_lexer_by_name(None)
def test_classnotfound_error_suppressed_when_parsing_dependencies(self):
with mock.patch('wakatime.stats.guess_lexer_using_filename') as mock_guess:
mock_guess.return_value = (None, None)
with mock.patch('wakatime.stats.get_filetype_from_buffer') as mock_filetype:
mock_filetype.return_value = 'foo'
self.shared(
expected_dependencies=[],
expected_language=None,
expected_lines=38,
entity='python.py',
)
def test_dependencies_still_detected_when_alternate_language_used(self):
with mock.patch('wakatime.stats.guess_lexer') as mock_guess_lexer:
mock_guess_lexer.return_value = None
self.shared(
expected_dependencies=[
'app',
'django',
'first',
'flask',
'jinja',
'mock',
'pygments',
'second',
'simplejson',
'sqlalchemy',
'unittest',
],
expected_language='Python',
expected_lines=38,
entity='python.py',
extra_args=['--alternate-language', 'PYTHON'],
)
def test_long_dependencies_removed(self):
self.shared(
expected_dependencies=[
'django',
'flask',
'notlongenoughnotlongenoughnotlongenoughnotlongenoughnotlongenoughnotlongenoughnotlongenoughnotlongenoughnotlongenoughnotlongenoughnotlongenoughnotlongenoughnotlongenoughnotlongenoughnotlongenoughnotlo',
],
expected_language='Python',
expected_lines=20,
entity='python_with_long_import.py',
)
def test_python_dependencies_detected(self):
self.shared(
expected_dependencies=[
'app',
'django',
'first',
'flask',
'jinja',
'mock',
'pygments',
'second',
'simplejson',
'sqlalchemy',
'unittest',
],
expected_language='Python',
expected_lines=38,
entity='python.py',
)
def test_bower_dependencies_detected(self):
self.shared(
expected_dependencies=[
'bootstrap',
'bootstrap-daterangepicker',
'moment',
'moment-timezone',
'bower',
'animate.css',
],
expected_language='JSON',
expected_lines=11,
entity='bower.json',
)
def test_grunt_dependencies_detected(self):
self.shared(
expected_dependencies=[
'grunt',
],
expected_language=None,
expected_lines=23,
entity='Gruntfile',
)
def test_java_dependencies_detected(self):
self.shared(
expected_dependencies=[
'colorfulwolf.webcamapplet',
'foobar',
'googlecode.javacv',
'apackage.something',
'anamespace.other',
],
expected_language='Java',
expected_lines=22,
entity='java.java',
)
def test_c_dependencies_detected(self):
self.shared(
expected_dependencies=[
'openssl',
],
expected_language='C',
expected_lines=8,
entity='c_only/non_empty.c',
)
def test_cpp_dependencies_detected(self):
self.shared(
expected_dependencies=[
'openssl',
],
expected_language='C++',
expected_lines=8,
entity='c_and_cpp/non_empty.cpp',
)
def test_csharp_dependencies_detected(self):
self.shared(
expected_dependencies=[
'Proper',
'Fart',
'Math',
'WakaTime',
],
expected_language='C#',
expected_lines=18,
entity='csharp/seesharp.cs',
)
def test_php_dependencies_detected(self):
self.shared(
expected_dependencies=[
'Interop',
'FooBarOne',
'FooBarTwo',
'FooBarThree',
'FooBarFour',
'FooBarSeven',
'FooBarEight',
'ArrayObject',
"'ServiceLocator.php'",
"'ServiceLocatorTwo.php'",
],
expected_language='PHP',
expected_lines=116,
entity='php.php',
)
def test_php_in_html_dependencies_detected(self):
self.shared(
expected_dependencies=[
'"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js"',
],
expected_language='HTML',
expected_lines=22,
entity='html-with-php.html',
)
def test_html_django_dependencies_detected(self):
self.shared(
expected_dependencies=[
'"libs/json2.js"',
],
expected_language='HTML',
expected_lines=40,
entity='html-django.html',
)
def test_go_dependencies_detected(self):
self.shared(
expected_dependencies=[
'"compress/gzip"',
'"direct"',
'"foobar"',
'"github.com/golang/example/stringutil"',
'"image/gif"',
'"log"',
'"math"',
'"oldname"',
'"os"',
'"supress"',
],
expected_language='Go',
expected_lines=24,
entity='go.go',
)
def test_es6_dependencies_detected(self):
self.shared(
expected_dependencies=[
'bravo',
'foxtrot',
'india',
'kilo',
'november',
'oscar',
'quebec',
'tango',
'uniform',
'victor',
'whiskey',
],
expected_language='JavaScript',
expected_lines=37,
entity='es6.js',
)
def test_typescript_dependencies_detected(self):
self.shared(
expected_dependencies=[
'bravo',
'foxtrot',
'india',
'kilo',
'november',
'oscar',
'quebec',
'tango',
'uniform',
'victor',
'whiskey',
],
expected_language='TypeScript',
expected_lines=37,
entity='typescript.ts',
)
def test_swift_dependencies_detected(self):
self.shared(
expected_dependencies=[
'UIKit',
'PromiseKit',
],
expected_language='Swift',
expected_lines=16,
entity='swift.swift',
)
def test_objective_c_dependencies_detected(self):
self.shared(
expected_dependencies=[
'SomeViewController',
'UIKit',
'PromiseKit',
],
expected_language='Objective-C',
expected_lines=18,
entity='objective-c.m',
)
def test_scala_dependencies_detected(self):
self.shared(
expected_dependencies=[
'com.alpha.SomeClass',
'com.bravo.something',
'com.charlie',
'golf',
'com.hotel.india',
'juliett.kilo',
],
expected_language='Scala',
expected_lines=14,
entity='scala.scala',
)
def test_rust_dependencies_detected(self):
self.shared(
expected_dependencies=[
'proc_macro',
'phrases',
'syn',
'quote',
],
expected_language='Rust',
expected_lines=21,
entity='rust.rs',
)
def test_kotlin_dependencies_detected(self):
self.shared(
expected_dependencies=[
'alpha.time',
'bravo.charlie',
'delta.io',
'echo.Foxtrot',
'h',
],
expected_language='Kotlin',
expected_lines=24,
entity='kotlin.kt',
)
def test_haxe_dependencies_detected(self):
self.shared(
expected_dependencies=[
'alpha',
'bravo',
'Math',
'charlie',
'delta',
],
expected_language='Haxe',
expected_lines=18,
entity='haxe.hx',
)
def test_haskell_dependencies_detected(self):
self.shared(
expected_dependencies=[
'Control',
'Data',
'Network',
'System',
],
expected_language='Haskell',
expected_lines=20,
entity='haskell.hs',
)
def test_elm_dependencies_detected(self):
self.shared(
expected_dependencies=[
'Color',
'Dict',
'TempFontAwesome',
'Html',
'Markdown',
'String',
],
expected_language='Elm',
expected_lines=21,
entity='elm.elm',
)
| bsd-3-clause | -2,234,770,744,256,403,200 | 30.153992 | 219 | 0.511198 | false |
cr1901/HDMI2USB-litex-firmware | targets/nexys_video/video.py | 1 | 4745 |
from migen import Signal  # needed by the LED counters in VideoSoCDebug
from litevideo.input import HDMIIn
from litevideo.output import VideoOut
from litex.soc.cores.frequency_meter import FrequencyMeter
from litescope import LiteScopeAnalyzer
from targets.utils import csr_map_update, period_ns
from targets.nexys_video.net import NetSoC as BaseSoC
class VideoSoC(BaseSoC):
csr_peripherals = (
"hdmi_out0",
"hdmi_in0",
"hdmi_in0_freq",
"hdmi_in0_edid_mem",
)
csr_map_update(BaseSoC.csr_map, csr_peripherals)
interrupt_map = {
"hdmi_in0": 4,
}
interrupt_map.update(BaseSoC.interrupt_map)
def __init__(self, platform, *args, **kwargs):
BaseSoC.__init__(self, platform, *args, **kwargs)
mode = "ycbcr422"
if mode == "ycbcr422":
dw = 16
elif mode == "rgb":
dw = 32
else:
raise SystemError("Unknown pixel mode.")
pix_freq = 148.50e6
# hdmi in 0
hdmi_in0_pads = platform.request("hdmi_in")
self.submodules.hdmi_in0 = HDMIIn(
hdmi_in0_pads,
self.sdram.crossbar.get_port(mode="write"),
fifo_depth=512,
device="xc7")
self.submodules.hdmi_in0_freq = FrequencyMeter(period=self.clk_freq)
self.comb += [
self.hdmi_in0_freq.clk.eq(self.hdmi_in0.clocking.cd_pix.clk),
hdmi_in0_pads.txen.eq(1)
]
self.platform.add_period_constraint(self.hdmi_in0.clocking.cd_pix.clk, period_ns(1*pix_freq))
self.platform.add_period_constraint(self.hdmi_in0.clocking.cd_pix1p25x.clk, period_ns(1.25*pix_freq))
self.platform.add_period_constraint(self.hdmi_in0.clocking.cd_pix5x.clk, period_ns(5*pix_freq))
self.platform.add_false_path_constraints(
self.crg.cd_sys.clk,
self.hdmi_in0.clocking.cd_pix.clk,
self.hdmi_in0.clocking.cd_pix1p25x.clk,
self.hdmi_in0.clocking.cd_pix5x.clk)
# hdmi out 0
hdmi_out0_pads = platform.request("hdmi_out")
hdmi_out0_dram_port = self.sdram.crossbar.get_port(
mode="read",
dw=dw,
cd="hdmi_out0_pix",
reverse=True)
self.submodules.hdmi_out0 = VideoOut(
platform.device,
hdmi_out0_pads,
hdmi_out0_dram_port,
mode=mode,
fifo_depth=4096)
self.platform.add_false_path_constraints(
self.crg.cd_sys.clk,
self.hdmi_out0.driver.clocking.cd_pix.clk)
self.platform.add_period_constraint(self.hdmi_out0.driver.clocking.cd_pix.clk, period_ns(1*pix_freq))
self.platform.add_period_constraint(self.hdmi_out0.driver.clocking.cd_pix5x.clk, period_ns(5*pix_freq))
self.platform.add_false_path_constraints(
self.crg.cd_sys.clk,
self.hdmi_out0.driver.clocking.cd_pix.clk,
self.hdmi_out0.driver.clocking.cd_pix5x.clk)
for name, value in sorted(self.platform.hdmi_infos.items()):
self.add_constant(name, value)
class VideoSoCDebug(VideoSoC):
csr_peripherals = (
"analyzer",
)
csr_map_update(VideoSoC.csr_map, csr_peripherals)
def __init__(self, platform, *args, **kwargs):
VideoSoC.__init__(self, platform, *args, **kwargs)
# # #
# analyzer
analyzer_signals = [
self.hdmi_in0.data0_charsync.raw_data,
self.hdmi_in0.data1_charsync.raw_data,
self.hdmi_in0.data2_charsync.raw_data,
self.hdmi_in0.data0_charsync.synced,
self.hdmi_in0.data1_charsync.synced,
self.hdmi_in0.data2_charsync.synced,
self.hdmi_in0.data0_charsync.data,
self.hdmi_in0.data1_charsync.data,
self.hdmi_in0.data2_charsync.data,
self.hdmi_in0.syncpol.valid_o,
self.hdmi_in0.syncpol.de,
self.hdmi_in0.syncpol.hsync,
self.hdmi_in0.syncpol.vsync,
]
self.submodules.analyzer = LiteScopeAnalyzer(analyzer_signals, 1024, cd="hdmi_in0_pix", cd_ratio=2)
# leds
pix_counter = Signal(32)
self.sync.hdmi_in0_pix += pix_counter.eq(pix_counter + 1)
self.comb += platform.request("user_led", 0).eq(pix_counter[26])
pix1p25x_counter = Signal(32)
self.sync.pix1p25x += pix1p25x_counter.eq(pix1p25x_counter + 1)
self.comb += platform.request("user_led", 1).eq(pix1p25x_counter[26])
pix5x_counter = Signal(32)
self.sync.hdmi_in0_pix5x += pix5x_counter.eq(pix5x_counter + 1)
self.comb += platform.request("user_led", 2).eq(pix5x_counter[26])
def do_exit(self, vns):
self.analyzer.export_csv(vns, "test/analyzer.csv")
SoC = VideoSoC
|
bsd-2-clause
| -5,000,189,978,101,860,000 | 31.5 | 111 | 0.60295 | false |
googleads/google-ads-python
|
google/ads/googleads/v6/errors/types/function_parsing_error.py
|
1
|
1514
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v6.errors",
marshal="google.ads.googleads.v6",
manifest={"FunctionParsingErrorEnum",},
)
class FunctionParsingErrorEnum(proto.Message):
r"""Container for enum describing possible function parsing
errors.
"""
class FunctionParsingError(proto.Enum):
r"""Enum describing possible function parsing errors."""
UNSPECIFIED = 0
UNKNOWN = 1
NO_MORE_INPUT = 2
EXPECTED_CHARACTER = 3
UNEXPECTED_SEPARATOR = 4
UNMATCHED_LEFT_BRACKET = 5
UNMATCHED_RIGHT_BRACKET = 6
TOO_MANY_NESTED_FUNCTIONS = 7
MISSING_RIGHT_HAND_OPERAND = 8
INVALID_OPERATOR_NAME = 9
FEED_ATTRIBUTE_OPERAND_ARGUMENT_NOT_INTEGER = 10
NO_OPERANDS = 11
TOO_MANY_OPERANDS = 12
__all__ = tuple(sorted(__protobuf__.manifest))
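# --- Illustrative usage (a hedged sketch, not part of the generated file) ---
# Shows how the nested enum is typically consumed; the chosen member is
# arbitrary and the assertions only rely on proto-plus enums behaving like
# IntEnum members.
if __name__ == "__main__":
    error = FunctionParsingErrorEnum.FunctionParsingError.NO_MORE_INPUT
    assert error == 2
    assert error.name == "NO_MORE_INPUT"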
|
apache-2.0
| 6,158,383,707,332,914,000 | 29.28 | 74 | 0.682959 | false |
BakanovKirill/Medicine
|
src/medicine/settings.py
|
1
|
5625
|
# Django settings for medicine project.
import os
from django.conf import global_settings
DEBUG = True
TEMPLATE_DEBUG = DEBUG
PROJECT_ROOT = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../../')
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'medicine.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'ocyom8ze-5%rlr5^tysfor2!xy%q6-#(+f9wnmp#aq@k0*q)h^'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'medicine.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'medicine.wsgi.application'
TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), '..', 'templates').replace('\\','/'),)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'south',
'medicine',
)
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
'django.core.context_processors.request',
'django.core.context_processors.csrf',
'medicine.context_processors.settings',
)
LOGIN_URL = '/login/'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
MIDDLEWARE_CLASSES = ('medicine.middleware.QueryCountDebugMiddleware',) + MIDDLEWARE_CLASSES
|
mit
| -757,473,652,958,754,400 | 33.09697 | 108 | 0.685156 | false |
sam-falvo/kestrel
|
cores/S16X4B/rtl/nmigen/interfaces.py
|
1
|
3319
|
from nmigen import Signal
# Unprefixed opcodes are 100% backward compatible with S16X4A.
# New addition is the use of opcode 8 as an escape prefix.
# Additionally, opcode 9 is reserved as a prefix for future
# use.
OPC_NOP = 0
OPC_LIT = 1
OPC_FWM = 2
OPC_SWM = 3
OPC_ADD = 4
OPC_AND = 5
OPC_XOR = 6
OPC_ZGO = 7
OPC_prefix8 = 8
OPC_prefix9 = 9
OPC_FBM = 10
OPC_SBM = 11
OPC_LCALL = 12
OPC_ICALL = 13
OPC_GO = 14
OPC_NZGO = 15
# 8-prefixed opcodes below.
PFX8_FCR = 0 # Fetch Control Register
PFX8_SCR = 1 # Store Control Register
PFX8_INW = 2 # Read word from I/O device
PFX8_OUTW = 3 # Write word to I/O device
PFX8_unk4 = 4
PFX8_unk5 = 5
PFX8_unk6 = 6
PFX8_unk7 = 7
PFX8_unk8 = 8
PFX8_unk9 = 9
PFX8_unkA = 10
PFX8_unkB = 11
PFX8_unkC = 12
PFX8_unkD = 13
PFX8_unkE = 14
PFX8_unkF = 15
# Address Types
#
# AT_O is a 3 bit signal. 5 out of the 8 cycle types are defined.
# Values are defined so that AT_O[0:2] can be tied directly to
# hardware expecting VPA_O and VDA_O of a 65816 or S16X4A.
#
# 2 1 0
# +-------+-------+-------+
# | IOREQ | VPA | VDA |
# +-------+-------+-------+
#
# (I avoid the use of "Cycle Type" because this term has some
# prior-defined meaning in the context of a Wishbone interconnect.)
AT_IDLE = 0 # Bus is idle; address is meaningless.
AT_DAT = 1 # Bus is presenting a data memory address.
AT_PGM = 2 # Bus is presenting a program memory address.
AT_ARG = 3 # Bus is presenting a program memory address, but for an operand.
AT_unk4 = 4 #
AT_IO = 5 # Bus is presenting an I/O port address.
AT_unk6 = 6 #
AT_unk7 = 7 #
def create_s16x4b_interface(self, platform=''):
self.adr_o = Signal(15) # Word address
self.we_o = Signal(1)
self.cyc_o = Signal(1)
self.stb_o = Signal(1)
self.sel_o = Signal(2)
self.at_o = Signal(3) # New with S16X4B; replaces vda_o and vpa_o
self.dat_o = Signal(16)
self.ack_i = Signal(1)
self.err_i = Signal(1) # New with S16X4A (then called ABORT_I)
self.dat_i = Signal(16)
self.irq_i = Signal(16) # New with S16X4B
self.trap_o = Signal(1) # New with S16X4B (acks all exceptions)
self.intack_o = Signal(1) # New with S16X4B (acks only interrupts)
if platform == 'formal':
self.fv_pc = Signal(15)
self.fv_iw = Signal(16)
self.fv_f_e = Signal(1)
self.fv_u = Signal(16)
self.fv_v = Signal(16)
self.fv_w = Signal(16)
self.fv_x = Signal(16)
self.fv_y = Signal(16)
self.fv_z = Signal(16)
self.fv_opc = Signal(4)
self.fv_cycle_done = Signal(1)
self.fv_current_slot = Signal(2)
self.fv_epc = Signal(len(self.fv_pc))
self.fv_eat = Signal(len(self.at_o))
self.fv_ecs = Signal(len(self.fv_current_slot))
self.fv_efe = Signal(len(self.fv_f_e))
self.fv_eiw = Signal(len(self.fv_iw))
self.fv_eipa = Signal(len(self.fv_pc))
self.fv_ipa = Signal(len(self.fv_eipa))
self.fv_ie = Signal(len(self.dat_i))
self.fv_eie = Signal(len(self.fv_ie))
self.fv_ehpc = Signal(len(self.fv_pc))
self.fv_ihpc = Signal(len(self.fv_pc))
self.fv_take_int = Signal(1)
self.fv_sample_fe = Signal(1)
self.fv_sample_at = Signal(len(self.fv_eat))
self.fv_take_trap = Signal(1)
|
mpl-2.0
| -6,783,078,153,429,094,000 | 29.449541 | 78 | 0.610124 | false |
neocortex/paletti
|
paletti/utils.py
|
1
|
3459
|
import numpy as np
def rgb2lab(image):
""" Transforms an RGB-image to a LAB-image. """
return xyz2lab(rgb2xyz(image))
def lab2rgb(image):
""" Transforms a LAB-image to an RGB-image. """
return xyz2rgb(lab2xyz(image))
def rgb2xyz(image):
""" Transforms an RGB-mage to a XYZ-image. """
image = np.array(image, dtype='float64')
r = image[:, :, 0] / 255.
g = image[:, :, 1] / 255.
b = image[:, :, 2] / 255.
ri = r > .04045
r[ri] = ((r[ri] + .055) / 1.055) ** 2.4
r[~ri] = r[~ri] / 12.92
gi = g > .04045
g[gi] = ((g[gi] + .055) / 1.055) ** 2.4
g[~gi] = g[~gi] / 12.92
bi = b > .04045
b[bi] = ((b[bi] + .055) / 1.055) ** 2.4
b[~bi] = b[~bi] / 12.92
r *= 100.
g *= 100.
b *= 100.
x = r * .4124 + g * .3576 + b * .1805
y = r * .2126 + g * .7152 + b * .0722
z = r * .0193 + g * .1192 + b * .9505
return np.transpose(np.array([x, y, z]), (1, 2, 0))
def xyz2rgb(image):
""" Transforms a XYZ-image to an RGB-image. """
x = image[:, :, 0] / 100.
y = image[:, :, 1] / 100.
z = image[:, :, 2] / 100.
var_R = x * 3.2406 + y * -1.5372 + z * -0.4986
var_G = x * -0.9689 + y * 1.8758 + z * 0.0415
var_B = x * 0.0557 + y * -0.2040 + z * 1.0570
def convert(var):
i = var > 0.0031308
var[i] = 1.055 * (var[i] ** (1 / 2.4)) - 0.055
var[~i] = var[~i] * 12.92
return var
var_R = convert(var_R)
var_G = convert(var_G)
var_B = convert(var_B)
var_R[var_R < 0] = 0
var_B[var_B < 0] = 0
var_G[var_G < 0] = 0
var_R[var_R > 1] = 1
var_B[var_B > 1] = 1
var_G[var_G > 1] = 1
R = var_R * 255
G = var_G * 255
B = var_B * 255
return np.transpose(np.array([R, G, B], dtype='uint8'), (1, 2, 0))
def xyz2lab(image):
""" Transforms a XYZ-image to a LAB-image. """
var_X = image[:, :, 0] / 95.047
var_Y = image[:, :, 1] / 100.
var_Z = image[:, :, 2] / 108.883
xi = var_X > .008856
var_X[xi] = var_X[xi] ** (1. / 3.)
var_X[~xi] = (7.787 * var_X[~xi]) + (16. / 116.)
yi = var_Y > .008856
var_Y[yi] = var_Y[yi] ** (1. / 3.)
var_Y[~yi] = (7.787 * var_Y[~yi]) + (16. / 116.)
zi = var_Z > .008856
var_Z[zi] = var_Z[zi] ** (1. / 3.)
var_Z[~zi] = (7.787 * var_Z[~zi]) + (16. / 116.)
L = (116 * var_Y) - 16
a = 500. * (var_X - var_Y)
b = 200. * (var_Y - var_Z)
return np.transpose(np.array([L, a, b]), (1, 2, 0))
def lab2xyz(image):
""" Transforms a LAB-image to a XYZ-image. """
var_Y = (image[:, :, 0] + 16.) / 116.
var_X = image[:, :, 1] / 500. + var_Y
var_Z = var_Y - image[:, :, 2] / 200.
yi = var_Y > 0.2069
var_Y[yi] = var_Y[yi] ** 3
var_Y[~yi] = (var_Y[~yi] - 16. / 116.) / 7.787
xi = var_X > 0.2069
var_X[xi] = var_X[xi] ** 3
var_X[~xi] = (var_X[~xi] - 16. / 116.) / 7.787
zi = var_Z > 0.2069
var_Z[zi] = var_Z[zi] ** 3
var_Z[~zi] = (var_Z[~zi] - 16. / 116.) / 7.787
X = 95.047 * var_X
Y = 100. * var_Y
Z = 108.883 * var_Z
return np.transpose(np.array([X, Y, Z]), (1, 2, 0))
def hex2rgb(hexcolor):
""" Convert a color in Hex format to RGB. """
value = hexcolor.lstrip('#') if hexcolor.startswith('#') else hexcolor
lv = len(value)
return [int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3)]
def rgb2hex(rgb):
""" Convert an RGB color to Hex format. """
return '#%02x%02x%02x' % rgb
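# --- Illustrative usage (a hedged sketch, not part of the original module) ---
# Round-trips a single-pixel image through the LAB conversions and a colour
# through the hex helpers; the pixel value and the atol=2 tolerance (to absorb
# uint8 truncation) are assumptions made for this example.
if __name__ == '__main__':
    pixel = np.array([[[200, 120, 40]]], dtype='uint8')
    recovered = lab2rgb(rgb2lab(pixel))
    assert np.allclose(pixel.astype('float64'), recovered.astype('float64'), atol=2)
    assert rgb2hex(tuple(hex2rgb('#c87828'))) == '#c87828'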
|
mit
| 965,635,616,745,144,700 | 25.204545 | 74 | 0.46632 | false |
indolentriffraff/fihndos
|
cogs/customcmds.py
|
1
|
40126
|
import math
import re
import json
from github import Github
from PythonGists import PythonGists
from discord.ext import commands
from cogs.utils.checks import cmd_prefix_len, load_config
'''Module for custom commands adding, removing, and viewing.'''
class Customcmds:
def __init__(self, bot):
self.bot = bot
async def githubUpload(self, username, password, repo_name):
g = Github(username, password)
repo = g.get_user().get_repo(repo_name)
with open('settings/commands.json', 'r') as fp:
contents = fp.read()
updateFile = '/settings/commands.json'
sha = repo.get_contents(updateFile).sha
repo.update_file('/settings/commands.json', 'Updating customcommands', contents, sha)
async def check(self, ctx, val, pre):
def is_numb(msg):
if msg.content.isdigit() and val != 0:
return 0 < int(msg.content) < val
elif val == 0:
return True
else:
return False
reply = await self.bot.wait_for_message(author=ctx.message.author, check=is_numb)
return reply
# view customcmds
async def customcommands(self, ctx):
with open('settings/commands.json', 'r') as c:
cmds = json.load(c)
sortedcmds = sorted(cmds.keys(), key=lambda x: x.lower())
msgs = []
part = ''
pre = cmd_prefix_len()
if ctx.message.content[10 + pre:].strip() and ctx.message.content[10 + pre:].strip() != 'gist':
one_cmd = True
list_cmd = ctx.message.content.strip().split(' ')[1]
for cmd in sortedcmds:
if one_cmd and list_cmd == cmd:
if type(cmds[cmd]) is list:
part = cmd + ': '
for i in cmds[cmd]:
part += str(i[0]) + ' | '
part = part.rstrip(' | ')
break
else:
part = cmd
else:
for cmd in sortedcmds:
if type(cmds[cmd]) is list:
check = cmd + ': '
for i in cmds[cmd]:
check += str(i[0]) + ' | '
check = check.rstrip(' | ') + '\n\n'
else:
check = cmd + '\n\n'
if len(part + check) > 1900:
msgs.append(part)
part = check
else:
part += check
msgs.append(part)
if 'gist' in ctx.message.content or 'Gist' in ctx.message.content:
msgs = '\n'.join(msgs)
url = PythonGists.Gist(description='Custom Commands', content=str(msgs), name='commands.txt')
await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'List of Custom Commands: %s' % url)
else:
if len(msgs) == 1:
await self.bot.send_message(ctx.message.channel,
'```css\n[List of Custom Commands]\n%s ```' % msgs[0].rstrip())
else:
for b, i in enumerate(msgs):
await self.bot.send_message(ctx.message.channel,
'```css\n[List of Custom Commands %s/%s]\n%s ```' % (
b + 1, len(msgs), i.rstrip()))
# List all custom commands
@commands.group(pass_context=True)
async def customcmds(self, ctx):
"""Lists all customcmds. >help customcmds for more info
>customcmds - normal output with all the customcmds and subcommands (response names).
>customcmds <command_name> - output only this specific command.
>customcmds gist - normal output but posted to Gist to avoid cluttering the chat."""
if ctx.invoked_subcommand is None:
await self.customcommands(ctx)
await self.bot.delete_message(ctx.message)
@customcmds.command(pass_context=True)
async def long(self, ctx):
"""Lists detailed version of customcmds. Ex: >customcmds long"""
with open('settings/commands.json') as commands:
if 'gist' in ctx.message.content or 'Gist' in ctx.message.content:
cmds = commands.read()
link = PythonGists.Gist(description='Full commands.json', content=cmds, name='commands.json')
return await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'Full commands.json: %s' % link)
else:
cmds = json.load(commands)
msg = ''
sortedcmds = sorted(cmds.keys(), key=lambda x: x.lower())
if ctx.message.content[17:] and ctx.message.content[17:] != 'gist':
one_cmd = True
list_cmd = ctx.message.content.strip().split('long')[1].strip()
for cmd in sortedcmds:
if one_cmd and list_cmd == cmd:
msg += '"' + cmd + '" : "'
if type(cmds[cmd]) == list:
for i in cmds[cmd]:
msg += str(i) + ', '
msg = msg[:-2] + '",\n\n'
else:
msg += str(cmds[cmd]) + '",\n\n'
else:
for cmd in sortedcmds:
msg += '"' + cmd + '" : "'
if type(cmds[cmd]) == list:
for i in cmds[cmd]:
msg += str(i) + ', '
msg = msg[:-2] + '",\n\n'
else:
msg += str(cmds[cmd]) + '",\n\n'
msg = msg[:-3]
msg += '}```'
part = int(math.ceil(len(msg) / 1900))
if part == 1:
await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + '```json\nList of Custom Commands: {\n' + msg)
else:
msg = msg[7:-3]
splitList = [msg[i:i + 1900] for i in range(0, len(msg), 1900)]
allWords = []
splitmsg = ''
for i, blocks in enumerate(splitList):
splitmsg += 'List of Custom Commands: %s of %s\n\n' % (i + 1, part)
for b in blocks.split('\n'):
splitmsg += b + '\n'
allWords.append(splitmsg)
splitmsg = ''
for i in allWords:
await self.bot.send_message(ctx.message.channel, '```%s```' % i)
# Change customcmd embed color
@customcmds.command(pass_context=True, aliases=['colour'])
async def color(self, ctx, *, msg: str = None):
'''Set color (hex) of a custom command image. Ex: >customcmds color 000000'''
if msg:
try:
msg = msg.lstrip('#')
int(msg, 16)
except:
await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'Invalid color.')
await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'Successfully set color for customcmd embeds.')
else:
msg = ''
await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'Removed embed color for customcmd embeds.')
with open('settings/optional_config.json', 'r+') as fp:
opt = json.load(fp)
opt['customcmd_color'] = msg
fp.seek(0)
fp.truncate()
json.dump(opt, fp, indent=4)
@customcmds.command(pass_context=True)
async def update(self, ctx):
"""Needs GitHub repo set for an update"""
with open('settings/github.json', 'r+') as fp:
opt = json.load(fp)
if opt['username'] != "":
try:
await self.githubUpload(opt['username'], opt['password'], opt['reponame'])
except:
await self.bot.send_message(ctx.message.channel, "Incorrect GitHub credentials")
else:
await self.bot.send_message(ctx.message.channel, "GitHub account and repo not specified in `github.json`")
# Toggle auto-embed for images/gifs
@customcmds.command(pass_context=True)
async def embed(self, ctx):
"""Toggle auto embeding of images for custom commands."""
with open('settings/optional_config.json', 'r+') as fp:
opt = json.load(fp)
if opt['rich_embed'] == 'on':
opt['rich_embed'] = 'off'
                await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'Turned off auto-embedding of images/gifs for customcmds.')
else:
opt['rich_embed'] = 'on'
                await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'Turned on auto-embedding of images/gifs for customcmds.')
fp.seek(0)
fp.truncate()
json.dump(opt, fp, indent=4)
# Add a custom command
@commands.command(pass_context=True)
async def add(self, ctx, *, msg: str = None):
"""Add a new customcmd. >help add for more info
Simply do: >add
This will trigger the menu which you can navigate through and add your custom command that way.
-----------------------------------------------------------
Legacy method:
There are two ways to add custom commands. The first way:
----Simple----
>add <command> <response> Now, if you do .<command> you will receive <response>.
Example: >add nervous http://i.imgur.com/K9gMjWo.gifv
        Then, doing .nervous will output this imgur link (images and gifs will auto-embed), assuming that your customcmd_prefix is set to "."
---Multiple responses to the same command----
>add <command> <response_name> <response>. This way, you can add multiple responses to the same command.
Example:
>add cry k-on http://i.imgur.com/tWtXttk.gif
Then you can add another to the .cry command:
>add cry nichijou https://media.giphy.com/media/3fmRTfVIKMRiM/giphy.gif
Note: If anything you are adding/removing is more than one word, you MUST put each part in quotes.
Example: >add "cry" "mugi why" "http://i.imgur.com/tWtXttk.gif" or >add "copypasta" "I identify as an attack helicopter."
Then invoke a specific response with .<command> <response_name> or get a random response for that command with .<command>
So: .cry k-on would give you that specific link but .cry would give you one of the two you added to the cry command."""
if not msg:
await self.bot.delete_message(ctx.message)
pre = ctx.message.content.split('add')[0]
customcmd_prefix = load_config()['customcmd_prefix']
menu = await self.bot.send_message(ctx.message.channel,
self.bot.bot_prefix + '```\n\u2795 Choose type of customcmd to add. Enter a number:\n\n1. Simple customcmd (1 cmd with 1 response).\n2. Customcmd with multiple responses.\n3. View current customcmds.```')
reply = await self.check(ctx, 4, pre)
if reply:
await self.bot.delete_message(reply)
# Add simple customcmd
if reply.content == "1":
menu = await self.bot.edit_message(menu,
self.bot.bot_prefix + '```\n\u2795 Enter a cmd name. This is how you will invoke your response.```')
reply = await self.check(ctx, 0, pre)
# Grab the cmd name
if reply:
await self.bot.delete_message(reply)
entry_cmd = reply.content
menu = await self.bot.edit_message(menu,
self.bot.bot_prefix + '```\n\u2795 Enter the response for this cmd. This is what the bot will output when you send the cmd you specified.```')
reply = await self.check(ctx, 0, pre)
# Grab the response
if reply:
try:
await self.bot.delete_message(reply)
except:
pass
entry_response = reply.content
with open('settings/commands.json', 'r+') as commands:
cmds = json.load(commands)
save = cmds
commands.seek(0)
commands.truncate()
try:
cmds[entry_cmd] = entry_response
json.dump(cmds, commands, indent=4)
await self.bot.edit_message(menu,
self.bot.bot_prefix + 'Successfully added ``{}`` to ``{}`` Invoke this response by doing: ``{}``'.format(
entry_response, entry_cmd,
customcmd_prefix + entry_cmd))
except Exception as e:
json.dump(save, commands, indent=4)
await self.bot.edit_message(menu,
self.bot.bot_prefix + 'Error, something went wrong. Exception: ``%s``' % e)
# Add complex customcmd
elif reply.content == "2":
menu = await self.bot.edit_message(menu,
self.bot.bot_prefix + '```\n\u2795 What to add? Pick a number.\n\n1. Add new command.\n2. Add response to existing command.```')
reply = await self.check(ctx, 3, pre)
if reply:
await self.bot.delete_message(reply)
# Create new list cmd
if reply.content == '1':
menu = await self.bot.edit_message(menu,
self.bot.bot_prefix + '```\n\u2795 Enter the cmd name.```')
reply = await self.check(ctx, 0, pre)
# Grab cmd name
if reply:
await self.bot.delete_message(reply)
entry_cmd = reply.content
menu = await self.bot.edit_message(menu,
self.bot.bot_prefix + '```\n\u2795 Since you selected to have this cmd have multiple responses, these multiple responses must have different names to map them. Enter a response name.```')
reply = await self.check(ctx, 0, pre)
# Grab response name
if reply:
await self.bot.delete_message(reply)
entry_response = reply.content
menu = await self.bot.edit_message(menu,
self.bot.bot_prefix + '```\n\u2795 Now enter the response.```')
reply = await self.check(ctx, 0, pre)
# Grab the response
if reply:
try:
await self.bot.delete_message(reply)
except:
pass
response = reply.content
with open('settings/commands.json', 'r+') as commands:
cmds = json.load(commands)
save = cmds
commands.seek(0)
commands.truncate()
try:
cmds[entry_cmd] = [[entry_response, response]]
json.dump(cmds, commands, indent=4)
await self.bot.edit_message(menu,
self.bot.bot_prefix + 'Successfully added response with response name ``{}`` to command ``{}`` Invoke this specific response with ``{}`` or get a random response from the list of responses for this command with ``{}``'.format(
entry_response, entry_cmd,
customcmd_prefix + entry_cmd + ' ' + entry_response,
customcmd_prefix + entry_cmd))
except Exception as e:
json.dump(save, commands, indent=4)
await self.bot.edit_message(menu,
self.bot.bot_prefix + 'Error, something went wrong. Exception: ``%s``' % e)
# Add to existing list cmd
elif reply.content == '2':
list_cmds = []
with open('settings/commands.json') as commands:
cmds = json.load(commands)
for i in cmds:
if type(cmds[i]) is list:
list_cmds.append(i)
msg = '1. '
count = 0
for count, word in enumerate(list_cmds):
msg += '{} {}.'.format(word, count + 2)
msg = msg[:-(len(str(count + 2)) + 2)]
if count == 0:
return await self.bot.edit_message(menu,
self.bot.bot_prefix + 'There are no cmds you can add multiple responses to. Create a cmd that enables multiple responses and then add a response to it.')
menu = await self.bot.edit_message(menu,
self.bot.bot_prefix + '```\n\u2795 Enter the number of the cmd name to add a response to.\n\n {}```'.format(msg))
reply = await self.check(ctx, count + 2, pre)
if reply:
await self.bot.delete_message(reply)
entry_cmd = list_cmds[int(reply.content)-1]
menu = await self.bot.edit_message(menu,
self.bot.bot_prefix + '```\n\u2795 Enter a response name.```')
reply = await self.check(ctx, 0, pre)
# Grab response name
if reply:
await self.bot.delete_message(reply)
entry_response = reply.content
menu = await self.bot.edit_message(menu,
self.bot.bot_prefix + '```\n\u2795 Now enter the response.```')
reply = await self.check(ctx, 0, pre)
# Grab the response
if reply:
try:
await self.bot.delete_message(reply)
except:
pass
response = reply.content
with open('settings/commands.json', 'r+') as commands:
save = cmds
commands.seek(0)
commands.truncate()
try:
cmds[entry_cmd].append([entry_response, response])
json.dump(cmds, commands, indent=4)
await self.bot.edit_message(menu,
self.bot.bot_prefix + 'Successfully added response with response name ``{}`` to command ``{}`` Invoke this specific response with ``{}`` or get a random response from the list of responses for this command with ``{}``'.format(
entry_response, entry_cmd,
customcmd_prefix + entry_cmd + ' ' + entry_response,
customcmd_prefix + entry_cmd))
except Exception as e:
json.dump(save, commands, indent=4)
await self.bot.edit_message(menu,
self.bot.bot_prefix + 'Error, something went wrong. Exception: ``%s``' % e)
elif reply.content == '3':
await self.bot.delete_message(menu)
await self.customcommands(ctx)
else:
words = msg.strip()
with open('settings/commands.json', 'r') as commands:
cmds = json.load(commands)
save = cmds
try:
# If there are quotes in the message (meaning multiple words for each param)
if '"' in words:
entry = re.findall('"([^"]+)"', words)
# Item for key is list
if len(entry) == 3:
# Key exists
if entry[0] in cmds:
entries = []
for i in cmds[entry[0]]:
entries.append(tuple((i[0], i[1])))
entries.append(tuple([entry[1], entry[2]]))
cmds[entry[0]] = entries
else:
cmds[entry[0]] = [(entry[1], entry[2])]
# Item for key is string
else:
if entry[0] in cmds:
if type(cmds[entry[0]]) is list:
return await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'Error, this is a list command. To append to this command, you need a <response name>. Ex: ``>add cmd response_name response``')
cmds[entry[0]] = entry[1]
                # No quotes, so spaces separate params
else:
# Item for key is list
if len(words.split(' ')) == 3:
entry = words.split(' ', 2)
# Key exists
if entry[0] in cmds:
entries = []
for i in cmds[entry[0]]:
entries.append(tuple((i[0], i[1])))
entries.append(tuple([entry[1], entry[2]]))
cmds[entry[0]] = entries
else:
cmds[entry[0]] = [(entry[1], entry[2])]
# Item for key is string
else:
entry = words.split(' ', 1)
if entry[0] in cmds:
if type(cmds[entry[0]]) is list:
return await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'Error, this is a list command. To append to this command, you need a <response name>. Ex: ``>add cmd response_name response``')
cmds[entry[0]] = entry[1]
await self.bot.send_message(ctx.message.channel,
self.bot.bot_prefix + 'Successfully added ``%s`` to ``%s``' % (entry[1], entry[0]))
except Exception as e:
with open('settings/commands.json', 'w') as commands:
commands.truncate()
json.dump(save, commands, indent=4)
await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'Error, something went wrong. Exception: ``%s``' % e)
# Update commands.json
with open('settings/commands.json', 'w') as commands:
commands.truncate()
json.dump(cmds, commands, indent=4)
# Remove a custom command
@commands.command(pass_context=True)
async def remove(self, ctx, *, msg: str = None):
"""Remove a customcmd. >help remove for more info.
Simply do: >remove
This will trigger the menu which you can navigate through and remove your custom command that way.
-----------------------------------------------------------
Legacy method:
>remove <command> or >remove <command> <response_name> if you want to remove a specific response for a command.
Just like with the add cmd, note that if anything you are adding/removing is more than one word, you must put each part in quotes.
Example: If "cry" is the command and "mugi why" is the name for one of the links, removing that link would be: >remove "cry" "mugi why" """
if not msg:
await self.bot.delete_message(ctx.message)
pre = ctx.message.content.split('remove')[0]
menu = await self.bot.send_message(ctx.message.channel,
self.bot.bot_prefix + '```\n\u2796 Choose what to remove. Enter a number:\n\n1. A command and all its responses.\n2. A single response from a command that has more than one.```')
reply = await self.check(ctx, 3, pre)
if reply:
await self.bot.delete_message(reply)
# Remove a cmd
if reply.content == '1':
with open('settings/commands.json') as commands:
cmds = json.load(commands)
msg = '1. '
count = 0
all_cmds = []
for count, word in enumerate(cmds):
all_cmds.append(word)
msg += '{} {}.'.format(word, count + 2)
msg = msg[:-(len(str(count + 2)) + 2)]
if count == 0:
return await self.bot.edit_message(menu,
self.bot.bot_prefix + 'There are no cmds to remove.')
menu = await self.bot.edit_message(menu,
self.bot.bot_prefix + '```\n\u2796 Enter the number of the cmd to remove.\n\n {}```'.format(
msg))
reply = await self.check(ctx, count + 2, pre)
if reply:
await self.bot.delete_message(reply)
with open('settings/commands.json', 'r+') as commands:
save = cmds
commands.seek(0)
commands.truncate()
try:
cmd_to_remove = all_cmds[int(reply.content) - 1]
del cmds[cmd_to_remove]
json.dump(cmds, commands, indent=4)
await self.bot.edit_message(menu,
self.bot.bot_prefix + 'Successfully removed command ``{}``'.format(cmd_to_remove))
except Exception as e:
json.dump(save, commands, indent=4)
await self.bot.edit_message(menu,
self.bot.bot_prefix + 'Error, something went wrong. Exception: ``%s``' % e)
# Remove a specific response from a cmd
elif reply.content == '2':
list_cmds = []
with open('settings/commands.json') as commands:
cmds = json.load(commands)
for i in cmds:
if type(cmds[i]) is list:
list_cmds.append(i)
msg = '1. '
count = 0
for count, word in enumerate(list_cmds):
msg += '{} {}.'.format(word, count + 2)
msg = msg[:-(len(str(count + 2)) + 2)]
if count == 0:
return await self.bot.edit_message(menu,
self.bot.bot_prefix + 'There are no cmds with multiple responses. If you are looking to remove a cmd with just one response, select 1 in the main menu for this command.')
menu = await self.bot.edit_message(menu,
self.bot.bot_prefix + '```\n\u2796 Enter the number of the cmd that you want to remove a response from.\n\n {}```'.format(
msg))
reply = await self.check(ctx, count + 2, pre)
# List responses from this cmd
if reply:
await self.bot.delete_message(reply)
cmd_to_remove_from = list_cmds[int(reply.content) - 1]
cmd_responses = []
msg = '1. '
count = 0
for count, word in enumerate(cmds[cmd_to_remove_from]):
cmd_responses.append(word[0])
msg += '{} {}.'.format(word[0], count + 2)
msg = msg[:-(len(str(count + 2)) + 2)]
menu = await self.bot.edit_message(menu,
self.bot.bot_prefix + '```\n\u2796 Enter the number of the response to remove.\n\n {}```'.format(
msg))
reply = await self.check(ctx, count + 2, pre)
if reply:
await self.bot.delete_message(reply)
with open('settings/commands.json', 'r+') as commands:
save = cmds
commands.seek(0)
commands.truncate()
try:
response_to_remove = cmd_responses[int(reply.content) - 1]
for i in cmds[cmd_to_remove_from]:
if i[0] == response_to_remove:
cmds[cmd_to_remove_from].remove(i)
if cmds[cmd_to_remove_from] == []:
del cmds[cmd_to_remove_from]
json.dump(cmds, commands, indent=4)
await self.bot.edit_message(menu,
self.bot.bot_prefix + 'Successfully removed response with name ``{}`` from command ``{}``'.format(
response_to_remove, cmd_to_remove_from))
except Exception as e:
json.dump(save, commands, indent=4)
await self.bot.edit_message(menu,
self.bot.bot_prefix + 'Error, something went wrong. Exception: ``%s``' % e)
else:
words = msg.strip()
with open('settings/commands.json', 'r') as commands:
cmds = json.load(commands)
save = cmds
try:
# If there are quotes in the message (meaning multiple words for each param)
success = False
def check(msg):
if msg:
return msg.content.lower().strip() == 'y' or msg.content.lower().strip() == 'n'
else:
return False
if '"' in words:
entry = re.findall('"([^"]+)"', words)
# Item for key is list
if len(entry) == 2:
# Key exists
if entry[0] in cmds:
entries = []
for i in cmds[entry[0]]:
if entry[1] == i[0]:
cmds[entry[0]].remove(i)
await self.bot.send_message(ctx.message.channel,
self.bot.bot_prefix + 'Successfully removed ``%s`` from ``%s``' % (
entry[1], entry[0]))
success = True
else:
if entry[0] in cmds:
del cmds[entry[0]]
success = True
await self.bot.send_message(ctx.message.channel,
self.bot.bot_prefix + 'Successfully removed ``%s`` from ``%s``' % (
entry[1], entry[0]))
# Item for key is string
else:
if entry[0] in cmds:
if type(cmds[entry[0]]) is list:
await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'This will delete all responses for this list command. Are you sure you want to do this? (y/n).')
reply = await self.bot.wait_for_message(timeout=10, author=ctx.message.author, check=check)
if reply:
if reply.content.lower().strip() == 'n':
return await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'Cancelled.')
else:
return await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'Cancelled.')
oldValue = cmds[entry[0]]
del cmds[entry[0]]
success = True
await self.bot.send_message(ctx.message.channel,
self.bot.bot_prefix + 'Successfully removed ``%s`` from ``%s``' % (oldValue, entry[0]))
                # No quotes, so spaces separate params
else:
# Item for key is list
if len(words.split(' ')) == 2:
entry = words.split(' ')
# Key exists
if entry[0] in cmds:
for i in cmds[entry[0]]:
if entry[1] == i[0]:
cmds[entry[0]].remove(i)
await self.bot.send_message(ctx.message.channel,
self.bot.bot_prefix + 'Successfully removed ``%s`` from ``%s``' % (
entry[1], entry[0]))
success = True
else:
if entry[0] in cmds:
del cmds[entry[0]]
success = True
await self.bot.send_message(ctx.message.channel,
self.bot.bot_prefix + 'Successfully removed ``%s`` from ``%s``' % (entry[1], entry[0]))
# Item for key is string
else:
entry = words.split(' ', 1)
if entry[0] in cmds:
if type(cmds[entry[0]]) is list:
await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'This will delete all responses for this list command. Are you sure you want to do this? (y/n).')
reply = await self.bot.wait_for_message(timeout=10, author=ctx.message.author, check=check)
if reply:
if reply.content.lower().strip() == 'n':
return await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'Cancelled.')
else:
return await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'Cancelled.')
oldValue = cmds[entry[0]]
del cmds[entry[0]]
success = True
await self.bot.send_message(ctx.message.channel,
self.bot.bot_prefix + 'Successfully removed ``%s`` from ``%s``' % (oldValue, entry[0]))
if success is False:
await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'Could not find specified command.')
except Exception as e:
with open('settings/commands.json', 'w') as commands:
commands.truncate()
json.dump(save, commands, indent=4)
await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + 'Error, something went wrong. Exception: ``%s``' % e)
# Update commands.json
with open('settings/commands.json', 'w') as commands:
commands.truncate()
json.dump(cmds, commands, indent=4)
def setup(bot):
bot.add_cog(Customcmds(bot))
|
gpl-3.0
| -204,136,081,114,417,100 | 52.371274 | 302 | 0.421123 | false |
sanguinariojoe/FreeCAD
|
src/Mod/Fem/femexamples/constraint_transform_beam_hinged.py
|
8
|
7322
|
# ***************************************************************************
# * Copyright (c) 2020 Sudhanshu Dubey <sudhanshu.thethunder@gmail.com> *
# * Copyright (c) 2021 Bernd Hahnebach <bernd@bimstatik.org> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
import FreeCAD
from FreeCAD import Rotation
from FreeCAD import Vector
from CompoundTools import CompoundFilter
import Fem
import ObjectsFem
from . import manager
from .manager import get_meshname
from .manager import init_doc
def get_information():
return {
"name": "Constraint Transform Beam Hinged",
"meshtype": "solid",
"meshelement": "Tet10",
"constraints": ["pressure", "displacement", "transform"],
"solvers": ["calculix"],
"material": "solid",
"equation": "mechanical"
}
def get_explanation(header=""):
return header + """
To run the example from Python console use:
from femexamples.constraint_transform_beam_hinged import setup
setup()
See forum topic post:
https://forum.freecadweb.org/viewtopic.php?f=18&t=20238#p157643
Constraint transform on a beam
"""
def setup(doc=None, solvertype="ccxtools"):
# init FreeCAD document
if doc is None:
doc = init_doc()
# explanation object
# just keep the following line and change text string in get_explanation method
manager.add_explanation_obj(doc, get_explanation(manager.get_header(get_information())))
# geometric object
# name is important because the other method in this module use obj name
cube = doc.addObject("Part::Box", "Cube")
cube.Height = "20 mm"
cube.Length = "100 mm"
cylinder = doc.addObject("Part::Cylinder", "Cylinder")
cylinder.Height = "20 mm"
cylinder.Radius = "6 mm"
cylinder.Placement = FreeCAD.Placement(
Vector(10, 12, 10), Rotation(0, 0, 90), Vector(0, 0, 0),
)
cut = doc.addObject("Part::Cut", "Cut")
cut.Base = cube
cut.Tool = cylinder
# mirroring
mirror = doc.addObject("Part::Mirroring", "Mirror")
mirror.Source = cut
mirror.Normal = (1, 0, 0)
mirror.Base = (100, 100, 20)
# fusing
fusion = doc.addObject("Part::Fuse", "Fusion")
fusion.Base = cut
fusion.Tool = mirror
fusion.Refine = True
# compound filter
geom_obj = CompoundFilter.makeCompoundFilter(name='CompoundFilter')
geom_obj.Base = fusion
geom_obj.FilterType = 'window-volume'
doc.recompute()
if FreeCAD.GuiUp:
geom_obj.Base.ViewObject.hide()
geom_obj.ViewObject.Document.activeView().viewAxonometric()
geom_obj.ViewObject.Document.activeView().fitAll()
# analysis
analysis = ObjectsFem.makeAnalysis(doc, "Analysis")
# solver
if solvertype == "calculix":
solver_obj = ObjectsFem.makeSolverCalculix(doc, "SolverCalculiX")
elif solvertype == "ccxtools":
solver_obj = ObjectsFem.makeSolverCalculixCcxTools(doc, "CalculiXccxTools")
solver_obj.WorkingDir = u""
else:
FreeCAD.Console.PrintWarning(
"Not known or not supported solver type: {}. "
"No solver object was created.\n".format(solvertype)
)
if solvertype == "calculix" or solvertype == "ccxtools":
solver_obj.SplitInputWriter = False
solver_obj.AnalysisType = "static"
solver_obj.GeometricalNonlinearity = "linear"
solver_obj.ThermoMechSteadyState = False
solver_obj.MatrixSolverType = "default"
solver_obj.IterationsControlParameterTimeUse = False
analysis.addObject(solver_obj)
# material
material_obj = ObjectsFem.makeMaterialSolid(doc, "FemMaterial")
mat = material_obj.Material
mat["Name"] = "CalculiX-Steel"
mat["YoungsModulus"] = "210000 MPa"
mat["PoissonRatio"] = "0.30"
mat["Density"] = "7900 kg/m^3"
mat["ThermalExpansionCoefficient"] = "0.012 mm/m/K"
material_obj.Material = mat
# constraint pressure
con_pressure = ObjectsFem.makeConstraintPressure(doc, name="FemConstraintPressure")
con_pressure.References = [(geom_obj, "Face8")]
con_pressure.Pressure = 10.0
con_pressure.Reversed = False
analysis.addObject(con_pressure)
# constraint displacement
con_disp = ObjectsFem.makeConstraintDisplacement(doc, name="FemConstraintDisplacment")
con_disp.References = [(geom_obj, "Face4"), (geom_obj, "Face5")]
con_disp.xFree = False
con_disp.xFix = True
analysis.addObject(con_disp)
# constraints transform
con_transform1 = ObjectsFem.makeConstraintTransform(doc, name="FemConstraintTransform1")
con_transform1.References = [(geom_obj, "Face4")]
con_transform1.TransformType = "Cylindrical"
con_transform1.X_rot = 0.0
con_transform1.Y_rot = 0.0
con_transform1.Z_rot = 0.0
analysis.addObject(con_transform1)
con_transform2 = ObjectsFem.makeConstraintTransform(doc, name="FemConstraintTransform2")
con_transform2.References = [(geom_obj, "Face5")]
con_transform2.TransformType = "Cylindrical"
con_transform2.X_rot = 0.0
con_transform2.Y_rot = 0.0
con_transform2.Z_rot = 0.0
analysis.addObject(con_transform2)
# mesh
from .meshes.mesh_transform_beam_hinged_tetra10 import create_nodes, create_elements
fem_mesh = Fem.FemMesh()
control = create_nodes(fem_mesh)
if not control:
FreeCAD.Console.PrintError("Error on creating nodes.\n")
control = create_elements(fem_mesh)
if not control:
FreeCAD.Console.PrintError("Error on creating elements.\n")
femmesh_obj = analysis.addObject(ObjectsFem.makeMeshGmsh(doc, get_meshname()))[0]
femmesh_obj.FemMesh = fem_mesh
femmesh_obj.Part = geom_obj
femmesh_obj.SecondOrderLinear = False
femmesh_obj.CharacteristicLengthMax = '7 mm'
doc.recompute()
return doc
|
lgpl-2.1
| -893,893,622,936,874,100 | 36.548718 | 92 | 0.612264 | false |
victor-gil-sepulveda/PhD-ANMPythonHelpers
|
anmichelpers/tools/tools.py
|
1
|
3490
|
"""
Created on 9/2/2015
@author: victor
"""
import numpy
import math
import prody
try:
from pyproct.data.handler.sourceGenerator import SourceGenerator
from pyproct.data.handler.protein.proteinEnsembleDataLoader import ProteinEnsembleDataLoader
except:
print "[WARNING] pyProCT was not found. Some functions cannot be used"
def norm(v):
"""
Numpy compliant norm implementation.
@param v: The vector used to calculate the norm.
@return: A norm or an array of norms.
"""
if len(v.shape) == 1:
return numpy.sqrt(numpy.dot(v,v))
elif len(v.shape) == 2:
norms = []
for i in range(len(v)):
norms.append(norm(v[i]))
return numpy.array(norms)
else:
return None
def frec_from_eigenvalue(e_val):
"""
Calculates the proportional frequency of a given eigenvalue (if it comes from a
vibrational study).
@param e_val: The eigenvalue.
@return: The computed frequency (no units).
"""
return e_val / (2*math.pi)
def ensure_modes_layout(modes):
"""
If the layout of the modes is flat, it converts it to a (M,N,3) layout.
@param modes: [In/Out] A numpy array containing all the modes.
    @return: The modes as a numpy array with an (M, N, 3) layout.
"""
if len(modes.shape) == 3:
return modes
elif len(modes.shape) == 2:
number_of_modes = len(modes)
number_of_nodes = modes.shape[1] / 3
return numpy.reshape(modes, (number_of_modes, number_of_nodes, 3))
else:
raise ValueError("The array has an unexpected size")
def load_all_pdbs_ca(pdb_list):
"""
    Loads a list of pdbs in pyproct format (this includes the use of globs and 'base_selection').
@param pdb_list: A list of pdbs in pyproct format.
@return: The pyproct data object and the list of sources (prody pdb structure -> data.structure_ensemble
source from pyproct source -> s.source["source"] )
"""
class MockParams:
def __init__(self):
pass
def get_value(self,a,b):
return ""
sources = SourceGenerator(pdb_list).source_list
loader = ProteinEnsembleDataLoader(MockParams())
for source in sources:
loader.load(source)
# Retrieve the data object
data = loader.close()
return data, sources
def get_all_betas(sources):
"""
Loads CA temperature factors from a list of pyproct sources.
@return: A matrix with all the beta factors.
"""
betas = []
for s in sources:
pdb = prody.parsePDB(s.source["source"]).select("name CA")
betas.append(pdb.getBetas())
betas = numpy.array(betas)
mean_betas = betas.mean(axis = 0)
for beta_array in betas:
for i in range(len(beta_array)):
if beta_array[i] == 0.0:
print "Zero beta value @ %d; exchanging with mean."%i
beta_array[i] = mean_betas[i]
return betas
def normalize(v):
max_val = max(v)
return v / abs(max_val)
def normalize_zero_one(v):
max_val = max(v)
min_val = min(v)
val_range = max_val - min_val
return (v - min_val) / val_range
def is_int(this_str):
try:
int(this_str)
return True
except ValueError:
return False
def find(f, seq):
"""Return first item in sequence where f(item) == True."""
for item in seq:
if f(item):
return item
|
mit
| 1,331,841,634,051,595,000 | 25.846154 | 108 | 0.605444 | false |
mnunezdm/cazasteroides
|
karmaserver/modules/selection/provider/__init__.py
|
1
|
3116
|
''' EFES Provider module '''
from karmaserver.modules.selection.provider.evaluator import ObservationEvaluator
from karmaserver.modules.selection.provider.filter import ObservationFilter
from karmaserver.modules.selection.provider.eraser import ObservationEraser
from karmaserver.modules.selection.provider.selector import RandomObservationSelector
import karmaserver.utils.print as print_
from karmaserver.utils import start_timer, stop_timer
from karmaserver.data.content_resolver import content_resolver
from karmaserver.data.models.observation import Observation
class ObservationSelectionProviderAbstract: # pragma: no cover
''' Abstract class of the EFES Provider class '''
def select_observation_for_discover(self, user_id, karma_level):
''' Returns the Observation based on the karma_level and the user_id '''
raise NotImplementedError('Abstract class, this method should have been implemented')
def select_observation_for_votation(self, user_id, karma_level):
''' Returns the Observation based on the karma_level and the user_id '''
raise NotImplementedError('Abstract class, this method should have been implemented')
def print_info(self):
''' Prints the Provider Configuration '''
raise NotImplementedError('Abstract class, this method should have been implemented')
class ObservationSelectionProvider(ObservationSelectionProviderAbstract):
''' Implementation of the EFES Provider class '''
def __init__(self, number_of_karma_levels, number_of_filter_levels):
self.number_of_filter_levels = number_of_filter_levels
self.evaluator = ObservationEvaluator()
self.filter = ObservationFilter(number_of_karma_levels, number_of_filter_levels)
self.eraser = ObservationEraser()
self.selector = RandomObservationSelector()
self.print_info()
def print_info(self):
print_.initialize_info(self.__class__.__name__, True)
print_.key_value_list('Maximum Filter Level', self.number_of_filter_levels)
def select_observation_for_discover(self, user_id, karma_level):
observation_list = content_resolver.get(Observation)
return self.__get_observation(observation_list, user_id, karma_level)
def select_observation_for_votation(self, user_id, karma_level):
observation_list = content_resolver.get(Observation)
return self.__get_observation(observation_list, user_id, karma_level)
def __get_observation(self, observation_list, user_id, karma_level):
evaluated_observations = self.evaluator.evaluate(observation_list)
filtered_observations, level = self.filter.observations(evaluated_observations,
karma_level)
erased__observations = self.eraser.erase(filtered_observations, user_id)
selected__observation = self.selector.select(erased__observations)
if selected__observation:
serialized = selected__observation.serialize(id_position=True)
serialized['filter_level'] = level
return serialized
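# --- Illustrative usage (hedged; the level counts are arbitrary and a
# configured content_resolver/database is assumed) ---
# provider = ObservationSelectionProvider(number_of_karma_levels=10,
#                                         number_of_filter_levels=5)
# observation = provider.select_observation_for_votation(user_id, karma_level)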
|
mit
| -1,755,481,269,962,427,100 | 52.724138 | 93 | 0.723363 | false |
veselosky/schemazoid
|
schemazoid/micromodels/models.py
|
1
|
6716
|
import six
from .fields import Field
class MetaModel(type):
"""The metaclass for :class:`~schemazoid.micromodels.Model`.
The main function of this metaclass
is to move all of fields into the ``_clsfields`` variable on the class.
"""
def __new__(cls, name, bases, attrs):
fields = {}
for base in bases[::-1]:
if hasattr(base, '_clsfields'):
fields.update(base._clsfields)
# Somehow if you iterate over attrs before creating the class, the
# class docstring gets lost. So we create the class first and
# manipulate its attrs after.
newclass = super(MetaModel, cls).__new__(cls, name, bases, attrs)
to_remove = []
for name in dir(newclass):
if isinstance(getattr(newclass, name), Field):
fields[name] = getattr(newclass, name)
to_remove.append(name)
for name in to_remove:
delattr(newclass, name)
newclass._clsfields = fields
return newclass
# TODO Add model-level validation to support cross-field dependencies.
@six.add_metaclass(MetaModel)
class Model(object):
"""The ``Model`` is the key class of the micromodels framework.
To begin modeling your data structure, subclass ``Model`` and add
Fields describing its structure. ::
>>> from schemazoid import micromodels as m
>>> class Thing(m.Model):
... name = m.CharField()
... description = m.CharField()
A Model instance may be constructed as with any Python object. ::
>>> thing = Thing()
    More useful and typical is to instantiate a model from a dictionary. ::
>>> data = {'name': 'spoon', 'description': 'There is no spoon.'}
>>> thing = Thing(data)
>>> thing.name
u'spoon'
>>> thing.description
u'There is no spoon.'
>>> thing.update(name='spork')
>>> thing.name
u'spork'
>>> fork = {'name': 'fork', 'description': "Stick it in me, I'm done."}
>>> thing.update(fork)
>>> thing.description
u"Stick it in me, I'm done."
"""
def __init__(self, *args, **kwargs):
super(Model, self).__init__()
# an edge case, we can't call our own __setattr__ before
# _instance_fields is initialized, since it calls get_field()
super(Model, self).__setattr__('_instance_fields', {})
if args:
self.update(args[0])
if kwargs:
self.update(kwargs)
# We override __setattr__ so that setting attributes passes through field
# conversion/validation functions.
def __setattr__(self, key, value):
field = self.get_field(key)
if field:
super(Model, self).__setattr__(key, field.to_python(value))
else:
super(Model, self).__setattr__(key, value)
@classmethod
def get_class_field(cls, name):
"""Return the Field instance for the class field of the given name.
Returns None if there is no Field by that name on the class.
"""
return cls._clsfields.get(name, None)
@classmethod
def get_class_fields(cls):
"""Return a dictionary of Fields on this class, keyed by name."""
return cls._clsfields
@classmethod
def add_class_field(cls, name, field):
"""Extend a class by adding a new field to the class definition."""
if not isinstance(field, Field):
msg = "Second argument to add_class_field must be a Field instance"
raise TypeError(msg)
cls._clsfields[name] = field
def get_field(self, name):
"""Return the Field instance for the given name on this object.
This instance method searches both the instance and the class.
"""
field = self._instance_fields.get(name, None)
if not field:
field = self.__class__.get_class_field(name)
return field
def get_all_fields(self):
"""Return a dictionary of all Fields on this instance, keyed by name.
Includes both class fields and instance fields.
"""
return dict(self.__class__.get_class_fields(), **self._instance_fields)
def update(self, *args, **kwargs):
"""As with the :class:`dict` method of the same name, given a
dictionary or keyword arguments, sets the values of the instance
attributes corresponding to the key names, overriding any existing
value.
"""
data = args[0] if args else {}
for name in self.get_all_fields():
if name in kwargs:
setattr(self, name, kwargs[name])
elif name in data:
setattr(self, name, data[name])
def add_field(self, name, field):
"""Adds an instance field to this Model instance.
Instance fields allow you to validate and serialize arbitrary
attributes on a Model instance even if the class does not support them.
"""
self._instance_fields[name] = field
if hasattr(self, name):
# Should raise exception if current value not valid
setattr(self, name, getattr(self, name))
def to_dict(self, serial=False):
"""Returns a dictionary representing the data of the instance,
containing native Python objects which might not be serializable
(for example, :class:`~datetime.datetime` objects). To obtain a
serializable dictionary, call the
:meth:`~schemazoid.micromodels.Model.to_serial` method instead,
or pass the ``serial`` argument with a True value.
Note that only attributes declared as Fields will be included in the
dictionary. Although you may set other attributes on the instance,
those additional attributes will not be returned.
"""
if serial:
return dict(
(key, self.get_field(key).to_serial(getattr(self, key)))
for key in self.get_all_fields() if hasattr(self, key))
else:
return dict((key, getattr(self, key))
for key in self.get_all_fields() if hasattr(self, key))
    # Fields have to_serial; for symmetry, models should have it too.
def to_serial(self):
"""Returns a serializable dictionary representing the data of the
instance. It should be safe to hand this dictionary as-is to
:func:`json.dumps`.
Note that only attributes declared as Fields will be included in the
dictionary. Although you may set other attributes on the instance,
those additional attributes will not be returned.
"""
return self.to_dict(serial=True)
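# Usage sketch for the instance-field and serialization APIs above, in the same
# doctest style as the class docstring. CharField is assumed to come from the
# companion fields module, as in the docstring example; it is not defined here.
#
#     >>> from schemazoid import micromodels as m
#     >>> class Thing(m.Model):
#     ...     name = m.CharField()
#     >>> thing = Thing({'name': 'spoon'})
#     >>> thing.add_field('color', m.CharField())  # per-instance field
#     >>> thing.color = 'silver'
#     >>> sorted(thing.to_serial().items())
#     [('color', u'silver'), ('name', u'spoon')]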
|
apache-2.0
| 7,780,856,994,009,770,000 | 36.104972 | 79 | 0.607504 | false |
subena-io/subena
|
base.py
|
1
|
1245
|
#!/usr/local/bin/python2.7
# -*- coding: utf-8-sig -*-
import argparse
import logging
import os
import sqlalchemy
from sqlalchemy.ext.declarative.api import declarative_base
from sqlalchemy.orm.session import sessionmaker
# If no SUBDB environment variable has been defined, fall back to a default connection URL
if "SUBDB" not in os.environ:
os.environ['SUBDB'] = 'mysql://root:cnim@127.0.0.1:3306/sub_ai'
URLS = {
'SQL':os.environ['SUBDB'],
}
# Command-line flags; --verbose controls the logging level below
parser = argparse.ArgumentParser()
parser.add_argument('-v','--verbose',action='store_true')
parser.add_argument('-a','--alerts',action='store_true')
args = parser.parse_args()
if args.verbose:
logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.DEBUG)
else:
logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING)
try:
engine = sqlalchemy.create_engine(URLS['SQL'])
Session = sessionmaker(bind=engine)
Base = declarative_base(bind=engine)
    DBSession = sessionmaker(bind=engine)
Base.metadata.create_all(engine)
except Exception:
    logging.error('Database is not reachable at the provided URL: %s', URLS['SQL'])
    logging.error('Please check that the database instance is running and that the database name exists')
    exit(1)
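# Usage sketch from another module (model and column names below are
# placeholders, not part of this project): Base is already bound to the engine,
# so create_all() and DBSession() work without passing it again.
#
#     from base import Base, DBSession
#     from sqlalchemy import Column, Integer, String
#
#     class Subscriber(Base):
#         __tablename__ = 'subscriber'
#         id = Column(Integer, primary_key=True)
#         name = Column(String(64))
#
#     Base.metadata.create_all()
#     session = DBSession()
#     session.add(Subscriber(name='alice'))
#     session.commit()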
|
apache-2.0
| 3,170,101,479,318,066,000 | 30.923077 | 87 | 0.715663 | false |
paulcronk/psinsights
|
psinsights/error.py
|
1
|
1641
|
###############################################################################
# Copyright 2012 FastSoft Inc.
# Copyright 2012 Devin Anderson <danderson (at) fastsoft (dot) com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
###############################################################################
from psinsights.issue import Issue as _Issue
class Error(Exception):
"""Exception class for service errors."""
__issues = None
def __del__(self):
self.__data = None
self.__issues = None
def __init__(self, data):
data = data["error"]
code = data["code"]
message = data["message"]
super(Error, self).__init__((message, code))
self.__code = code
self.__data = data
self.__message = message
@property
def code(self):
return self.__code
@property
def issues(self):
issues = self.__issues
if issues is None:
issues = tuple((_Issue(d) for d in self.__data["errors"]))
self.__issues = issues
return issues
@property
def message(self):
return self.__message
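if __name__ == '__main__':
    # Minimal sketch of constructing and consuming this error type; the payload
    # shape mirrors what __init__ above expects (a dict under the "error" key).
    payload = {'error': {'code': 400, 'message': 'Bad request', 'errors': []}}
    try:
        raise Error(payload)
    except Error as err:
        print('%s (code %s)' % (err.message, err.code))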
|
apache-2.0
| 6,101,192,896,032,187,000 | 29.962264 | 79 | 0.572821 | false |
rbelzile/python-myfitnesspal
|
myfitnesspal/entry.py
|
1
|
1558
|
import re
from myfitnesspal.base import MFPBase
class Entry(MFPBase):
def __init__(self, name, nutrition):
self._name = name
self._nutrition = nutrition
        # Split the quantity and measuring unit out of the entry name
regex = r'(?P<short_name>.+), (?P<quantity>\d[\d\.]*) (?P<unit>[\w\(\)]+)(?: \(.*\))?'
match = re.search(regex, name)
self._quantity = None
self._unit = None
self._short_name = None
if match:
self._quantity = match.group('quantity')
self._unit = match.group('unit')
self._short_name = match.group('short_name')
def __getitem__(self, value):
return self.totals[value]
def keys(self):
return self.totals.keys()
@property
def name(self):
return self._name.strip()
@property
def nutrition_information(self):
return self._nutrition
@property
def totals(self):
return self.nutrition_information
def get_as_dict(self):
return {
'name': self.name,
'nutrition_information': self.nutrition_information,
}
def __unicode__(self):
return u'%s %s' % (
self.name,
self.nutrition_information,
)
@property
def short_name(self):
if self._short_name:
return self._short_name.strip()
return self._short_name
@property
def unit(self):
return self._unit
@property
def quantity(self):
return self._quantity
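if __name__ == '__main__':
    # Rough sketch of the name parsing above; the nutrition values are made up.
    entry = Entry('Banana, 1 medium', {'calories': 105, 'carbohydrates': 27})
    print('%s | %s | %s' % (entry.short_name, entry.quantity, entry.unit))
    print(entry['calories'])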
|
mit
| 735,692,817,802,413,600 | 22.606061 | 94 | 0.549422 | false |
vanant/googleads-dfa-reporting-samples
|
python/v2.1/get_content_categories.py
|
1
|
2155
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example displays all available content categories.
Tags: contentCategories.list
"""
__author__ = ('api.jimper@gmail.com (Jonathon Imperiosi)')
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'profile_id', type=int,
help='The ID of the profile to look up content categories for')
def main(argv):
# Authenticate and construct service.
service, flags = sample_tools.init(
argv, 'dfareporting', 'v2.1', __doc__, __file__, parents=[argparser],
scope=['https://www.googleapis.com/auth/dfareporting',
'https://www.googleapis.com/auth/dfatrafficking'])
profile_id = flags.profile_id
try:
# Construct the request.
request = service.contentCategories().list(profileId=profile_id)
while True:
# Execute request and print response.
response = request.execute()
for category in response['contentCategories']:
print ('Found content category with ID %s and name "%s".'
% (category['id'], category['name']))
if response['contentCategories'] and response['nextPageToken']:
request = service.contentCategories().list_next(request, response)
else:
break
except client.AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
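# Example invocation (a sketch; assumes OAuth client secrets are set up the way
# sample_tools.init() expects for these DFA samples; the ID and name shown are
# illustrative only):
#
#     $ python get_content_categories.py 1234567
#     Found content category with ID 42 and name "Sports".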
|
apache-2.0
| 4,475,333,366,051,865,600 | 30.231884 | 77 | 0.702552 | false |
kyuupichan/electrum
|
gui/qt/network_dialog.py
|
1
|
20239
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import socket
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import PyQt5.QtCore as QtCore
from electrum.i18n import _
from electrum import constants
from electrum.util import print_error
from electrum.network import serialize_server, deserialize_server
from .util import *
protocol_names = ['TCP', 'SSL']
protocol_letters = 'ts'
class NetworkDialog(QDialog):
def __init__(self, network, config, network_updated_signal_obj):
QDialog.__init__(self)
self.setWindowTitle(_('Network'))
self.setMinimumSize(500, 20)
self.nlayout = NetworkChoiceLayout(network, config)
self.network_updated_signal_obj = network_updated_signal_obj
vbox = QVBoxLayout(self)
vbox.addLayout(self.nlayout.layout())
vbox.addLayout(Buttons(CloseButton(self)))
self.network_updated_signal_obj.network_updated_signal.connect(
self.on_update)
network.register_callback(self.on_network, ['updated', 'interfaces'])
def on_network(self, event, *args):
self.network_updated_signal_obj.network_updated_signal.emit(event, args)
def on_update(self):
self.nlayout.update()
class NodesListWidget(QTreeWidget):
def __init__(self, parent):
QTreeWidget.__init__(self)
self.parent = parent
self.setHeaderLabels([_('Connected node'), _('Height')])
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.create_menu)
def create_menu(self, position):
item = self.currentItem()
if not item:
return
is_server = not bool(item.data(0, Qt.UserRole))
menu = QMenu()
if is_server:
server = item.data(1, Qt.UserRole)
menu.addAction(_("Use as server"), lambda: self.parent.follow_server(server))
else:
index = item.data(1, Qt.UserRole)
menu.addAction(_("Follow this branch"), lambda: self.parent.follow_branch(index))
menu.exec_(self.viewport().mapToGlobal(position))
def keyPressEvent(self, event):
if event.key() in [ Qt.Key_F2, Qt.Key_Return ]:
self.on_activated(self.currentItem(), self.currentColumn())
else:
QTreeWidget.keyPressEvent(self, event)
def on_activated(self, item, column):
# on 'enter' we show the menu
pt = self.visualItemRect(item).bottomLeft()
pt.setX(50)
self.customContextMenuRequested.emit(pt)
def update(self, network):
self.clear()
self.addChild = self.addTopLevelItem
chains = network.get_blockchains()
n_chains = len(chains)
for k, items in chains.items():
b = network.blockchains[k]
name = b.get_name()
if n_chains >1:
x = QTreeWidgetItem([name + '@%d'%b.get_checkpoint(), '%d'%b.height()])
x.setData(0, Qt.UserRole, 1)
x.setData(1, Qt.UserRole, b.checkpoint)
else:
x = self
for i in items:
star = ' *' if i == network.interface else ''
item = QTreeWidgetItem([i.host + star, '%d'%i.tip])
item.setData(0, Qt.UserRole, 0)
item.setData(1, Qt.UserRole, i.server)
x.addChild(item)
if n_chains>1:
self.addTopLevelItem(x)
x.setExpanded(True)
h = self.header()
h.setStretchLastSection(False)
h.setSectionResizeMode(0, QHeaderView.Stretch)
h.setSectionResizeMode(1, QHeaderView.ResizeToContents)
class ServerListWidget(QTreeWidget):
def __init__(self, parent):
QTreeWidget.__init__(self)
self.parent = parent
self.setHeaderLabels([_('Host'), _('Port')])
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.create_menu)
def create_menu(self, position):
item = self.currentItem()
if not item:
return
menu = QMenu()
server = item.data(1, Qt.UserRole)
menu.addAction(_("Use as server"), lambda: self.set_server(server))
menu.exec_(self.viewport().mapToGlobal(position))
def set_server(self, s):
host, port, protocol = deserialize_server(s)
self.parent.server_host.setText(host)
self.parent.server_port.setText(port)
self.parent.set_server()
def keyPressEvent(self, event):
if event.key() in [ Qt.Key_F2, Qt.Key_Return ]:
self.on_activated(self.currentItem(), self.currentColumn())
else:
QTreeWidget.keyPressEvent(self, event)
def on_activated(self, item, column):
# on 'enter' we show the menu
pt = self.visualItemRect(item).bottomLeft()
pt.setX(50)
self.customContextMenuRequested.emit(pt)
def update(self, servers, protocol, use_tor):
self.clear()
for _host, d in sorted(servers.items()):
if _host.endswith('.onion') and not use_tor:
continue
port = d.get(protocol)
if port:
x = QTreeWidgetItem([_host, port])
server = serialize_server(_host, port, protocol)
x.setData(1, Qt.UserRole, server)
self.addTopLevelItem(x)
h = self.header()
h.setStretchLastSection(False)
h.setSectionResizeMode(0, QHeaderView.Stretch)
h.setSectionResizeMode(1, QHeaderView.ResizeToContents)
class NetworkChoiceLayout(object):
def __init__(self, network, config, wizard=False):
self.network = network
self.config = config
self.protocol = None
self.tor_proxy = None
self.tabs = tabs = QTabWidget()
server_tab = QWidget()
proxy_tab = QWidget()
blockchain_tab = QWidget()
tabs.addTab(blockchain_tab, _('Overview'))
tabs.addTab(server_tab, _('Server'))
tabs.addTab(proxy_tab, _('Proxy'))
# server tab
grid = QGridLayout(server_tab)
grid.setSpacing(8)
self.server_host = QLineEdit()
self.server_host.setFixedWidth(200)
self.server_port = QLineEdit()
self.server_port.setFixedWidth(60)
self.autoconnect_cb = QCheckBox(_('Select server automatically'))
self.autoconnect_cb.setEnabled(self.config.is_modifiable('auto_connect'))
self.server_host.editingFinished.connect(self.set_server)
self.server_port.editingFinished.connect(self.set_server)
self.autoconnect_cb.clicked.connect(self.set_server)
self.autoconnect_cb.clicked.connect(self.update)
msg = ' '.join([
_("If auto-connect is enabled, Electrum will always use a server that is on the longest blockchain."),
_("If it is disabled, you have to choose a server you want to use. Electrum will warn you if your server is lagging.")
])
grid.addWidget(self.autoconnect_cb, 0, 0, 1, 3)
grid.addWidget(HelpButton(msg), 0, 4)
grid.addWidget(QLabel(_('Server') + ':'), 1, 0)
grid.addWidget(self.server_host, 1, 1, 1, 2)
grid.addWidget(self.server_port, 1, 3)
label = _('Server peers') if network.is_connected() else _('Default Servers')
grid.addWidget(QLabel(label), 2, 0, 1, 5)
self.servers_list = ServerListWidget(self)
grid.addWidget(self.servers_list, 3, 0, 1, 5)
# Proxy tab
grid = QGridLayout(proxy_tab)
grid.setSpacing(8)
# proxy setting
self.proxy_cb = QCheckBox(_('Use proxy'))
self.proxy_cb.clicked.connect(self.check_disable_proxy)
self.proxy_cb.clicked.connect(self.set_proxy)
self.proxy_mode = QComboBox()
self.proxy_mode.addItems(['SOCKS4', 'SOCKS5', 'HTTP'])
self.proxy_host = QLineEdit()
self.proxy_host.setFixedWidth(200)
self.proxy_port = QLineEdit()
self.proxy_port.setFixedWidth(60)
self.proxy_user = QLineEdit()
self.proxy_user.setPlaceholderText(_("Proxy user"))
self.proxy_password = QLineEdit()
self.proxy_password.setPlaceholderText(_("Password"))
self.proxy_password.setEchoMode(QLineEdit.Password)
self.proxy_password.setFixedWidth(60)
self.proxy_mode.currentIndexChanged.connect(self.set_proxy)
self.proxy_host.editingFinished.connect(self.set_proxy)
self.proxy_port.editingFinished.connect(self.set_proxy)
self.proxy_user.editingFinished.connect(self.set_proxy)
self.proxy_password.editingFinished.connect(self.set_proxy)
self.proxy_mode.currentIndexChanged.connect(self.proxy_settings_changed)
self.proxy_host.textEdited.connect(self.proxy_settings_changed)
self.proxy_port.textEdited.connect(self.proxy_settings_changed)
self.proxy_user.textEdited.connect(self.proxy_settings_changed)
self.proxy_password.textEdited.connect(self.proxy_settings_changed)
self.tor_cb = QCheckBox(_("Use Tor Proxy"))
self.tor_cb.setIcon(QIcon(":icons/tor_logo.png"))
self.tor_cb.hide()
self.tor_cb.clicked.connect(self.use_tor_proxy)
grid.addWidget(self.tor_cb, 1, 0, 1, 3)
grid.addWidget(self.proxy_cb, 2, 0, 1, 3)
grid.addWidget(HelpButton(_('Proxy settings apply to all connections: with Electrum servers, but also with third-party services.')), 2, 4)
grid.addWidget(self.proxy_mode, 4, 1)
grid.addWidget(self.proxy_host, 4, 2)
grid.addWidget(self.proxy_port, 4, 3)
grid.addWidget(self.proxy_user, 5, 2)
grid.addWidget(self.proxy_password, 5, 3)
grid.setRowStretch(7, 1)
# Blockchain Tab
grid = QGridLayout(blockchain_tab)
msg = ' '.join([
_("Electrum connects to several nodes in order to download block headers and find out the longest blockchain."),
_("This blockchain is used to verify the transactions sent by your transaction server.")
])
self.status_label = QLabel('')
grid.addWidget(QLabel(_('Status') + ':'), 0, 0)
grid.addWidget(self.status_label, 0, 1, 1, 3)
grid.addWidget(HelpButton(msg), 0, 4)
self.server_label = QLabel('')
msg = _("Electrum sends your wallet addresses to a single server, in order to receive your transaction history.")
grid.addWidget(QLabel(_('Server') + ':'), 1, 0)
grid.addWidget(self.server_label, 1, 1, 1, 3)
grid.addWidget(HelpButton(msg), 1, 4)
self.height_label = QLabel('')
msg = _('This is the height of your local copy of the blockchain.')
grid.addWidget(QLabel(_('Blockchain') + ':'), 2, 0)
grid.addWidget(self.height_label, 2, 1)
grid.addWidget(HelpButton(msg), 2, 4)
self.split_label = QLabel('')
grid.addWidget(self.split_label, 3, 0, 1, 3)
self.nodes_list_widget = NodesListWidget(self)
grid.addWidget(self.nodes_list_widget, 5, 0, 1, 5)
vbox = QVBoxLayout()
vbox.addWidget(tabs)
self.layout_ = vbox
# tor detector
self.td = td = TorDetector()
td.found_proxy.connect(self.suggest_proxy)
td.start()
self.fill_in_proxy_settings()
self.update()
def check_disable_proxy(self, b):
if not self.config.is_modifiable('proxy'):
b = False
for w in [self.proxy_mode, self.proxy_host, self.proxy_port, self.proxy_user, self.proxy_password]:
w.setEnabled(b)
def enable_set_server(self):
if self.config.is_modifiable('server'):
enabled = not self.autoconnect_cb.isChecked()
self.server_host.setEnabled(enabled)
self.server_port.setEnabled(enabled)
self.servers_list.setEnabled(enabled)
else:
for w in [self.autoconnect_cb, self.server_host, self.server_port, self.servers_list]:
w.setEnabled(False)
def update(self):
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
self.server_host.setText(host)
self.server_port.setText(port)
self.autoconnect_cb.setChecked(auto_connect)
host = self.network.interface.host if self.network.interface else _('None')
self.server_label.setText(host)
self.set_protocol(protocol)
self.servers = self.network.get_servers()
self.servers_list.update(self.servers, self.protocol, self.tor_cb.isChecked())
self.enable_set_server()
height_str = "%d "%(self.network.get_local_height()) + _('blocks')
self.height_label.setText(height_str)
n = len(self.network.get_interfaces())
status = _("Connected to {0} nodes.").format(n) if n else _("Not connected")
self.status_label.setText(status)
chains = self.network.get_blockchains()
if len(chains)>1:
chain = self.network.blockchain()
checkpoint = chain.get_checkpoint()
name = chain.get_name()
msg = _('Chain split detected at block {0}').format(checkpoint) + '\n'
msg += (_('You are following branch') if auto_connect else _('Your server is on branch'))+ ' ' + name
msg += ' (%d %s)' % (chain.get_branch_size(), _('blocks'))
else:
msg = ''
self.split_label.setText(msg)
self.nodes_list_widget.update(self.network)
def fill_in_proxy_settings(self):
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
if not proxy_config:
proxy_config = {"mode": "none", "host": "localhost", "port": "9050"}
b = proxy_config.get('mode') != "none"
self.check_disable_proxy(b)
if b:
self.proxy_cb.setChecked(True)
self.proxy_mode.setCurrentIndex(
self.proxy_mode.findText(str(proxy_config.get("mode").upper())))
self.proxy_host.setText(proxy_config.get("host"))
self.proxy_port.setText(proxy_config.get("port"))
self.proxy_user.setText(proxy_config.get("user", ""))
self.proxy_password.setText(proxy_config.get("password", ""))
def layout(self):
return self.layout_
def set_protocol(self, protocol):
if protocol != self.protocol:
self.protocol = protocol
def change_protocol(self, use_ssl):
p = 's' if use_ssl else 't'
host = self.server_host.text()
pp = self.servers.get(host, constants.net.DEFAULT_PORTS)
if p not in pp.keys():
p = list(pp.keys())[0]
port = pp[p]
self.server_host.setText(host)
self.server_port.setText(port)
self.set_protocol(p)
self.set_server()
def follow_branch(self, index):
self.network.follow_chain(index)
self.update()
def follow_server(self, server):
self.network.switch_to_interface(server)
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
host, port, protocol = deserialize_server(server)
self.network.set_parameters(host, port, protocol, proxy, auto_connect)
self.update()
def server_changed(self, x):
if x:
self.change_server(str(x.text(0)), self.protocol)
def change_server(self, host, protocol):
pp = self.servers.get(host, constants.net.DEFAULT_PORTS)
if protocol and protocol not in protocol_letters:
protocol = None
if protocol:
port = pp.get(protocol)
if port is None:
protocol = None
if not protocol:
if 's' in pp.keys():
protocol = 's'
port = pp.get(protocol)
else:
protocol = list(pp.keys())[0]
port = pp.get(protocol)
self.server_host.setText(host)
self.server_port.setText(port)
def accept(self):
pass
def set_server(self):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
host = str(self.server_host.text())
port = str(self.server_port.text())
auto_connect = self.autoconnect_cb.isChecked()
self.network.set_parameters(host, port, protocol, proxy, auto_connect)
def set_proxy(self):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
if self.proxy_cb.isChecked():
proxy = { 'mode':str(self.proxy_mode.currentText()).lower(),
'host':str(self.proxy_host.text()),
'port':str(self.proxy_port.text()),
'user':str(self.proxy_user.text()),
'password':str(self.proxy_password.text())}
else:
proxy = None
self.tor_cb.setChecked(False)
self.network.set_parameters(host, port, protocol, proxy, auto_connect)
def suggest_proxy(self, found_proxy):
self.tor_proxy = found_proxy
self.tor_cb.setText("Use Tor proxy at port " + str(found_proxy[1]))
if self.proxy_mode.currentIndex() == self.proxy_mode.findText('SOCKS5') \
and self.proxy_host.text() == "127.0.0.1" \
and self.proxy_port.text() == str(found_proxy[1]):
self.tor_cb.setChecked(True)
self.tor_cb.show()
def use_tor_proxy(self, use_it):
if not use_it:
self.proxy_cb.setChecked(False)
else:
socks5_mode_index = self.proxy_mode.findText('SOCKS5')
if socks5_mode_index == -1:
print_error("[network_dialog] can't find proxy_mode 'SOCKS5'")
return
self.proxy_mode.setCurrentIndex(socks5_mode_index)
self.proxy_host.setText("127.0.0.1")
self.proxy_port.setText(str(self.tor_proxy[1]))
self.proxy_user.setText("")
self.proxy_password.setText("")
self.tor_cb.setChecked(True)
self.proxy_cb.setChecked(True)
self.check_disable_proxy(use_it)
self.set_proxy()
def proxy_settings_changed(self):
self.tor_cb.setChecked(False)
class TorDetector(QThread):
found_proxy = pyqtSignal(object)
def __init__(self):
QThread.__init__(self)
def run(self):
# Probable ports for Tor to listen at
ports = [9050, 9150]
for p in ports:
if TorDetector.is_tor_port(p):
self.found_proxy.emit(("127.0.0.1", p))
return
@staticmethod
def is_tor_port(port):
try:
s = (socket._socketobject if hasattr(socket, "_socketobject") else socket.socket)(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(0.1)
s.connect(("127.0.0.1", port))
# Tor responds uniquely to HTTP-like requests
s.send(b"GET\n")
if b"Tor is not an HTTP Proxy" in s.recv(1024):
return True
except socket.error:
pass
return False
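if __name__ == '__main__':
    # Standalone sketch (not part of Electrum's GUI flow): probe the usual Tor
    # SOCKS ports directly with the static helper above.
    for probe_port in (9050, 9150):
        print_error('Tor listening on %d: %s' % (probe_port, TorDetector.is_tor_port(probe_port)))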
|
mit
| -5,699,706,547,034,703,000 | 38.071429 | 146 | 0.611987 | false |
explosion/catalogue
|
catalogue/_importlib_metadata/__init__.py
|
1
|
19681
|
import os
import re
import abc
import csv
import sys
import zipp
import email
import pathlib
import operator
import functools
import itertools
import posixpath
import collections
from ._compat import (
NullFinder,
PyPy_repr,
install,
Protocol,
)
from configparser import ConfigParser
from contextlib import suppress
from importlib import import_module
from importlib.abc import MetaPathFinder
from itertools import starmap
from typing import Any, List, TypeVar, Union
__all__ = [
'Distribution',
'DistributionFinder',
'PackageNotFoundError',
'distribution',
'distributions',
'entry_points',
'files',
'metadata',
'requires',
'version',
]
class PackageNotFoundError(ModuleNotFoundError):
"""The package was not found."""
def __str__(self):
tmpl = "No package metadata was found for {self.name}"
return tmpl.format(**locals())
@property
def name(self):
(name,) = self.args
return name
class EntryPoint(
PyPy_repr, collections.namedtuple('EntryPointBase', 'name value group')
):
"""An entry point as defined by Python packaging conventions.
See `the packaging docs on entry points
<https://packaging.python.org/specifications/entry-points/>`_
for more information.
"""
pattern = re.compile(
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
"""
A regular expression describing the syntax for an entry point,
which might look like:
- module
- package.module
- package.module:attribute
- package.module:object.attribute
- package.module:attr [extra1, extra2]
Other combinations are possible as well.
The expression is lenient about whitespace around the ':',
following the attr, and following any extras.
"""
def load(self):
"""Load the entry point from its definition. If only a module
is indicated by the value, return that module. Otherwise,
return the named object.
"""
match = self.pattern.match(self.value)
module = import_module(match.group('module'))
attrs = filter(None, (match.group('attr') or '').split('.'))
return functools.reduce(getattr, attrs, module)
@property
def module(self):
match = self.pattern.match(self.value)
return match.group('module')
@property
def attr(self):
match = self.pattern.match(self.value)
return match.group('attr')
@property
def extras(self):
match = self.pattern.match(self.value)
return list(re.finditer(r'\w+', match.group('extras') or ''))
@classmethod
def _from_config(cls, config):
return [
cls(name, value, group)
for group in config.sections()
for name, value in config.items(group)
]
@classmethod
def _from_text(cls, text):
config = ConfigParser(delimiters='=')
# case sensitive: https://stackoverflow.com/q/1611799/812183
config.optionxform = str
config.read_string(text)
return EntryPoint._from_config(config)
def __iter__(self):
"""
Supply iter so one may construct dicts of EntryPoints easily.
"""
return iter((self.name, self))
def __reduce__(self):
return (
self.__class__,
(self.name, self.value, self.group),
)
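# Quick sketch of the entry point syntax handled by the pattern above
# (group/module/attr names are illustrative only):
#
#     >>> ep = EntryPoint('myplugin', 'pkg.mod:obj.attr [extra1]', 'mygroup')
#     >>> ep.module, ep.attr
#     ('pkg.mod', 'obj.attr')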
class PackagePath(pathlib.PurePosixPath):
"""A reference to a path in a package"""
def read_text(self, encoding='utf-8'):
with self.locate().open(encoding=encoding) as stream:
return stream.read()
def read_binary(self):
with self.locate().open('rb') as stream:
return stream.read()
def locate(self):
"""Return a path-like object for this path"""
return self.dist.locate_file(self)
class FileHash:
def __init__(self, spec):
self.mode, _, self.value = spec.partition('=')
def __repr__(self):
return '<FileHash mode: {} value: {}>'.format(self.mode, self.value)
_T = TypeVar("_T")
class PackageMetadata(Protocol):
def __len__(self) -> int:
... # pragma: no cover
def __contains__(self, item: str) -> bool:
... # pragma: no cover
def __getitem__(self, key: str) -> str:
... # pragma: no cover
def get_all(self, name: str, failobj: _T = ...) -> Union[List[Any], _T]:
"""
Return all values associated with a possibly multi-valued key.
"""
class Distribution:
"""A Python distribution package."""
@abc.abstractmethod
def read_text(self, filename):
"""Attempt to load metadata file given by the name.
:param filename: The name of the file in the distribution info.
:return: The text if found, otherwise None.
"""
@abc.abstractmethod
def locate_file(self, path):
"""
Given a path to a file in this distribution, return a path
to it.
"""
@classmethod
def from_name(cls, name):
"""Return the Distribution for the given package name.
:param name: The name of the distribution package to search for.
:return: The Distribution instance (or subclass thereof) for the named
package, if found.
:raises PackageNotFoundError: When the named package's distribution
metadata cannot be found.
"""
for resolver in cls._discover_resolvers():
dists = resolver(DistributionFinder.Context(name=name))
dist = next(iter(dists), None)
if dist is not None:
return dist
else:
raise PackageNotFoundError(name)
@classmethod
def discover(cls, **kwargs):
"""Return an iterable of Distribution objects for all packages.
Pass a ``context`` or pass keyword arguments for constructing
a context.
:context: A ``DistributionFinder.Context`` object.
:return: Iterable of Distribution objects for all packages.
"""
context = kwargs.pop('context', None)
if context and kwargs:
raise ValueError("cannot accept context and kwargs")
context = context or DistributionFinder.Context(**kwargs)
return itertools.chain.from_iterable(
resolver(context) for resolver in cls._discover_resolvers()
)
@staticmethod
def at(path):
"""Return a Distribution for the indicated metadata path
:param path: a string or path-like object
:return: a concrete Distribution instance for the path
"""
return PathDistribution(pathlib.Path(path))
@staticmethod
def _discover_resolvers():
"""Search the meta_path for resolvers."""
declared = (
getattr(finder, '_catalogue_find_distributions', None) for finder in sys.meta_path
)
return filter(None, declared)
@classmethod
def _local(cls, root='.'):
from pep517 import build, meta
system = build.compat_system(root)
builder = functools.partial(
meta.build,
source_dir=root,
system=system,
)
return PathDistribution(zipp.Path(meta.build_as_zip(builder)))
@property
def metadata(self) -> PackageMetadata:
"""Return the parsed metadata for this Distribution.
The returned object will have keys that name the various bits of
metadata. See PEP 566 for details.
"""
text = (
self.read_text('METADATA')
or self.read_text('PKG-INFO')
# This last clause is here to support old egg-info files. Its
# effect is to just end up using the PathDistribution's self._path
# (which points to the egg-info file) attribute unchanged.
or self.read_text('')
)
return email.message_from_string(text)
@property
def version(self):
"""Return the 'Version' metadata for the distribution package."""
return self.metadata['Version']
@property
def entry_points(self):
return EntryPoint._from_text(self.read_text('entry_points.txt'))
@property
def files(self):
"""Files in this distribution.
:return: List of PackagePath for this distribution or None
Result is `None` if the metadata file that enumerates files
(i.e. RECORD for dist-info or SOURCES.txt for egg-info) is
missing.
Result may be empty if the metadata exists but is empty.
"""
file_lines = self._read_files_distinfo() or self._read_files_egginfo()
def make_file(name, hash=None, size_str=None):
result = PackagePath(name)
result.hash = FileHash(hash) if hash else None
result.size = int(size_str) if size_str else None
result.dist = self
return result
return file_lines and list(starmap(make_file, csv.reader(file_lines)))
def _read_files_distinfo(self):
"""
Read the lines of RECORD
"""
text = self.read_text('RECORD')
return text and text.splitlines()
def _read_files_egginfo(self):
"""
SOURCES.txt might contain literal commas, so wrap each line
in quotes.
"""
text = self.read_text('SOURCES.txt')
return text and map('"{}"'.format, text.splitlines())
@property
def requires(self):
"""Generated requirements specified for this Distribution"""
reqs = self._read_dist_info_reqs() or self._read_egg_info_reqs()
return reqs and list(reqs)
def _read_dist_info_reqs(self):
return self.metadata.get_all('Requires-Dist')
def _read_egg_info_reqs(self):
source = self.read_text('requires.txt')
return source and self._deps_from_requires_text(source)
@classmethod
def _deps_from_requires_text(cls, source):
section_pairs = cls._read_sections(source.splitlines())
sections = {
section: list(map(operator.itemgetter('line'), results))
for section, results in itertools.groupby(
section_pairs, operator.itemgetter('section')
)
}
return cls._convert_egg_info_reqs_to_simple_reqs(sections)
@staticmethod
def _read_sections(lines):
section = None
for line in filter(None, lines):
section_match = re.match(r'\[(.*)\]$', line)
if section_match:
section = section_match.group(1)
continue
yield locals()
@staticmethod
def _convert_egg_info_reqs_to_simple_reqs(sections):
"""
Historically, setuptools would solicit and store 'extra'
requirements, including those with environment markers,
in separate sections. More modern tools expect each
dependency to be defined separately, with any relevant
extras and environment markers attached directly to that
requirement. This method converts the former to the
latter. See _test_deps_from_requires_text for an example.
"""
def make_condition(name):
return name and 'extra == "{name}"'.format(name=name)
def parse_condition(section):
section = section or ''
extra, sep, markers = section.partition(':')
if extra and markers:
markers = '({markers})'.format(markers=markers)
conditions = list(filter(None, [markers, make_condition(extra)]))
return '; ' + ' and '.join(conditions) if conditions else ''
for section, deps in sections.items():
for dep in deps:
yield dep + parse_condition(section)
class DistributionFinder(MetaPathFinder):
"""
A MetaPathFinder capable of discovering installed distributions.
"""
class Context:
"""
Keyword arguments presented by the caller to
``distributions()`` or ``Distribution.discover()``
to narrow the scope of a search for distributions
in all DistributionFinders.
Each DistributionFinder may expect any parameters
and should attempt to honor the canonical
parameters defined below when appropriate.
"""
name = None
"""
Specific name for which a distribution finder should match.
A name of ``None`` matches all distributions.
"""
def __init__(self, **kwargs):
vars(self).update(kwargs)
@property
def path(self):
"""
The path that a distribution finder should search.
Typically refers to Python package paths and defaults
to ``sys.path``.
"""
return vars(self).get('path', sys.path)
@abc.abstractmethod
def _catalogue_find_distributions(self, context=Context()):
"""
Find distributions.
Return an iterable of all Distribution instances capable of
loading the metadata for packages matching the ``context``,
a DistributionFinder.Context instance.
"""
class FastPath:
"""
Micro-optimized class for searching a path for
children.
"""
def __init__(self, root):
self.root = str(root)
self.base = os.path.basename(self.root).lower()
def joinpath(self, child):
return pathlib.Path(self.root, child)
def children(self):
with suppress(Exception):
return os.listdir(self.root or '')
with suppress(Exception):
return self.zip_children()
return []
def zip_children(self):
zip_path = zipp.Path(self.root)
names = zip_path.root.namelist()
self.joinpath = zip_path.joinpath
return dict.fromkeys(child.split(posixpath.sep, 1)[0] for child in names)
def search(self, name):
return (
self.joinpath(child)
for child in self.children()
if name.matches(child, self.base)
)
class Prepared:
"""
A prepared search for metadata on a possibly-named package.
"""
normalized = None
suffixes = '.dist-info', '.egg-info'
exact_matches = [''][:0]
def __init__(self, name):
self.name = name
if name is None:
return
self.normalized = self.normalize(name)
self.exact_matches = [self.normalized + suffix for suffix in self.suffixes]
@staticmethod
def normalize(name):
"""
PEP 503 normalization plus dashes as underscores.
"""
return re.sub(r"[-_.]+", "-", name).lower().replace('-', '_')
@staticmethod
def legacy_normalize(name):
"""
Normalize the package name as found in the convention in
older packaging tools versions and specs.
"""
return name.lower().replace('-', '_')
def matches(self, cand, base):
low = cand.lower()
pre, ext = os.path.splitext(low)
name, sep, rest = pre.partition('-')
return (
low in self.exact_matches
or ext in self.suffixes
and (not self.normalized or name.replace('.', '_') == self.normalized)
# legacy case:
or self.is_egg(base)
and low == 'egg-info'
)
def is_egg(self, base):
normalized = self.legacy_normalize(self.name or '')
prefix = normalized + '-' if normalized else ''
versionless_egg_name = normalized + '.egg' if self.name else ''
return (
base == versionless_egg_name
or base.startswith(prefix)
and base.endswith('.egg')
)
@install
class MetadataPathFinder(NullFinder, DistributionFinder):
"""A degenerate finder for distribution packages on the file system.
This finder supplies only a find_distributions() method for versions
of Python that do not have a PathFinder find_distributions().
"""
def _catalogue_find_distributions(self, context=DistributionFinder.Context()):
"""
Find distributions.
Return an iterable of all Distribution instances capable of
loading the metadata for packages matching ``context.name``
(or all names if ``None`` indicated) along the paths in the list
of directories ``context.path``.
"""
found = self._search_paths(context.name, context.path)
return map(PathDistribution, found)
@classmethod
def _search_paths(cls, name, paths):
"""Find metadata directories in paths heuristically."""
return itertools.chain.from_iterable(
path.search(Prepared(name)) for path in map(FastPath, paths)
)
class PathDistribution(Distribution):
def __init__(self, path):
"""Construct a distribution from a path to the metadata directory.
:param path: A pathlib.Path or similar object supporting
.joinpath(), __div__, .parent, and .read_text().
"""
self._path = path
def read_text(self, filename):
with suppress(
FileNotFoundError,
IsADirectoryError,
KeyError,
NotADirectoryError,
PermissionError,
):
return self._path.joinpath(filename).read_text(encoding='utf-8')
read_text.__doc__ = Distribution.read_text.__doc__
def locate_file(self, path):
return self._path.parent / path
def distribution(distribution_name):
"""Get the ``Distribution`` instance for the named package.
:param distribution_name: The name of the distribution package as a string.
:return: A ``Distribution`` instance (or subclass thereof).
"""
return Distribution.from_name(distribution_name)
def distributions(**kwargs):
"""Get all ``Distribution`` instances in the current environment.
:return: An iterable of ``Distribution`` instances.
"""
return Distribution.discover(**kwargs)
def metadata(distribution_name) -> PackageMetadata:
"""Get the metadata for the named package.
:param distribution_name: The name of the distribution package to query.
:return: A PackageMetadata containing the parsed metadata.
"""
return Distribution.from_name(distribution_name).metadata
def version(distribution_name):
"""Get the version string for the named package.
:param distribution_name: The name of the distribution package to query.
:return: The version string for the package as defined in the package's
"Version" metadata key.
"""
return distribution(distribution_name).version
def entry_points():
"""Return EntryPoint objects for all installed packages.
:return: EntryPoint objects for all installed packages.
"""
eps = itertools.chain.from_iterable(dist.entry_points for dist in distributions())
by_group = operator.attrgetter('group')
ordered = sorted(eps, key=by_group)
grouped = itertools.groupby(ordered, by_group)
return {group: tuple(eps) for group, eps in grouped}
def files(distribution_name):
"""Return a list of files for the named package.
:param distribution_name: The name of the distribution package to query.
:return: List of files composing the distribution.
"""
return distribution(distribution_name).files
def requires(distribution_name):
"""
Return a list of requirements for the named package.
:return: An iterator of requirements, suitable for
packaging.requirement.Requirement.
"""
return distribution(distribution_name).requires
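# Usage sketch from client code (this module uses relative imports, so it is
# meant to be imported as part of the catalogue package; 'zipp' is only an
# example distribution name, chosen because this module already depends on it):
#
#     from catalogue._importlib_metadata import version, entry_points, requires
#     print(version('zipp'))
#     print(sorted(entry_points()))
#     print(requires('zipp'))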
|
mit
| 1,404,385,101,869,043,500 | 29.418856 | 94 | 0.614349 | false |
benkonrath/django-guardian
|
guardian/__init__.py
|
1
|
1073
|
"""
Implementation of per object permissions for Django.
"""
from __future__ import unicode_literals
from . import checks
try:
from .version import version as __version__
__version__split__ = __version__.split(".")
VERSION = tuple(['1', '5', '7'])
def get_version():
"""
Returns shorter version (digit parts only) as string.
"""
return '.'.join((str(each) for each in VERSION[:3]))
except ImportError:
pass
default_app_config = 'guardian.apps.GuardianConfig'
def monkey_patch_user():
from .compat import get_user_model
from .utils import get_anonymous_user
from .models import UserObjectPermission
User = get_user_model()
# Prototype User and Group methods
setattr(User, 'get_anonymous', staticmethod(lambda: get_anonymous_user()))
setattr(User, 'add_obj_perm',
lambda self, perm, obj: UserObjectPermission.objects.assign_perm(perm, self, obj))
setattr(User, 'del_obj_perm',
lambda self, perm, obj: UserObjectPermission.objects.remove_perm(perm, self, obj))
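# Usage sketch (assumes a configured Django project with guardian installed and
# migrated; 'site' and the 'change_site' codename below are placeholders, not
# defined here):
#
#     monkey_patch_user()
#     from django.contrib.auth import get_user_model
#     user = get_user_model().objects.create(username='joe')
#     user.add_obj_perm('change_site', site)
#     user.del_obj_perm('change_site', site)
#     anon = get_user_model().get_anonymous()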
|
bsd-2-clause
| -8,041,975,668,687,191,000 | 29.657143 | 94 | 0.657968 | false |
kitianFresh/awesome-python3-webapp
|
www/app.py
|
1
|
5548
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
async web application.
'''
import logging; logging.basicConfig(level=logging.INFO)
import asyncio, os, json, time
from datetime import datetime
from aiohttp import web
from jinja2 import Environment, FileSystemLoader
from config import configs
import orm
from coroweb import add_routes, add_static
from handlers import cookie2user, COOKIE_NAME
def init_jinja2(app, **kw):
logging.info('init jinja2...')
options = dict(
autoescape = kw.get('autoescape', True),
block_start_string = kw.get('block_start_string', '{%'),
block_end_string = kw.get('block_end_string', '%}'),
variable_start_string = kw.get('variable_start_string', '{{'),
variable_end_string = kw.get('variable_end_string', '}}'),
auto_reload = kw.get('auto_reload', True)
)
path = kw.get('path', None)
if path is None:
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
logging.info('set jinja2 template path: %s' % path)
env = Environment(loader=FileSystemLoader(path), **options)
filters = kw.get('filters', None)
if filters is not None:
for name, f in filters.items():
env.filters[name] = f
app['__templating__'] = env
@asyncio.coroutine
def logger_factory(app, handler):
@asyncio.coroutine
def logger(request):
logging.info('Request: %s %s' % (request.method, request.path))
# yield from asyncio.sleep(0.3)
return (yield from handler(request))
return logger
@asyncio.coroutine
def auth_factory(app, handler):
@asyncio.coroutine
def auth(request):
logging.info('check user: %s %s' % (request.method, request.path))
request.__user__ = None
cookie_str = request.cookies.get(COOKIE_NAME)
if cookie_str:
user = yield from cookie2user(cookie_str)
if user:
logging.info('set current user: %s' % user.email)
request.__user__ = user
if request.path.startswith('/manage/') and (request.__user__ is None or not request.__user__.admin):
return web.HTTPFound('/signin')
return (yield from handler(request))
return auth
@asyncio.coroutine
def data_factory(app, handler):
@asyncio.coroutine
def parse_data(request):
if request.method == 'POST':
if request.content_type.startswith('application/json'):
request.__data__ = yield from request.json()
logging.info('request json: %s' % str(request.__data__))
elif request.content_type.startswith('application/x-www-form-urlencoded'):
request.__data__ = yield from request.post()
logging.info('request form: %s' % str(request.__data__))
return (yield from handler(request))
return parse_data
@asyncio.coroutine
def response_factory(app, handler):
@asyncio.coroutine
def response(request):
logging.info('Response handler...')
r = yield from handler(request)
if isinstance(r, web.StreamResponse):
return r
if isinstance(r, bytes):
resp = web.Response(body=r)
resp.content_type = 'application/octet-stream'
return resp
if isinstance(r, str):
if r.startswith('redirect:'):
return web.HTTPFound(r[9:])
resp = web.Response(body=r.encode('utf-8'))
resp.content_type = 'text/html;charset=utf-8'
return resp
if isinstance(r, dict):
template = r.get('__template__')
if template is None:
resp = web.Response(body=json.dumps(r, ensure_ascii=False, default=lambda o: o.__dict__).encode('utf-8'))
resp.content_type = 'application/json;charset=utf-8'
return resp
else:
r['__user__'] = request.__user__
resp = web.Response(body=app['__templating__'].get_template(template).render(**r).encode('utf-8'))
resp.content_type = 'text/html;charset=utf-8'
return resp
        if isinstance(r, int) and r >= 100 and r < 600:
            return web.Response(r)
if isinstance(r, tuple) and len(r) == 2:
t, m = r
if isinstance(t, int) and t >= 100 and t < 600:
return web.Response(t, str(m))
# default:
resp = web.Response(body=str(r).encode('utf-8'))
resp.content_type = 'text/plain;charset=utf-8'
return resp
return response
def datetime_filter(t):
delta = int(time.time() - t)
if delta < 60:
        return u'1 minute ago'
    if delta < 3600:
        return u'%s minutes ago' % (delta // 60)
    if delta < 86400:
        return u'%s hours ago' % (delta // 3600)
    if delta < 604800:
        return u'%s days ago' % (delta // 86400)
    dt = datetime.fromtimestamp(t)
    return u'%s-%s-%s' % (dt.year, dt.month, dt.day)
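# Rough sketch of what the filter returns (it is registered below as the
# ``datetime`` Jinja2 filter, so templates can write ``{{ some_timestamp|datetime }}``;
# ``some_timestamp`` is a placeholder name):
#
#     datetime_filter(time.time() - 30)       # -> the "1 minute ago" string
#     datetime_filter(time.time() - 7200)     # -> the "2 hours ago" string
#     datetime_filter(time.time() - 2592000)  # -> an absolute year/month/day string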
@asyncio.coroutine
def init(loop):
yield from orm.create_pool(loop=loop, **configs.db)
app = web.Application(loop=loop, middlewares=[
logger_factory, auth_factory, response_factory
])
init_jinja2(app, filters=dict(datetime=datetime_filter))
add_routes(app, 'handlers')
add_static(app)
srv = yield from loop.create_server(app.make_handler(), '127.0.0.1', 8888)
logging.info('server started at http://127.0.0.1:8888...')
return srv
loop = asyncio.get_event_loop()
loop.run_until_complete(init(loop))
loop.run_forever()
|
apache-2.0
| -2,934,702,981,828,618,000 | 35.078431 | 121 | 0.597826 | false |
fernandog/Medusa
|
ext/sqlalchemy/ext/mutable.py
|
1
|
32415
|
# ext/mutable.py
# Copyright (C) 2005-2018 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""Provide support for tracking of in-place changes to scalar values,
which are propagated into ORM change events on owning parent objects.
.. versionadded:: 0.7 :mod:`sqlalchemy.ext.mutable` replaces SQLAlchemy's
legacy approach to in-place mutations of scalar values; see
:ref:`07_migration_mutation_extension`.
.. _mutable_scalars:
Establishing Mutability on Scalar Column Values
===============================================
A typical example of a "mutable" structure is a Python dictionary.
Following the example introduced in :ref:`types_toplevel`, we
begin with a custom type that marshals Python dictionaries into
JSON strings before being persisted::
from sqlalchemy.types import TypeDecorator, VARCHAR
import json
class JSONEncodedDict(TypeDecorator):
"Represents an immutable structure as a json-encoded string."
impl = VARCHAR
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value)
return value
def process_result_value(self, value, dialect):
if value is not None:
value = json.loads(value)
return value
The usage of ``json`` is only for the purposes of example. The
:mod:`sqlalchemy.ext.mutable` extension can be used
with any type whose target Python type may be mutable, including
:class:`.PickleType`, :class:`.postgresql.ARRAY`, etc.
When using the :mod:`sqlalchemy.ext.mutable` extension, the value itself
tracks all parents which reference it. Below, we illustrate a simple
version of the :class:`.MutableDict` dictionary object, which applies
the :class:`.Mutable` mixin to a plain Python dictionary::
from sqlalchemy.ext.mutable import Mutable
class MutableDict(Mutable, dict):
@classmethod
def coerce(cls, key, value):
"Convert plain dictionaries to MutableDict."
if not isinstance(value, MutableDict):
if isinstance(value, dict):
return MutableDict(value)
# this call will raise ValueError
return Mutable.coerce(key, value)
else:
return value
def __setitem__(self, key, value):
"Detect dictionary set events and emit change events."
dict.__setitem__(self, key, value)
self.changed()
def __delitem__(self, key):
"Detect dictionary del events and emit change events."
dict.__delitem__(self, key)
self.changed()
The above dictionary class takes the approach of subclassing the Python
built-in ``dict`` to produce a dict
subclass which routes all mutation events through ``__setitem__``. There are
variants on this approach, such as subclassing ``UserDict.UserDict`` or
``collections.MutableMapping``; the part that's important to this example is
that the :meth:`.Mutable.changed` method is called whenever an in-place
change to the datastructure takes place.
We also redefine the :meth:`.Mutable.coerce` method which will be used to
convert any values that are not instances of ``MutableDict``, such
as the plain dictionaries returned by the ``json`` module, into the
appropriate type. Defining this method is optional; we could just as well
created our ``JSONEncodedDict`` such that it always returns an instance
of ``MutableDict``, and additionally ensured that all calling code
uses ``MutableDict`` explicitly. When :meth:`.Mutable.coerce` is not
overridden, any values applied to a parent object which are not instances
of the mutable type will raise a ``ValueError``.
Our new ``MutableDict`` type offers a class method
:meth:`~.Mutable.as_mutable` which we can use within column metadata
to associate with types. This method grabs the given type object or
class and associates a listener that will detect all future mappings
of this type, applying event listening instrumentation to the mapped
attribute. Such as, with classical table metadata::
from sqlalchemy import Table, Column, Integer
my_data = Table('my_data', metadata,
Column('id', Integer, primary_key=True),
Column('data', MutableDict.as_mutable(JSONEncodedDict))
)
Above, :meth:`~.Mutable.as_mutable` returns an instance of ``JSONEncodedDict``
(if the type object was not an instance already), which will intercept any
attributes which are mapped against this type. Below we establish a simple
mapping against the ``my_data`` table::
from sqlalchemy import mapper
class MyDataClass(object):
pass
# associates mutation listeners with MyDataClass.data
mapper(MyDataClass, my_data)
The ``MyDataClass.data`` member will now be notified of in place changes
to its value.
There's no difference in usage when using declarative::
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class MyDataClass(Base):
__tablename__ = 'my_data'
id = Column(Integer, primary_key=True)
data = Column(MutableDict.as_mutable(JSONEncodedDict))
Any in-place changes to the ``MyDataClass.data`` member
will flag the attribute as "dirty" on the parent object::
>>> from sqlalchemy.orm import Session
>>> sess = Session()
>>> m1 = MyDataClass(data={'value1':'foo'})
>>> sess.add(m1)
>>> sess.commit()
>>> m1.data['value1'] = 'bar'
>>> assert m1 in sess.dirty
True
The ``MutableDict`` can be associated with all future instances
of ``JSONEncodedDict`` in one step, using
:meth:`~.Mutable.associate_with`. This is similar to
:meth:`~.Mutable.as_mutable` except it will intercept all occurrences
of ``MutableDict`` in all mappings unconditionally, without
the need to declare it individually::
MutableDict.associate_with(JSONEncodedDict)
class MyDataClass(Base):
__tablename__ = 'my_data'
id = Column(Integer, primary_key=True)
data = Column(JSONEncodedDict)
Supporting Pickling
--------------------
The key to the :mod:`sqlalchemy.ext.mutable` extension relies upon the
placement of a ``weakref.WeakKeyDictionary`` upon the value object, which
stores a mapping of parent mapped objects keyed to the attribute name under
which they are associated with this value. ``WeakKeyDictionary`` objects are
not picklable, due to the fact that they contain weakrefs and function
callbacks. In our case, this is a good thing, since if this dictionary were
picklable, it could lead to an excessively large pickle size for our value
objects that are pickled by themselves outside of the context of the parent.
The developer responsibility here is only to provide a ``__getstate__`` method
that excludes the :meth:`~MutableBase._parents` collection from the pickle
stream::
class MyMutableType(Mutable):
def __getstate__(self):
d = self.__dict__.copy()
d.pop('_parents', None)
return d
With our dictionary example, we need to return the contents of the dict itself
(and also restore them on __setstate__)::
class MutableDict(Mutable, dict):
# ....
def __getstate__(self):
return dict(self)
def __setstate__(self, state):
self.update(state)
In the case that our mutable value object is pickled as it is attached to one
or more parent objects that are also part of the pickle, the :class:`.Mutable`
mixin will re-establish the :attr:`.Mutable._parents` collection on each value
object as the owning parents themselves are unpickled.
Receiving Events
----------------
The :meth:`.AttributeEvents.modified` event handler may be used to receive
an event when a mutable scalar emits a change event. This event handler
is called when the :func:`.attributes.flag_modified` function is called
from within the mutable extension::
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import event
Base = declarative_base()
class MyDataClass(Base):
__tablename__ = 'my_data'
id = Column(Integer, primary_key=True)
data = Column(MutableDict.as_mutable(JSONEncodedDict))
@event.listens_for(MyDataClass.data, "modified")
def modified_json(instance):
print("json value modified:", instance.data)
.. _mutable_composites:
Establishing Mutability on Composites
=====================================
Composites are a special ORM feature which allow a single scalar attribute to
be assigned an object value which represents information "composed" from one
or more columns from the underlying mapped table. The usual example is that of
a geometric "point", and is introduced in :ref:`mapper_composite`.
.. versionchanged:: 0.7
The internals of :func:`.orm.composite` have been
greatly simplified and in-place mutation detection is no longer enabled by
default; instead, the user-defined value must detect changes on its own and
propagate them to all owning parents. The :mod:`sqlalchemy.ext.mutable`
extension provides the helper class :class:`.MutableComposite`, which is a
slight variant on the :class:`.Mutable` class.
As is the case with :class:`.Mutable`, the user-defined composite class
subclasses :class:`.MutableComposite` as a mixin, and detects and delivers
change events to its parents via the :meth:`.MutableComposite.changed` method.
In the case of a composite class, the detection is usually via the usage of
Python descriptors (i.e. ``@property``), or alternatively via the special
Python method ``__setattr__()``. Below we expand upon the ``Point`` class
introduced in :ref:`mapper_composite` to subclass :class:`.MutableComposite`
and to also route attribute set events via ``__setattr__`` to the
:meth:`.MutableComposite.changed` method::
from sqlalchemy.ext.mutable import MutableComposite
class Point(MutableComposite):
def __init__(self, x, y):
self.x = x
self.y = y
def __setattr__(self, key, value):
"Intercept set events"
# set the attribute
object.__setattr__(self, key, value)
# alert all parents to the change
self.changed()
def __composite_values__(self):
return self.x, self.y
def __eq__(self, other):
return isinstance(other, Point) and \
other.x == self.x and \
other.y == self.y
def __ne__(self, other):
return not self.__eq__(other)
The :class:`.MutableComposite` class uses a Python metaclass to automatically
establish listeners for any usage of :func:`.orm.composite` that specifies our
``Point`` type. Below, when ``Point`` is mapped to the ``Vertex`` class,
listeners are established which will route change events from ``Point``
objects to each of the ``Vertex.start`` and ``Vertex.end`` attributes::
from sqlalchemy.orm import composite, mapper
from sqlalchemy import Table, Column
vertices = Table('vertices', metadata,
Column('id', Integer, primary_key=True),
Column('x1', Integer),
Column('y1', Integer),
Column('x2', Integer),
Column('y2', Integer),
)
class Vertex(object):
pass
mapper(Vertex, vertices, properties={
'start': composite(Point, vertices.c.x1, vertices.c.y1),
'end': composite(Point, vertices.c.x2, vertices.c.y2)
})
Any in-place changes to the ``Vertex.start`` or ``Vertex.end`` members
will flag the attribute as "dirty" on the parent object::
>>> from sqlalchemy.orm import Session
>>> sess = Session()
>>> v1 = Vertex(start=Point(3, 4), end=Point(12, 15))
>>> sess.add(v1)
>>> sess.commit()
>>> v1.end.x = 8
    >>> v1 in sess.dirty
    True
Coercing Mutable Composites
---------------------------
The :meth:`.MutableBase.coerce` method is also supported on composite types.
In the case of :class:`.MutableComposite`, the :meth:`.MutableBase.coerce`
method is only called for attribute set operations, not load operations.
Overriding the :meth:`.MutableBase.coerce` method is essentially equivalent
to using a :func:`.validates` validation routine for all attributes which
make use of the custom composite type::
class Point(MutableComposite):
# other Point methods
# ...
        @classmethod
        def coerce(cls, key, value):
if isinstance(value, tuple):
value = Point(*value)
elif not isinstance(value, Point):
raise ValueError("tuple or Point expected")
return value
.. versionadded:: 0.7.10,0.8.0b2
Support for the :meth:`.MutableBase.coerce` method in conjunction with
objects of type :class:`.MutableComposite`.
Supporting Pickling
--------------------
As is the case with :class:`.Mutable`, the :class:`.MutableComposite` helper
class uses a ``weakref.WeakKeyDictionary`` available via the
:meth:`MutableBase._parents` attribute which isn't picklable. If we need to
pickle instances of ``Point`` or its owning class ``Vertex``, we at least need
to define a ``__getstate__`` that doesn't include the ``_parents`` dictionary.
Below we define both a ``__getstate__`` and a ``__setstate__`` that package up
the minimal form of our ``Point`` class::
class Point(MutableComposite):
# ...
def __getstate__(self):
return self.x, self.y
def __setstate__(self, state):
self.x, self.y = state
As with :class:`.Mutable`, the :class:`.MutableComposite` augments the
pickling process of the parent's object-relational state so that the
:meth:`MutableBase._parents` collection is restored to all ``Point`` objects.
"""
from ..orm.attributes import flag_modified
from .. import event, types
from ..orm import mapper, object_mapper, Mapper
from ..util import memoized_property
from ..sql.base import SchemaEventTarget
import weakref
class MutableBase(object):
"""Common base class to :class:`.Mutable`
and :class:`.MutableComposite`.
"""
@memoized_property
def _parents(self):
"""Dictionary of parent object->attribute name on the parent.
This attribute is a so-called "memoized" property. It initializes
itself with a new ``weakref.WeakKeyDictionary`` the first time
it is accessed, returning the same object upon subsequent access.
"""
return weakref.WeakKeyDictionary()
@classmethod
def coerce(cls, key, value):
"""Given a value, coerce it into the target type.
Can be overridden by custom subclasses to coerce incoming
data into a particular type.
By default, raises ``ValueError``.
This method is called in different scenarios depending on if
the parent class is of type :class:`.Mutable` or of type
:class:`.MutableComposite`. In the case of the former, it is called
for both attribute-set operations as well as during ORM loading
operations. For the latter, it is only called during attribute-set
operations; the mechanics of the :func:`.composite` construct
handle coercion during load operations.
:param key: string name of the ORM-mapped attribute being set.
:param value: the incoming value.
:return: the method should return the coerced value, or raise
``ValueError`` if the coercion cannot be completed.
"""
if value is None:
return None
msg = "Attribute '%s' does not accept objects of type %s"
raise ValueError(msg % (key, type(value)))
@classmethod
def _get_listen_keys(cls, attribute):
"""Given a descriptor attribute, return a ``set()`` of the attribute
keys which indicate a change in the state of this attribute.
This is normally just ``set([attribute.key])``, but can be overridden
to provide for additional keys. E.g. a :class:`.MutableComposite`
augments this set with the attribute keys associated with the columns
that comprise the composite value.
This collection is consulted in the case of intercepting the
:meth:`.InstanceEvents.refresh` and
:meth:`.InstanceEvents.refresh_flush` events, which pass along a list
of attribute names that have been refreshed; the list is compared
against this set to determine if action needs to be taken.
.. versionadded:: 1.0.5
"""
return {attribute.key}
@classmethod
def _listen_on_attribute(cls, attribute, coerce, parent_cls):
"""Establish this type as a mutation listener for the given
mapped descriptor.
"""
key = attribute.key
if parent_cls is not attribute.class_:
return
# rely on "propagate" here
parent_cls = attribute.class_
listen_keys = cls._get_listen_keys(attribute)
def load(state, *args):
"""Listen for objects loaded or refreshed.
Wrap the target data member's value with
``Mutable``.
"""
val = state.dict.get(key, None)
if val is not None:
if coerce:
val = cls.coerce(key, val)
state.dict[key] = val
val._parents[state.obj()] = key
def load_attrs(state, ctx, attrs):
if not attrs or listen_keys.intersection(attrs):
load(state)
def set(target, value, oldvalue, initiator):
"""Listen for set/replace events on the target
data member.
Establish a weak reference to the parent object
on the incoming value, remove it for the one
outgoing.
"""
if value is oldvalue:
return value
if not isinstance(value, cls):
value = cls.coerce(key, value)
if value is not None:
value._parents[target.obj()] = key
if isinstance(oldvalue, cls):
oldvalue._parents.pop(target.obj(), None)
return value
def pickle(state, state_dict):
val = state.dict.get(key, None)
if val is not None:
if 'ext.mutable.values' not in state_dict:
state_dict['ext.mutable.values'] = []
state_dict['ext.mutable.values'].append(val)
def unpickle(state, state_dict):
if 'ext.mutable.values' in state_dict:
for val in state_dict['ext.mutable.values']:
val._parents[state.obj()] = key
event.listen(parent_cls, 'load', load,
raw=True, propagate=True)
event.listen(parent_cls, 'refresh', load_attrs,
raw=True, propagate=True)
event.listen(parent_cls, 'refresh_flush', load_attrs,
raw=True, propagate=True)
event.listen(attribute, 'set', set,
raw=True, retval=True, propagate=True)
event.listen(parent_cls, 'pickle', pickle,
raw=True, propagate=True)
event.listen(parent_cls, 'unpickle', unpickle,
raw=True, propagate=True)
class Mutable(MutableBase):
"""Mixin that defines transparent propagation of change
events to a parent object.
See the example in :ref:`mutable_scalars` for usage information.
"""
def changed(self):
"""Subclasses should call this method whenever change events occur."""
for parent, key in self._parents.items():
flag_modified(parent, key)
@classmethod
def associate_with_attribute(cls, attribute):
"""Establish this type as a mutation listener for the given
mapped descriptor.
"""
cls._listen_on_attribute(attribute, True, attribute.class_)
@classmethod
def associate_with(cls, sqltype):
"""Associate this wrapper with all future mapped columns
of the given type.
This is a convenience method that calls
``associate_with_attribute`` automatically.
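
        For example, a sketch (reusing the ``JSONEncodedDict`` type from the
        examples above; the class and table names here are illustrative
        only)::

            MutableDict.associate_with(JSONEncodedDict)

            class MyOtherDataClass(Base):
                __tablename__ = 'my_other_data'
                id = Column(Integer, primary_key=True)
                # a plain JSONEncodedDict column; the MutableDict wrapping
                # is applied by the association established above
                data = Column(JSONEncodedDict)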
.. warning::
The listeners established by this method are *global*
to all mappers, and are *not* garbage collected. Only use
:meth:`.associate_with` for types that are permanent to an
           application, not with ad-hoc types, or else this will cause unbounded
growth in memory usage.
"""
def listen_for_type(mapper, class_):
for prop in mapper.column_attrs:
if isinstance(prop.columns[0].type, sqltype):
cls.associate_with_attribute(getattr(class_, prop.key))
event.listen(mapper, 'mapper_configured', listen_for_type)
@classmethod
def as_mutable(cls, sqltype):
"""Associate a SQL type with this mutable Python type.
This establishes listeners that will detect ORM mappings against
the given type, adding mutation event trackers to those mappings.
The type is returned, unconditionally as an instance, so that
:meth:`.as_mutable` can be used inline::
Table('mytable', metadata,
Column('id', Integer, primary_key=True),
Column('data', MyMutableType.as_mutable(PickleType))
)
Note that the returned type is always an instance, even if a class
is given, and that only columns which are declared specifically with
that type instance receive additional instrumentation.
To associate a particular mutable type with all occurrences of a
particular type, use the :meth:`.Mutable.associate_with` classmethod
of the particular :class:`.Mutable` subclass to establish a global
association.
.. warning::
The listeners established by this method are *global*
to all mappers, and are *not* garbage collected. Only use
:meth:`.as_mutable` for types that are permanent to an application,
           not with ad-hoc types, or else this will cause unbounded growth
in memory usage.
"""
sqltype = types.to_instance(sqltype)
# a SchemaType will be copied when the Column is copied,
# and we'll lose our ability to link that type back to the original.
# so track our original type w/ columns
if isinstance(sqltype, SchemaEventTarget):
@event.listens_for(sqltype, "before_parent_attach")
def _add_column_memo(sqltyp, parent):
parent.info['_ext_mutable_orig_type'] = sqltyp
schema_event_check = True
else:
schema_event_check = False
def listen_for_type(mapper, class_):
for prop in mapper.column_attrs:
if (
schema_event_check and
hasattr(prop.expression, 'info') and
prop.expression.info.get('_ext_mutable_orig_type')
is sqltype
) or (
prop.columns[0].type is sqltype
):
cls.associate_with_attribute(getattr(class_, prop.key))
event.listen(mapper, 'mapper_configured', listen_for_type)
return sqltype
class MutableComposite(MutableBase):
"""Mixin that defines transparent propagation of change
events on a SQLAlchemy "composite" object to its
owning parent or parents.
See the example in :ref:`mutable_composites` for usage information.
"""
@classmethod
def _get_listen_keys(cls, attribute):
return {attribute.key}.union(attribute.property._attribute_keys)
def changed(self):
"""Subclasses should call this method whenever change events occur."""
for parent, key in self._parents.items():
prop = object_mapper(parent).get_property(key)
for value, attr_name in zip(
self.__composite_values__(),
prop._attribute_keys):
setattr(parent, attr_name, value)
def _setup_composite_listener():
def _listen_for_type(mapper, class_):
for prop in mapper.iterate_properties:
if (hasattr(prop, 'composite_class') and
isinstance(prop.composite_class, type) and
issubclass(prop.composite_class, MutableComposite)):
prop.composite_class._listen_on_attribute(
getattr(class_, prop.key), False, class_)
if not event.contains(Mapper, "mapper_configured", _listen_for_type):
event.listen(Mapper, 'mapper_configured', _listen_for_type)
_setup_composite_listener()
class MutableDict(Mutable, dict):
"""A dictionary type that implements :class:`.Mutable`.
The :class:`.MutableDict` object implements a dictionary that will
emit change events to the underlying mapping when the contents of
the dictionary are altered, including when values are added or removed.
Note that :class:`.MutableDict` does **not** apply mutable tracking to the
*values themselves* inside the dictionary. Therefore it is not a sufficient
solution for the use case of tracking deep changes to a *recursive*
dictionary structure, such as a JSON structure. To support this use case,
build a subclass of :class:`.MutableDict` that provides appropriate
    coercion to the values placed in the dictionary so that they too are
"mutable", and emit events up to their parent structure.
.. versionadded:: 0.8
.. seealso::
:class:`.MutableList`
:class:`.MutableSet`
"""
def __setitem__(self, key, value):
"""Detect dictionary set events and emit change events."""
dict.__setitem__(self, key, value)
self.changed()
def setdefault(self, key, value):
result = dict.setdefault(self, key, value)
self.changed()
return result
def __delitem__(self, key):
"""Detect dictionary del events and emit change events."""
dict.__delitem__(self, key)
self.changed()
def update(self, *a, **kw):
dict.update(self, *a, **kw)
self.changed()
def pop(self, *arg):
result = dict.pop(self, *arg)
self.changed()
return result
def popitem(self):
result = dict.popitem(self)
self.changed()
return result
def clear(self):
dict.clear(self)
self.changed()
@classmethod
def coerce(cls, key, value):
"""Convert plain dictionary to instance of this class."""
if not isinstance(value, cls):
if isinstance(value, dict):
return cls(value)
return Mutable.coerce(key, value)
else:
return value
def __getstate__(self):
return dict(self)
def __setstate__(self, state):
self.update(state)
class MutableList(Mutable, list):
"""A list type that implements :class:`.Mutable`.
The :class:`.MutableList` object implements a list that will
emit change events to the underlying mapping when the contents of
the list are altered, including when values are added or removed.
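
    For example, given a hypothetical mapped class with a column declared as
    ``data = Column(MutableList.as_mutable(PickleType))``, in-place list
    operations notify the owning parent::

        obj.data.append(4)     # parent attribute flagged as modified
        obj.data[0] = 10       # likewise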
Note that :class:`.MutableList` does **not** apply mutable tracking to the
*values themselves* inside the list. Therefore it is not a sufficient
solution for the use case of tracking deep changes to a *recursive*
mutable structure, such as a JSON structure. To support this use case,
build a subclass of :class:`.MutableList` that provides appropriate
    coercion to the values placed in the list so that they too are
"mutable", and emit events up to their parent structure.
.. versionadded:: 1.1
.. seealso::
:class:`.MutableDict`
:class:`.MutableSet`
"""
def __setitem__(self, index, value):
"""Detect list set events and emit change events."""
list.__setitem__(self, index, value)
self.changed()
def __setslice__(self, start, end, value):
"""Detect list set events and emit change events."""
list.__setslice__(self, start, end, value)
self.changed()
def __delitem__(self, index):
"""Detect list del events and emit change events."""
list.__delitem__(self, index)
self.changed()
def __delslice__(self, start, end):
"""Detect list del events and emit change events."""
list.__delslice__(self, start, end)
self.changed()
def pop(self, *arg):
result = list.pop(self, *arg)
self.changed()
return result
def append(self, x):
list.append(self, x)
self.changed()
def extend(self, x):
list.extend(self, x)
self.changed()
def __iadd__(self, x):
self.extend(x)
return self
def insert(self, i, x):
list.insert(self, i, x)
self.changed()
def remove(self, i):
list.remove(self, i)
self.changed()
def clear(self):
list.clear(self)
self.changed()
def sort(self):
list.sort(self)
self.changed()
def reverse(self):
list.reverse(self)
self.changed()
@classmethod
def coerce(cls, index, value):
"""Convert plain list to instance of this class."""
if not isinstance(value, cls):
if isinstance(value, list):
return cls(value)
return Mutable.coerce(index, value)
else:
return value
def __getstate__(self):
return list(self)
def __setstate__(self, state):
self[:] = state
class MutableSet(Mutable, set):
"""A set type that implements :class:`.Mutable`.
The :class:`.MutableSet` object implements a set that will
emit change events to the underlying mapping when the contents of
the set are altered, including when values are added or removed.
Note that :class:`.MutableSet` does **not** apply mutable tracking to the
*values themselves* inside the set. Therefore it is not a sufficient
solution for the use case of tracking deep changes to a *recursive*
mutable structure. To support this use case,
build a subclass of :class:`.MutableSet` that provides appropriate
    coercion to the values placed in the set so that they too are
"mutable", and emit events up to their parent structure.
.. versionadded:: 1.1
.. seealso::
:class:`.MutableDict`
:class:`.MutableList`
"""
def update(self, *arg):
set.update(self, *arg)
self.changed()
def intersection_update(self, *arg):
set.intersection_update(self, *arg)
self.changed()
def difference_update(self, *arg):
set.difference_update(self, *arg)
self.changed()
def symmetric_difference_update(self, *arg):
set.symmetric_difference_update(self, *arg)
self.changed()
def __ior__(self, other):
self.update(other)
return self
def __iand__(self, other):
self.intersection_update(other)
return self
def __ixor__(self, other):
self.symmetric_difference_update(other)
return self
def __isub__(self, other):
self.difference_update(other)
return self
def add(self, elem):
set.add(self, elem)
self.changed()
def remove(self, elem):
set.remove(self, elem)
self.changed()
def discard(self, elem):
set.discard(self, elem)
self.changed()
def pop(self, *arg):
result = set.pop(self, *arg)
self.changed()
return result
def clear(self):
set.clear(self)
self.changed()
@classmethod
def coerce(cls, index, value):
"""Convert plain set to instance of this class."""
if not isinstance(value, cls):
if isinstance(value, set):
return cls(value)
return Mutable.coerce(index, value)
else:
return value
def __getstate__(self):
return set(self)
def __setstate__(self, state):
self.update(state)
def __reduce_ex__(self, proto):
return (self.__class__, (list(self), ))
|
gpl-3.0
| -1,021,361,795,606,901,500 | 33.265328 | 79 | 0.640043 | false |
JordanP/openstack-snippets
|
ospurge/ospurge/resources/nova.py
|
1
|
1062
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from typing import Any
from typing import Dict
from typing import Iterable
from ospurge.resources import base
class Servers(base.ServiceResource):
ORDER = 15
def list(self) -> Iterable:
return self.cloud.list_servers()
def delete(self, resource: Dict[str, Any]) -> None:
self.cloud.delete_server(resource['id'])
@staticmethod
def to_str(resource: Dict[str, Any]) -> str:
return "VM (id='{}', name='{}')".format(
resource['id'], resource['name'])
|
apache-2.0
| 1,500,419,142,294,285,300 | 33.258065 | 76 | 0.69774 | false |
ericmjl/bokeh
|
examples/models/file/choropleth.py
|
1
|
2189
|
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models import ColorBar, ColumnDataSource, LinearColorMapper, Patches, Plot
from bokeh.palettes import Viridis11
from bokeh.resources import INLINE
from bokeh.sampledata import unemployment, us_counties, us_states
from bokeh.transform import transform
from bokeh.util.browser import view
us_states = us_states.data.copy()
us_counties = us_counties.data
unemployment = unemployment.data
del us_states["HI"]
del us_states["AK"]
state_source = ColumnDataSource(
data=dict(
state_xs=[us_states[code]["lons"] for code in us_states],
state_ys=[us_states[code]["lats"] for code in us_states],
)
)
cmap = LinearColorMapper(palette=Viridis11, low=min(unemployment.values()), high=max(unemployment.values()))
county_source = ColumnDataSource(
data=dict(
county_xs=[us_counties[code]["lons"] for code in us_counties if us_counties[code]["state"] not in ["ak", "hi", "pr", "gu", "vi", "mp", "as"]],
county_ys=[us_counties[code]["lats"] for code in us_counties if us_counties[code]["state"] not in ["ak", "hi", "pr", "gu", "vi", "mp", "as"]],
rate=[unemployment[code] for code in us_counties if us_counties[code]["state"] not in ["ak", "hi", "pr", "gu", "vi", "mp", "as"]],
)
)
plot = Plot(min_border=0, border_fill_color="white", plot_width=1300, plot_height=700)
plot.title.text = "2009 Unemployment Data"
plot.toolbar_location = None
county_patches = Patches(xs="county_xs", ys="county_ys", fill_color=transform("rate", cmap), fill_alpha=0.7, line_color="white", line_width=0.5)
plot.add_glyph(county_source, county_patches)
state_patches = Patches(xs="state_xs", ys="state_ys", fill_alpha=0.0, line_color="#884444", line_width=2)
plot.add_glyph(state_source, state_patches)
cbar = ColorBar(color_mapper=cmap, location=(0, 0))
plot.add_layout(cbar, 'left')
doc = Document()
doc.add_root(plot)
if __name__ == "__main__":
doc.validate()
filename = "choropleth.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Choropleth of all US counties, Unemployment 2009"))
print("Wrote %s" % filename)
view(filename)
|
bsd-3-clause
| 5,742,818,407,330,208,000 | 38.089286 | 150 | 0.688899 | false |
hds-lab/textvisdrg
|
msgvis/apps/datatable/models.py
|
1
|
30970
|
from django.db import models
from django.db.models import Q
from datetime import timedelta
import operator
from msgvis.apps.base.models import MappedValuesQuerySet
from msgvis.apps.corpus import models as corpus_models
from msgvis.apps.groups import models as groups_models
from msgvis.apps.dimensions import registry
from msgvis.apps.corpus import utils
import re
from django.db import connection
MAX_CATEGORICAL_LEVELS = 10
def find_messages(queryset):
"""If the given queryset is actually a :class:`.Dataset` model, get its messages queryset."""
if isinstance(queryset, corpus_models.Dataset):
queryset = queryset.message_set.all()
return queryset
def get_field_name(text):
    pattern = re.compile(r'(?<=__)\w+')
results = pattern.search(text)
if results:
return results.group()
return None
def fetchall(sql):
sql = utils.convert_boolean(sql)
cursor = connection.cursor()
cursor.execute(sql)
desc = cursor.description
return [
row[0]
for row in cursor.fetchall()
]
def fetchall_table(sql):
sql = utils.convert_boolean(sql)
cursor = connection.cursor()
cursor.execute(sql)
desc = cursor.description
return [
dict(zip([col[0] for col in desc], row))
for row in cursor.fetchall()
]
def group_messages_by_dimension_with_raw_query(query, dimension, callback):
queryset = corpus_models.Message.objects.raw(query)
message_id = corpus_models.Message._meta.model_name + "_id" #message_id
fieldname = get_field_name(dimension.field_name)
key = dimension.key
related_mgr = getattr(corpus_models.Message, dimension.key)
if hasattr(related_mgr, "RelatedObjectDoesNotExist"):
related_table = related_mgr.field.rel.to._meta.db_table
related_id = related_mgr.field.rel.to._meta.model._meta.model_name + "_id"
if related_id == "person_id":
related_id = "sender_id"
elif related_id == "messagetype_id":
related_id = "type_id"
final_query = "SELECT B.%s AS `%s`, count(*) AS `value` FROM (%s) AS A, `%s` AS B WHERE A.%s=B.id GROUP BY B.%s ORDER BY `value` DESC" %(fieldname, key, query, related_table, related_id, fieldname)
else:
if hasattr(related_mgr, "field"):
through_table = related_mgr.through._meta.db_table # e.g., corpus_message_hashtags
related_table = related_mgr.field.rel.to._meta.db_table # e.g., corpus_hashtag
related_id = related_mgr.field.rel.to._meta.model._meta.model_name + "_id" # e.g., hashtag_id
elif hasattr(related_mgr, "related"):
through_table = related_mgr.related.field.rel.through._meta.db_table # e.g., enhance_messageword
related_table = related_mgr.related.model._meta.db_table # e.g., enhance_word
related_id = related_mgr.related.model._meta.model_name + "_id" # e.g., word_id
final_query = "SELECT B.%s AS `%s`, count(*) AS `value` FROM (%s) AS A, `%s` AS B, `%s` AS C WHERE A.id=C.%s AND B.id=C.%s GROUP BY B.%s ORDER BY `value` DESC" %(fieldname, key, query, related_table, through_table, message_id, related_id, fieldname)
return callback(final_query)
def group_messages_by_words_with_raw_query(query, callback):
pattern = re.compile(r'T\d+.`text`')
results = pattern.search(query)
if results:
table = results.group()
query = query.replace("`corpus_message`.`id`, `corpus_message`.`dataset_id`, `corpus_message`.`original_id`, `corpus_message`.`type_id`, `corpus_message`.`sender_id`, `corpus_message`.`time`, `corpus_message`.`language_id`, `corpus_message`.`sentiment`, `corpus_message`.`timezone_id`, `corpus_message`.`replied_to_count`, `corpus_message`.`shared_count`, `corpus_message`.`contains_hashtag`, `corpus_message`.`contains_url`, `corpus_message`.`contains_media`, `corpus_message`.`contains_mention`, `corpus_message`.`text`",
"%s AS words, count(*) AS value" %(table))
query += "GROUP BY `words` ORDER BY `value` DESC"
return callback(query)
class DataTable(object):
"""
This class knows how to calculate appropriate visualization data
for a given pair of dimensions.
"""
def __init__(self, primary_dimension, secondary_dimension=None):
"""
Construct a DataTable for one or two dimensions.
Dimensions may be string dimension keys or
:class:`msgvis.apps.dimensions.models.CategoricalDimension` objects.
:type primary_dimension: registry.models.CategoricalDimension
:type secondary_dimension: registry.models.CategoricalDimension
:return:
"""
# Look up the dimensions if needed
if isinstance(primary_dimension, basestring):
primary_dimension = registry.get_dimension(primary_dimension)
if secondary_dimension is not None and isinstance(secondary_dimension, basestring):
secondary_dimension = registry.get_dimension(secondary_dimension)
# a dirty way
if secondary_dimension is not None and hasattr(secondary_dimension, 'key') and secondary_dimension.key == "groups":
secondary_dimension = None
self.primary_dimension = primary_dimension
self.secondary_dimension = secondary_dimension
self.mode = "default"
def set_mode(self, mode):
self.mode = mode
def render(self, queryset, desired_primary_bins=None, desired_secondary_bins=None):
"""
Given a set of messages (already filtered as necessary),
calculate the data table.
Optionally, a number of primary and secondary bins may be given.
The result is a list of dictionaries. Each
dictionary contains a key for each dimension
and a value key for the count.
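
        For illustration only (the dimension keys and the ``dataset``
        variable are assumptions, not defined in this module)::

            table = DataTable('time', 'sentiment')
            rows = table.render(dataset.message_set.all(),
                                desired_primary_bins=20)
            # e.g. [{'time': ..., 'sentiment': ..., 'value': 12}, ...]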
"""
if not self.secondary_dimension:
# If there is only one dimension, we should be able to fall back
# on that dimension's group_by() implementation.
queryset = self.primary_dimension.group_by(queryset,
grouping_key=self.primary_dimension.key,
bins=desired_primary_bins)
return queryset.annotate(value=models.Count('id'))
else:
# Now it gets nasty...
primary_group = self.primary_dimension.get_grouping_expression(queryset,
bins=desired_primary_bins)
secondary_group = self.secondary_dimension.get_grouping_expression(queryset,
bins=desired_secondary_bins)
if primary_group is None or secondary_group is None:
# There is no data to group
return queryset.values()
queryset, internal_primary_key = self.primary_dimension.select_grouping_expression(
queryset,
primary_group)
queryset, internal_secondary_key = self.secondary_dimension.select_grouping_expression(
queryset,
secondary_group)
# Group the data
queryset = queryset.values(internal_primary_key,
internal_secondary_key)
# Count the messages
queryset = queryset.annotate(value=models.Count('id'))
# We may need to remap some fields
mapping = {}
if internal_primary_key != self.primary_dimension.key:
mapping[internal_primary_key] = self.primary_dimension.key
if internal_secondary_key != self.secondary_dimension.key:
mapping[internal_secondary_key] = self.secondary_dimension.key
if len(mapping) > 0:
return MappedValuesQuerySet.create_from(queryset, mapping)
else:
return queryset
def render_others(self, queryset, domains, primary_flag, secondary_flag, desired_primary_bins=None, desired_secondary_bins=None):
"""
        Given a set of messages (already filtered as necessary), calculate
        the "Other" rows for any categorical dimension whose domain has been
        truncated to its top levels.
Optionally, a number of primary and secondary bins may be given.
The result is a list of dictionaries. Each
dictionary contains a key for each dimension
and a value key for the count.
"""
# check if any of the dimensions is categorical
if not primary_flag and not secondary_flag:
return None
if not self.secondary_dimension and self.primary_dimension.is_categorical() and primary_flag:
# If there is only one dimension, we should be able to fall back
# on that dimension's group_by() implementation.
queryset = queryset.exclude(utils.levels_or(self.primary_dimension.field_name, domains[self.primary_dimension.key]))
domains[self.primary_dimension.key].append(u'Other ' + self.primary_dimension.name)
return [{self.primary_dimension.key: u'Other ' + self.primary_dimension.name, 'value': queryset.count()}]
elif self.secondary_dimension:
# both dimensions are categorical
if self.primary_dimension.is_categorical() and self.secondary_dimension.is_categorical():
original_queryset = queryset
others_results = []
if primary_flag:
domains[self.primary_dimension.key].append(u'Other ' + self.primary_dimension.name)
if secondary_flag:
domains[self.secondary_dimension.key].append(u'Other ' + self.secondary_dimension.name)
# primary others x secondary others
if primary_flag and secondary_flag:
queryset = queryset.exclude(utils.levels_or(self.primary_dimension.field_name, domains[self.primary_dimension.key]))
queryset = queryset.exclude(utils.levels_or(self.secondary_dimension.field_name, domains[self.secondary_dimension.key]))
others_results.append({self.primary_dimension.key: u'Other ' + self.primary_dimension.name,
self.secondary_dimension.key: u'Other ' + self.secondary_dimension.name,
'value': queryset.count()})
# primary top ones x secondary others
if secondary_flag:
queryset = original_queryset
queryset = queryset.filter(utils.levels_or(self.primary_dimension.field_name, domains[self.primary_dimension.key]))
queryset = queryset.exclude(utils.levels_or(self.secondary_dimension.field_name, domains[self.secondary_dimension.key]))
queryset = self.primary_dimension.group_by(queryset,
grouping_key=self.primary_dimension.key)
queryset = queryset.annotate(value=models.Count('id'))
results = list(queryset)
for r in results:
r[self.secondary_dimension.key] = u'Other ' + self.secondary_dimension.name
others_results.extend(results)
# primary others x secondary top ones
if primary_flag:
queryset = original_queryset
queryset = queryset.exclude(utils.levels_or(self.primary_dimension.field_name, domains[self.primary_dimension.key]))
queryset = queryset.filter(utils.levels_or(self.secondary_dimension.field_name, domains[self.secondary_dimension.key]))
queryset = self.secondary_dimension.group_by(queryset,
grouping_key=self.secondary_dimension.key)
queryset = queryset.annotate(value=models.Count('id'))
results = list(queryset)
for r in results:
r[self.primary_dimension.key] = u'Other ' + self.primary_dimension.name
others_results.extend(results)
return others_results
# primary categorical and secondary quantitative
elif self.primary_dimension.is_categorical() and primary_flag and not self.secondary_dimension.is_categorical():
queryset = queryset.exclude(utils.levels_or(self.primary_dimension.field_name, domains[self.primary_dimension.key]))
domains[self.primary_dimension.key].append(u'Other ' + self.primary_dimension.name)
queryset = self.secondary_dimension.group_by(queryset,
grouping_key=self.secondary_dimension.key,
bins=desired_secondary_bins)
queryset = queryset.annotate(value=models.Count('id'))
results = list(queryset)
for r in results:
r[self.primary_dimension.key] = u'Other ' + self.primary_dimension.name
return results
# primary quantitative and secondary categorical
elif not self.primary_dimension.is_categorical() and self.secondary_dimension.is_categorical() and secondary_flag:
queryset = queryset.exclude(utils.levels_or(self.secondary_dimension.field_name, domains[self.secondary_dimension.key]))
domains[self.secondary_dimension.key].append(u'Other ' + self.secondary_dimension.name)
queryset = self.primary_dimension.group_by(queryset,
grouping_key=self.primary_dimension.key,
bins=desired_primary_bins)
queryset = queryset.annotate(value=models.Count('id'))
results = list(queryset)
for r in results:
r[self.secondary_dimension.key] = u'Other ' + self.secondary_dimension.name
return results
def domain(self, dimension, queryset, filter=None, exclude=None, desired_bins=None):
"""Return the sorted levels in this dimension"""
if filter is not None:
queryset = dimension.filter(queryset, **filter)
if exclude is not None:
queryset = dimension.exclude(queryset, **exclude)
domain = dimension.get_domain(queryset, bins=desired_bins)
labels = dimension.get_domain_labels(domain)
return domain, labels
def groups_domain(self, dimension, queryset_all, group_querysets, desired_bins=None):
"""Return the sorted levels in the union of groups in this dimension"""
if dimension.is_related_categorical():
query = ""
for idx, queryset in enumerate(group_querysets):
if idx > 0:
query += " UNION "
query += "(%s)" %(utils.quote(str(queryset.query)))
domain = group_messages_by_dimension_with_raw_query(query, dimension, fetchall)
else:
queryset = queryset_all
domain = dimension.get_domain(queryset, bins=desired_bins)
labels = dimension.get_domain_labels(domain)
return domain, labels
def filter_search_key(self, domain, labels, search_key):
match_domain = []
match_labels = []
for i in range(len(domain)):
level = domain[i]
if level is not None and level.lower().find(search_key.lower()) != -1 :
match_domain.append(level)
if labels is not None:
match_labels.append(labels[i])
return match_domain, match_labels
def generate(self, dataset, filters=None, exclude=None, page_size=100, page=None, search_key=None, groups=None):
"""
Generate a complete data group table response.
This includes 'table', which provides the non-zero
message frequency for each combination of primary and secondary dimension values,
respecting the filters.
It also includes 'domains', which provides, for both
primary and secondary dimensions, the levels of the
dimension irrespective of filters (except on those actual dimensions).
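
        For illustration only (the dimension keys and the ``dataset``
        argument are assumptions)::

            dt = DataTable('time', 'sentiment')
            response = dt.generate(dataset, filters=None, page_size=50)
            # response contains 'table', 'domains' and 'domain_labels'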
"""
if (groups is None):
queryset = dataset.message_set.all()
# Filter out null time
queryset = queryset.exclude(time__isnull=True)
if dataset.start_time and dataset.end_time:
range = dataset.end_time - dataset.start_time
buffer = timedelta(seconds=range.total_seconds() * 0.1)
queryset = queryset.filter(time__gte=dataset.start_time - buffer,
time__lte=dataset.end_time + buffer)
unfiltered_queryset = queryset
            # Filter the data (look for filters on the primary/secondary dimensions at the same time)
primary_filter = None
secondary_filter = None
if filters is not None:
for filter in filters:
dimension = filter['dimension']
queryset = dimension.filter(queryset, **filter)
if dimension == self.primary_dimension:
primary_filter = filter
if dimension == self.secondary_dimension:
secondary_filter = filter
primary_exclude = None
secondary_exclude = None
if exclude is not None:
for exclude_filter in exclude:
dimension = exclude_filter['dimension']
queryset = dimension.exclude(queryset, **exclude_filter)
if dimension == self.primary_dimension:
primary_exclude = exclude_filter
if dimension == self.secondary_dimension:
secondary_exclude = exclude_filter
domains = {}
domain_labels = {}
max_page = None
queryset_for_others = None
# flag is true if the dimension is categorical and has more than MAX_CATEGORICAL_LEVELS levels
primary_flag = False
secondary_flag = False
# Include the domains for primary and (secondary) dimensions
domain, labels = self.domain(self.primary_dimension,
unfiltered_queryset,
primary_filter, primary_exclude)
# paging the first dimension, this is for the filter distribution
if primary_filter is None and self.secondary_dimension is None and page is not None:
if search_key is not None:
domain, labels = self.filter_search_key(domain, labels, search_key)
start = (page - 1) * page_size
end = min(start + page_size, len(domain))
max_page = (len(domain) / page_size) + 1
# no level left
if len(domain) == 0 or start > len(domain):
return None
domain = domain[start:end]
if labels is not None:
labels = labels[start:end]
queryset = queryset.filter(utils.levels_or(self.primary_dimension.field_name, domain))
else:
if (self.mode == 'enable_others' or self.mode == 'omit_others') and \
self.primary_dimension.is_categorical() and len(domain) > MAX_CATEGORICAL_LEVELS:
primary_flag = True
domain = domain[:MAX_CATEGORICAL_LEVELS]
queryset_for_others = queryset
queryset = queryset.filter(utils.levels_or(self.primary_dimension.field_name, domain))
if labels is not None:
labels = labels[:MAX_CATEGORICAL_LEVELS]
domains[self.primary_dimension.key] = domain
if labels is not None:
domain_labels[self.primary_dimension.key] = labels
if self.secondary_dimension:
domain, labels = self.domain(self.secondary_dimension,
unfiltered_queryset,
secondary_filter, secondary_exclude)
if (self.mode == 'enable_others' or self.mode == 'omit_others') and \
self.secondary_dimension.is_categorical() and \
len(domain) > MAX_CATEGORICAL_LEVELS:
secondary_flag = True
domain = domain[:MAX_CATEGORICAL_LEVELS]
if queryset_for_others is None:
queryset_for_others = queryset
queryset = queryset.filter(utils.levels_or(self.secondary_dimension.field_name, domain))
if labels is not None:
labels = labels[:MAX_CATEGORICAL_LEVELS]
domains[self.secondary_dimension.key] = domain
if labels is not None:
domain_labels[self.secondary_dimension.key] = labels
# Render a table
table = self.render(queryset)
if self.mode == "enable_others" and queryset_for_others is not None:
# adding others to the results
table_for_others = self.render_others(queryset_for_others, domains, primary_flag, secondary_flag)
table = list(table)
table.extend(table_for_others)
results = {
'table': table,
'domains': domains,
'domain_labels': domain_labels
}
if max_page is not None:
results['max_page'] = max_page
else:
domains = {}
domain_labels = {}
max_page = None
queryset_for_others = None
# flag is true if the dimension is categorical and has more than MAX_CATEGORICAL_LEVELS levels
primary_flag = False
secondary_flag = False
primary_filter = None
secondary_filter = None
primary_exclude = None
secondary_exclude = None
queryset = dataset.message_set.all()
queryset = queryset.exclude(time__isnull=True)
if dataset.start_time and dataset.end_time:
range = dataset.end_time - dataset.start_time
buffer = timedelta(seconds=range.total_seconds() * 0.1)
queryset = queryset.filter(time__gte=dataset.start_time - buffer,
time__lte=dataset.end_time + buffer)
if filters is not None:
for filter in filters:
dimension = filter['dimension']
queryset = dimension.filter(queryset, **filter)
if dimension == self.primary_dimension:
primary_filter = filter
if dimension == self.secondary_dimension:
secondary_filter = filter
if exclude is not None:
for exclude_filter in exclude:
dimension = exclude_filter['dimension']
queryset = dimension.exclude(queryset, **exclude_filter)
if dimension == self.primary_dimension:
primary_exclude = exclude_filter
if dimension == self.secondary_dimension:
secondary_exclude = exclude_filter
queryset_all = queryset
#queryset = corpus_models.Message.objects.none()
group_querysets = []
group_labels = []
#message_list = set()
for group in groups:
group_obj = groups_models.Group.objects.get(id=group)
if group_obj.order > 0:
group_labels.append("#%d %s"%(group_obj.order, group_obj.name))
else:
group_labels.append("%s"%(group_obj.name))
queryset = group_obj.messages
# Filter out null time
queryset = queryset.exclude(time__isnull=True)
if dataset.start_time and dataset.end_time:
range = dataset.end_time - dataset.start_time
buffer = timedelta(seconds=range.total_seconds() * 0.1)
queryset = queryset.filter(time__gte=dataset.start_time - buffer,
time__lte=dataset.end_time + buffer)
unfiltered_queryset = queryset
                # Filter the data (look for filters on the primary/secondary dimensions at the same time)
if filters is not None:
for filter in filters:
dimension = filter['dimension']
queryset = dimension.filter(queryset, **filter)
if exclude is not None:
for exclude_filter in exclude:
dimension = exclude_filter['dimension']
queryset = dimension.exclude(queryset, **exclude_filter)
group_querysets.append(queryset)
#########################################################################################################################
# deal with union distribution
            # This is because union of querysets in Django does not work here...
# super ugly. Refactoring is required.
# Include the domains for primary and (secondary) dimensions
domain, labels = self.groups_domain(self.primary_dimension,
queryset_all, group_querysets)
# paging the first dimension, this is for the filter distribution
if primary_filter is None and self.secondary_dimension is None and page is not None:
if search_key is not None:
domain, labels = self.filter_search_key(domain, labels, search_key)
start = (page - 1) * page_size
end = min(start + page_size, len(domain))
max_page = (len(domain) / page_size) + 1
# no level left
if len(domain) == 0 or start > len(domain):
return None
domain = domain[start:end]
if labels is not None:
labels = labels[start:end]
else:
if (self.mode == 'enable_others' or self.mode == 'omit_others') and \
self.primary_dimension.is_categorical() and len(domain) > MAX_CATEGORICAL_LEVELS:
primary_flag = True
domain = domain[:MAX_CATEGORICAL_LEVELS]
if labels is not None:
labels = labels[:MAX_CATEGORICAL_LEVELS]
domains[self.primary_dimension.key] = domain
if labels is not None:
domain_labels[self.primary_dimension.key] = labels
if self.secondary_dimension:
domain, labels = self.groups_domain(self.secondary_dimension,
queryset_all, group_querysets)
if (self.mode == 'enable_others' or self.mode == 'omit_others') and \
self.secondary_dimension.is_categorical() and \
len(domain) > MAX_CATEGORICAL_LEVELS:
secondary_flag = True
domain = domain[:MAX_CATEGORICAL_LEVELS]
if labels is not None:
labels = labels[:MAX_CATEGORICAL_LEVELS]
domains[self.secondary_dimension.key] = domain
if labels is not None:
domain_labels[self.secondary_dimension.key] = labels
#########################################################################################################################
group_tables = []
for queryset in group_querysets:
queryset_for_others = queryset
if (self.mode == 'enable_others' or self.mode == 'omit_others') and \
self.primary_dimension.is_categorical():
queryset = queryset.filter(utils.levels_or(self.primary_dimension.field_name, domains[self.primary_dimension.key]))
if self.secondary_dimension:
if (self.mode == 'enable_others' or self.mode == 'omit_others') and \
self.secondary_dimension.is_categorical():
if queryset_for_others is None:
queryset_for_others = queryset
queryset = queryset.filter(utils.levels_or(self.secondary_dimension.field_name, domains[self.secondary_dimension.key]))
# Render a table
if self.primary_dimension.key == "words":
table = group_messages_by_words_with_raw_query(utils.quote(str(queryset.query)), fetchall_table)
else:
table = self.render(queryset)
if self.mode == "enable_others" and queryset_for_others is not None:
# adding others to the results
table_for_others = self.render_others(queryset_for_others, domains, primary_flag, secondary_flag)
table = list(table)
table.extend(table_for_others)
group_tables.append(table)
if self.secondary_dimension is None:
final_table = []
for idx, group_table in enumerate(group_tables):
for item in group_table:
item['groups'] = groups[idx]
final_table.extend(group_table)
domains['groups'] = groups
domain_labels['groups'] = group_labels
results = {
'table': final_table,
'domains': domains,
'domain_labels': domain_labels
}
else:
tables = []
for idx, group_table in enumerate(group_tables):
tables.append({
'group_id': groups[idx],
'group_name': group_labels[idx],
'table': group_table
})
results = {
'tables': tables,
'domains': domains,
'domain_labels': domain_labels
}
if max_page is not None:
results['max_page'] = max_page
return results
|
mit
| -5,398,902,057,194,541,000 | 44.410557 | 531 | 0.559768 | false |
andrefreitas/schwa
|
schwa/extraction/git_extractor.py
|
1
|
8652
|
# Copyright (c) 2015 Faculty of Engineering of the University of Porto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" Module for the Git Extractor. """
import multiprocessing
import os
import git
from .abstract_extractor import *
from schwa.repository import *
from schwa.parsing import JavaParser, ParsingError
current_repo = None  # Current repository wrapper
def extract_commit_wrapper(hexsha):
""" Multiprocessing wrapper for extracting a commit"""
return current_repo.extract_commit(hexsha)
class GitExtractor(AbstractExtractor):
""" A Git Extractor.
This class relies on GitPython library to extract data from a local repository.
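
    For illustration, a hypothetical use (the repository path and the regex
    are assumptions)::

        extractor = GitExtractor("/path/to/local/repo")
        repository = extractor.extract(ignore_regex=r"^tests/",
                                       method_granularity=True,
                                       parallel=False)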
"""
def __init__(self, path):
super().__init__(path)
self.repo = git.Repo(path, odbt=git.GitCmdObjectDB)
def extract(self, ignore_regex="^$", max_commits=None, method_granularity=False, parallel=True):
""" Extract a repository.
It extracts commits from a repository that are important to the analysis. Therefore, only commits
related to code are important. For the sake of supporting big repositories, it is possible to set
the maximum number of commits.
Args:
ignore_regex: An optional string that is a regex pattern to ignore unnecessary files.
max_commits: An optional int that is the maximum number of commits to extract since the last one.
method_granularity: An optional boolean that enables extraction until the method granularity.
parallel: An optional boolean that enables multiprocessing extraction.
Returns:
A Repository instance.
"""
# Multiprocessing setup
global current_repo
current_repo = self
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError: # pragma: no cover
cpus = 2 # pragma: no cover
self.ignore_regex = ignore_regex
self.method_granularity = method_granularity
# Extract commits
iter_commits = self.repo.iter_commits(max_count=max_commits) if max_commits else self.repo.iter_commits()
commits = [commit.hexsha for commit in iter_commits]
pool = multiprocessing.Pool(processes=cpus)
if parallel and os.name != "nt":
commits = pool.map(extract_commit_wrapper, commits)
else:
commits = map(extract_commit_wrapper, commits)
commits = list(reversed([commit for commit in commits if commit]))
# Timestamps
try:
begin_ts = list(self.repo.iter_commits())[-1].committed_date
last_ts = list(self.repo.iter_commits(max_count=1))[0].committed_date
except TypeError:
raise RepositoryExtractionException("Error extracting repository: cannot parse begin or last timestamps!")
# Repository
repo = Repository(commits, begin_ts, last_ts)
return repo
def extract_commit(self, hexsha):
""" Extract a commit.
Iterates over commits diffs to extract important information such as changed files, classes and methods.
Args:
hexsha: A string representing the commit ID
Returns:
A Commit instance.
"""
commit = self.repo.commit(hexsha)
_id = hexsha
try:
message = commit.message
except (UnicodeDecodeError, TypeError): # pragma: no cover
return None # pragma: no cover
author = commit.author.email
timestamp = commit.committed_date
diffs_list = []
# First commit
if not commit.parents:
for blob in commit.tree.traverse():
if self.is_good_blob(blob):
diffs_list.extend(self.get_new_file_diffs(blob))
else:
for parent in commit.parents:
for diff in parent.diff(commit):
# Shortcut
if not self.is_good_blob(diff.a_blob) and not self.is_good_blob(diff.b_blob):
continue
# New file
if diff.new_file and self.is_good_blob(diff.b_blob):
diffs_list.extend(self.get_new_file_diffs(diff.b_blob))
# Renamed file
elif diff.renamed and self.is_good_blob(diff.a_blob) and self.is_good_blob(diff.b_blob):
diffs_list.extend(self.get_renamed_file_diffs(diff.a_blob, diff.b_blob))
# Deleted file
elif diff.deleted_file:
diffs_list.append(DiffFile(file_a=diff.a_blob.path, removed=True))
# Modified file
else:
diffs_list.extend(self.get_modified_file_diffs(diff.a_blob, diff.b_blob))
return Commit(_id, message, author, timestamp, diffs_list) if len(diffs_list) > 0 else None
def get_new_file_diffs(self, blob):
diffs_list = [DiffFile(file_b=blob.path, added=True)]
if can_parse_file(blob.path) and self.method_granularity:
source = GitExtractor.get_source(blob)
file_parsed = GitExtractor.parse(blob.path, source)
if file_parsed:
classes_set = file_parsed.get_classes_set()
methods_set = file_parsed.get_functions_set()
for c in classes_set:
diffs_list.append(DiffClass(file_name=blob.path, class_b=c, added=True))
for c, m in methods_set:
diffs_list.append(DiffMethod(file_name=blob.path, class_name=c, method_b=m, added=True))
return diffs_list
def get_modified_file_diffs(self, blob_a, blob_b):
diffs_list = [DiffFile(file_a=blob_a.path, file_b=blob_b.path, modified=True)]
try:
if can_parse_file(blob_a.path) and can_parse_file(blob_b.path) and self.method_granularity:
source_a = GitExtractor.get_source(blob_a)
source_b = GitExtractor.get_source(blob_b)
diffs_list.extend(GitExtractor.diff((blob_a.path, source_a), (blob_b.path, source_b)))
except ParsingError:
pass
return diffs_list
def get_renamed_file_diffs(self, blob_a, blob_b):
diffs_list = [DiffFile(file_a=blob_a.path, file_b=blob_b.path, renamed=True)]
try:
if can_parse_file(blob_a.path) and can_parse_file(blob_b.path) and self.method_granularity:
source_a = GitExtractor.get_source(blob_a)
source_b = GitExtractor.get_source(blob_b)
diffs_list.extend(GitExtractor.diff((blob_a.path, source_a), (blob_b.path, source_b)))
except ParsingError:
pass
return diffs_list
def is_good_blob(self, blob):
return blob and is_code_file(blob.path) and not re.search(self.ignore_regex, blob.path)
@staticmethod
def get_source(blob):
try:
stream = blob.data_stream.read()
source = stream.decode("UTF-8")
except AttributeError:
raise ParsingError
return source
@staticmethod
def parse(path, source):
try:
if "java" in path:
components = JavaParser.parse(source)
return components
except ParsingError:
pass
return False
@staticmethod
def diff(file_a, file_b):
try:
if "java" in file_a[0]:
components_diff = JavaParser.diff(file_a, file_b)
return components_diff
except ParsingError:
pass
return []
|
mit
| -4,259,756,716,678,877,000 | 39.816038 | 118 | 0.623555 | false |
annelida/stuff
|
Scrapy/activesport/activesport/settings.py
|
1
|
3027
|
# -*- coding: utf-8 -*-
# Scrapy settings for activesport project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'activesport'
SPIDER_MODULES = ['activesport.spiders']
NEWSPIDER_MODULE = 'activesport.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'activesport (+http://www.yourdomain.com)'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS=32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY=3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN=16
#CONCURRENT_REQUESTS_PER_IP=16
# Disable cookies (enabled by default)
#COOKIES_ENABLED=False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED=False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'activesport.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'activesport.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'activesport.pipelines.ActivesportPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
#AUTOTHROTTLE_ENABLED=True
# The initial download delay
#AUTOTHROTTLE_START_DELAY=5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY=60
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG=False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
HTTPCACHE_ENABLED=True
HTTPCACHE_EXPIRATION_SECS=0
HTTPCACHE_DIR='httpcache'
HTTPCACHE_IGNORE_HTTP_CODES=[]
HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'
|
mit
| -5,023,893,244,834,036,000 | 34.611765 | 109 | 0.778328 | false |
skosukhin/spack
|
var/spack/repos/builtin/packages/ncdu/package.py
|
1
|
2257
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Ncdu(Package):
"""Ncdu is a disk usage analyzer with an ncurses interface. It is designed
to find space hogs on a remote server where you don't have an entire
    graphical setup available, but it is a useful tool even on regular desktop
systems. Ncdu aims to be fast, simple and easy to use, and should be able
to run in any minimal POSIX-like environment with ncurses installed.
"""
homepage = "http://dev.yorhel.nl/ncdu"
url = "http://dev.yorhel.nl/download/ncdu-1.11.tar.gz"
version('1.11', '9e44240a5356b029f05f0e70a63c4d12')
version('1.10', '7535decc8d54eca811493e82d4bfab2d')
version('1.9', '93258079db897d28bb8890e2db89b1fb')
version('1.8', '94d7a821f8a0d7ba8ef3dd926226f7d5')
version('1.7', '172047c29d232724cc62e773e82e592a')
depends_on("ncurses")
def install(self, spec, prefix):
configure('--prefix=%s' % prefix,
'--with-ncurses=%s' % spec['ncurses'])
make()
make("install")
|
lgpl-2.1
| 6,441,542,120,662,856,000 | 42.403846 | 78 | 0.672574 | false |
bbossola/katas
|
trains/core/biz.py
|
1
|
5195
|
from math import inf
class Context():
def __init__(self, max_len, direct=False, deep=False):
self._max_len = max_len
self._direct = direct
self._deep = deep
self._routes = []
def routes(self):
return self._routes
def max_depth(self):
return self._max_len
def direct(self):
return self._direct
def deep(self):
return self._deep
def store(self, route):
self._routes.append(route)
def __len__(self):
return len(self._routes)
def default_context():
return Context(10)
class Filters():
@staticmethod
def all():
return lambda context, route: True
@staticmethod
def max_stops(max_stops):
return lambda context, route: route.stops() < max_stops
@staticmethod
def distance_less_than(max_distance):
return lambda context, route: route.distance() < max_distance
@staticmethod
def compose(*filters):
def check(context, route):
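            # A route passes only if every composed filter accepts it.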
for filtrum in filters:
if filtrum(context, route) == False:
return False
return True
return check
@staticmethod
def valid(station_from, station_to):
def check(context, route):
return route.start() == station_from and route.end() == station_to
return check
@staticmethod
def exact_stops(max_stops):
return lambda context, route: route.stops() == max_stops
class Route():
def __init__(self, steps=[], distance=0):
self._steps = steps
self._distance = distance
def via(self, station_from, station_to):
new_steps = list(self._steps)
new_steps.append(station_to)
new_distance = self._distance + station_from.distance_to(station_to)
return Route(new_steps, new_distance)
def concatenate(self, other_route):
new_steps = list(self._steps)
        new_steps.extend(other_route._steps)  # extend (not append) keeps the step list flat
new_distance = self._distance + other_route._distance
return Route(new_steps, new_distance)
def distance(self):
return self._distance
def stops(self):
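        # Stops count intermediate stations only, so the start and end steps are excluded.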
return max(0, len(self._steps) - 2)
def shorter_than(self, other):
return self._distance < other.distance()
def start(self):
return self._steps[0] if len(self._steps) > 0 else None
def end(self):
return self._steps[-1] if len(self._steps) > 0 else None
def __str__(self):
text = str(self._distance) + "-"
for step in self._steps:
text = text + str(step)
return text
def __len__(self):
return len(self._steps)
NO_ROUTE = Route([], inf)
class Station():
def __init__(self, iden):
self._id = iden
self._links = {}
def link(self, other, distance):
self._links[other] = distance
# A -> B -> C
def route_to(self, other, context=defaut_context(), filtrum=Filters.all()):
return self._route_to(Route([self]), other, context, filtrum)
def _route_to(self, route, other, context, filtrum):
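        # Depth-first search: a direct link that passes the filter is stored (and returned unless the
        # context is 'deep'); otherwise linked stations are explored recursively up to the context's
        # max depth and the shortest route found is returned (NO_ROUTE when nothing qualifies).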
if self.connected_to(other):
result = route.via(self, other)
if filtrum(context, result):
context.store(result)
if context.deep() == False:
return result
if len(route) < context.max_depth() and context.direct() == False:
result = NO_ROUTE
for station in self._links:
route_try = station._route_to(route.via(self, station), other, context, filtrum)
result = route_try if route_try.shorter_than(result) else result
if filtrum(context, result):
context.store(route)
return result
else:
return NO_ROUTE
def connected_to(self, other):
return other in self._links
def distance_to(self, other):
return self._links[other] if self.connected_to(other) else inf
def id(self):
return self._id
def __str__(self):
return self._id
class Railway():
def __init__(self):
self._size = 0
def newStation(self, name):
station = Station(name)
self._size = self._size + 1
return station
def all_routes(self, filters, *stations):
context = Context(max_len=10, deep=True)
self._route(stations, context, filters)
return context.routes()
def best_route(self, *stations):
context = Context(max_len=self._size)
return self._route(stations, context)
def best_direct_route(self, *stations):
context = Context(max_len=self._size, direct=True)
return self._route(stations, context)
def _route(self, stations, context, user_filter=Filters.all()):
result = Route()
start = None
for station in stations:
if start is None:
start = station
else:
filtrum = Filters.compose(user_filter, Filters.valid(start, station))
result = result.concatenate(start.route_to(station, context, filtrum))
start = station
return result
|
mit
| -6,439,781,061,811,617,000 | 25.370558 | 96 | 0.576323 | false |
rasbt/advent-of-code-2016
|
python_code/aoc_01_02.py
|
1
|
4237
|
import collections
"""
source: http://adventofcode.com/2016/day/1
DESCRIPTION
Santa's sleigh uses a very high-precision clock to guide its movements, and the
clock's oscillator is regulated by stars. Unfortunately, the stars have been
stolen... by the Easter Bunny. To save Christmas, Santa needs you to retrieve
all fifty stars by December 25th.
Collect stars by solving puzzles. Two puzzles will be made available on each
day in the advent calendar; the second puzzle is unlocked when you complete
the first. Each puzzle grants one star. Good luck!
You're airdropped near Easter Bunny Headquarters in a city somewhere. "Near",
unfortunately, is as close as you can get - the instructions on the
Easter Bunny Recruiting Document the Elves intercepted start here,
and nobody had time to work them out further.
The Document indicates that you should start at the given coordinates
(where you just landed) and face North. Then, follow the provided sequence:
either turn left (L) or right (R) 90 degrees, then walk forward the given
number of blocks, ending at a new intersection.
There's no time to follow such ridiculous instructions on foot, though, so you
take a moment and work out the destination. Given that you can only walk on
the street grid of the city, how far is the shortest path to the destination?
For example:
Following R2, L3 leaves you 2 blocks East and 3 blocks North,
or 5 blocks away.
R2, R2, R2 leaves you 2 blocks due South of your starting position,
which is 2 blocks away.
R5, L5, R5, R3 leaves you 12 blocks away.
How many blocks away is Easter Bunny HQ?"""
p_input = """R4, R3, R5, L3, L5, R2, L2, R5, L2, R5, R5, R5, R1, R3, L2, L2,
L1, R5, L3, R1, L2, R1, L3, L5, L1, R3, L4, R2, R4, L3, L1, R4, L4, R3, L5, L3,
R188, R4, L1, R48, L5, R4, R71, R3, L2, R188, L3, R2, L3, R3, L5, L1, R1, L2,
L4, L2, R5, L3, R3, R3, R4, L3, L4, R5, L4, L4, R3, R4, L4, R1, L3, L1, L1, R4,
R1, L4, R1, L1, L3, R2, L2, R2, L1, R5, R3, R4, L5, R2, R5, L5, R1, R2, L1, L3,
R3, R1, R3, L4, R4, L4, L1, R1, L2, L2, L4, R1, L3, R4, L2, R3, L1, L5, R4, R5,
R2, R5, R1, R5, R1, R3, L3, L2, L2, L5, R2, L2, R5, R5, L2, R3, L5, R5, L2, R4,
R2, L1, R3, L5, R3, R2, R5, L1, R3, L2, R2, R1"""
"""
--- Part Two ---
Then, you notice the instructions continue on the back of the
Recruiting Document. Easter Bunny HQ is actually at the first
location you visit twice.
For example, if your instructions are R8, R4, R4, R8, the first
location you visit twice is 4 blocks away, due East.
How many blocks away is the first location you visit twice?
"""
def walk(input_string):
end_position = [0, 0]
all_positions = set()
first_pos_visited_twice = ()
dq = collections.deque('NESW')
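    # dq[0] is the current heading; rotate(-1) turns right, rotate() turns left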
curr_direction = dq[0]
input_list = input_string.split(',')
def visit_all():
nonlocal first_pos_visited_twice
if not first_pos_visited_twice:
curr_pos = tuple(end_position)
if curr_pos in all_positions:
first_pos_visited_twice = curr_pos
else:
all_positions.add(curr_pos)
for i in input_list:
i = i.strip()
turn, strides = i[0], int(i[1:])
if turn == 'R':
dq.rotate(-1)
else:
dq.rotate()
curr_direction = dq[0]
for i in range(strides):
if curr_direction == 'N':
end_position[1] += 1
elif curr_direction == 'E':
end_position[0] += 1
elif curr_direction == 'S':
end_position[1] -= 1
else:
end_position[0] -= 1
visit_all()
return end_position, first_pos_visited_twice
def compute_manhattan_dist(end_position):
mdist = abs(0 - end_position[0]) + abs(0 - end_position[1])
return mdist
def test_1():
test_input = "R8, R4, R4, R8"
end_pos, first_pos_visited_twice = walk(test_input)
mdist = compute_manhattan_dist(first_pos_visited_twice)
assert mdist == 4
def quiz_solution_p2():
end_pos, first_pos_visited_twice = walk(p_input)
mdist = compute_manhattan_dist(first_pos_visited_twice)
print('Quiz solution part 2:', mdist)
if __name__ == "__main__":
test_1()
quiz_solution_p2()
|
mit
| 3,319,549,417,505,917,400 | 30.857143 | 79 | 0.641256 | false |
hiteshchoudhary/Airvengers
|
AirvengersGUI.py
|
1
|
7188
|
#!/usr/bin/env python
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
# Thanks for opting for GUI version of AirCrack set of tools. This project is in early stage and require your support. #
# Project is based on Aircrack-ng set of tools and is specially designed to run on KALI LINUX. #
# #
# Designed by : Hitesh Choudhary #
# Home page : www.HiteshChoudhary.com #
# Email : hitesh@hiteshchoudhary.com #
# Based on : www.Aircrack-ng.org #
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
from Canvas import Line
from Tkinter import *
from Tkinter import Frame, PhotoImage, Text, Label, Button
import subprocess
from textwrap import fill
from tkFont import Font
import tkFont
import tkMessageBox
class Feedback:
def __init__(self, master):
#min and max size of window
#master.minsize(width=410, height=700)
#master.maxsize(width=410, height=700)
#end
#title of window
master.title("Airvengers")
#end
#for scrolling the page
#end
#for the style of fonts
self.customFont = tkFont.Font(family="Helvetica", size=15)
self.myfont = tkFont.Font(family="Helvetica", size=12)
self.myfont2 = tkFont.Font(family="Helvetica", size=10)
#end
#header frame
self.frame_header = Frame(master, bg="white")
self.frame_header.pack(fill=BOTH, side=TOP, expand=True)
self.logo = PhotoImage(file = "logoair.gif")
Label(self.frame_header, image = self.logo).grid(row = 0, column = 0, sticky='sw', columnspan=2)
#end
#content frame
self.frame_content = Frame(master, bg="white")
self.frame_content.pack(fill=BOTH, side=TOP, expand=True)
Label(self.frame_content, text = 'Based on:' ,font=self.myfont, wraplength =200, bg="white").grid(row = 0, column = 0, padx = 5, sticky = 'sw')
Label(self.frame_content, text = 'GUI by Hitesh:', font=self.myfont, wraplength =200,padx=0, bg="white").grid(row = 0, column = 1, padx = 5, sticky = 'sw')
Label(self.frame_content, text = 'Aircrack-ng' ,font=self.myfont, wraplength =300, bg="white").grid(row = 1, column = 0, padx = 5, sticky = 'sw')
Label(self.frame_content, text = 'hitesh@hiteshchoudhary.com', font=self.myfont2, wraplength =300,padx=0, bg="white").grid(row = 1, column = 1, padx = 5, sticky = 'sw')
#Label(self.frame_content, text = 'Comments:').grid(row = 2, column = 0, padx = 5, sticky = 'sw')
#self.entry_name = Entry(self.frame_content, width = 24)
#self.entry_email = Entry(self.frame_content, width = 24)
#self.text_comments = Text(self.frame_content, width = 50, height = 10)
#self.entry_name.grid(row = 1, column = 0, padx = 5)
#self.entry_email.grid(row = 1, column = 1, padx = 5)
#self.text_comments.grid(row = 3, column = 0, columnspan = 2, padx = 5)
Button(self.frame_content, text = 'airmon-ng', command =AirmonNg, height=2, width=15, font=self.customFont).grid(row = 4, column = 0, padx = 5, pady = 5)
Button(self.frame_content, text = 'aircrack-ng', command=AircrackNg, height=2, width=15, font=self.customFont).grid(row = 4, column = 1, padx = 5, pady = 5 )
Button(self.frame_content, text = 'airdecap-ng' , command = AirdecapNg, height=2, width=15, font=self.customFont).grid(row = 5, column = 0, padx = 5, pady = 5 )
Button(self.frame_content, text = 'airdecloak-ng', command = AirdecloakNg, height=2, width=15, font=self.customFont).grid(row = 5, column = 1, padx = 5, pady = 5 )
Button(self.frame_content, text = 'airdrop-ng', command = AirdropNg, height=2, width=15, font=self.customFont).grid(row = 6, column = 0, padx = 5, pady = 5 )
Button(self.frame_content, text = 'aireplay-ng', command = AireplayNg, height=2, width=15, font=self.customFont).grid(row = 6, column = 1, padx = 5, pady = 5 )
Button(self.frame_content, text = 'airgraph-ng', command = AirgraphNg, height=2, width=15, font=self.customFont).grid(row = 7, column = 0, padx = 5, pady = 5 )
Button(self.frame_content, text = 'airbase-ng', command = AirbaseNg, height=2, width=15, font=self.customFont).grid(row = 7, column = 1, padx = 5, pady = 5 )
Button(self.frame_content, text = 'airodump-ng', command = AirodumpNg, height=2, width=15, font=self.customFont).grid(row = 8, column = 0, padx = 5, pady = 5 )
Button(self.frame_content, text = 'airolib-ng', command = AirolibNg, height=2, width=15, font=self.customFont).grid(row = 8, column = 1, padx = 5, pady = 5 )
Button(self.frame_content, text = 'airserv-ng ', command = AirservNg, height=2, width=15, font=self.customFont).grid(row = 9, column = 0, padx = 5, pady = 5 )
Button(self.frame_content, text = 'airtun-ng ', command = AirtunNg, height=2, width=15, font=self.customFont).grid(row = 9, column = 1, padx = 5, pady = 5)
def clear(self):
self.entry_name.delete(0, 'end')
self.entry_email.delete(0, 'end')
self.text_comments.delete(1.0, 'end')
def main():
root = Tk()
feedback = Feedback(root)
root.mainloop()
#for open the next page
def callback():
execfile("mygui3.py")
return
def AirmonNg():
subprocess.call(["python","Airmon-ng.py"])
def AirodumpNg():
subprocess.call(["python","Airodump-ng.py"])
def AirbaseNg():
subprocess.call(["python","Airbase-ng.py"])
def AircrackNg():
subprocess.call(["python","Aircrack-ng.py"])
def AirdecapNg():
subprocess.call(["python","Airdecap-ng.py"])
def AirdecloakNg():
subprocess.call(["python","Airdecloak-ng.py"])
def AirdropNg():
subprocess.call(["python","Airdrop-ng.py"])
def AireplayNg():
subprocess.call(["python","Aireplay-ng.py"])
def AirgraphNg():
subprocess.call(["python","Aigraph-ng.py"])
def AirolibNg():
subprocess.call(["python","Airolib-ng.py"])
def AirservNg():
subprocess.call(["python","Airserv-ng.py"])
def AirtunNg():
subprocess.call(["python","Airtun-ng.py"])
# end
# def openFile1(self):
# os.startfile("mygui2.py")
if __name__ == "__main__": main()
|
gpl-2.0
| 3,782,132,015,984,386,600 | 45.374194 | 176 | 0.536032 | false |
awm/dmr5200
|
dmr5200.py
|
1
|
3719
|
# -*- coding: utf-8 -*-
import io
import time
import serial
import select
class Dmr5200(object):
"""
Representation of a connection to a DMR-5200 digital multimeter.
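    Example usage (a minimal sketch; the serial port name below is an assumption):
        meter = Dmr5200('/dev/ttyUSB0')
        reading = meter.request()
        if reading is not None:
            print(reading['value'], reading['units'])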
"""
def __init__(self, port, timeout=2):
"""
Opens the serial connection to the meter.
port - The platform dependent serial port string
timeout - The timeout (in seconds) to use for serial read/write operations
"""
self.ser = serial.Serial(port, baudrate=1200, bytesize=serial.SEVENBITS,
parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_TWO, timeout=timeout)
self.sio = io.TextIOWrapper(io.BufferedRWPair(self.ser, self.ser), newline='\r')
def request(self):
"""
Request one reading from the meter. None will be returned if any error
        occurred when processing the returned data, otherwise a dictionary with
the following fields will be returned:
{
'function': <meter function string>,
'value': <reading value>,
'units': <measurement units string>,
'timestamp': <timestamp of reading reception>,
'raw': <raw serial message string>
}
'function' may be one of "DC", "AC", "RES", "FR", "CAP", "IND", "TEMP",
"LOG", "BUZ", or "DIO"
'value' may be numeric, True/False/None for logic levels, True/False
for continuity, or one of "OPEN"/"SHORT"/"GOOD" for the diode
setting, or None if it should be numeric but the meter registered
an overload condition
'units' is a string describing the measurement units, or None if not
applicable
        'timestamp' is an arbitrary floating point time value in seconds which
can be used to determine the actual interval between completed
readings
'raw' is the actual string read from the serial port, including the
trailing carriage return character
"""
try:
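            # A bare carriage return prompts the meter to send one CR-terminated reading line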
self.ser.write('\r')
line = self.sio.readline()
except select.error:
return None
if len(line) < 6:
return None
parts = line.split()
result = {
'function': parts[0],
'value': None,
'units': None,
'timestamp': time.time(),
'raw': line
}
if parts[0] in ["DC", "AC", "RES", "FR", "CAP", "IND", "TEMP"]:
try:
result['value'] = float(parts[1])
result['units'] = parts[2]
if parts[0] == "TEMP":
result['units'] = u"°C"
elif parts[0] == "RES":
if parts[2] == "MOHM":
result['units'] = u"MΩ"
elif parts[2] == "OHM":
result['units'] = u"Ω"
except ValueError:
result['value'] = None
except IndexError:
return None
elif parts[0] == "LOG":
try:
result['value'] = {'LOW': False, 'HIGH': True, 'UNDET': None}[parts[1]]
except IndexError:
return None
elif parts[0] == "BUZ":
try:
result['value'] = {'OPEN': False, 'SHORT': True}[parts[1]]
except IndexError:
return None
elif parts[0] == "DIO":
try:
if parts[1] in ["OPEN", "SHORT", "GOOD"]:
result['value'] = parts[1]
else:
return None
except IndexError:
return None
return result
|
bsd-3-clause
| -5,928,775,878,677,707,000 | 36.14 | 88 | 0.506731 | false |
Vauxoo/account-payment
|
res_currency_print_on_check/__init__.py
|
1
|
1030
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Odoo, Open Source Management Solution
# Copyright (C) 2010 - 2014 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import models
|
agpl-3.0
| 7,804,634,020,787,729,000 | 43.782609 | 78 | 0.603883 | false |
JensTimmerman/radical.pilot
|
examples/tutorial/coupled_tasks.py
|
1
|
8504
|
#!/usr/bin/env python
__copyright__ = "Copyright 2013-2014, http://radical.rutgers.edu"
__license__ = "MIT"
import sys
import radical.pilot as rp
""" DESCRIPTION: Tutorial 3: Coupled Tasks
For every task A1 and B1 a C1 is started.
"""
# READ: The RADICAL-Pilot documentation:
# http://radicalpilot.readthedocs.org/en/latest
#
# Try running this example with RADICAL_PILOT_VERBOSE=debug set if
# you want to see what happens behind the scenes!
#------------------------------------------------------------------------------
#
def pilot_state_cb (pilot, state):
if not pilot:
return
print "[Callback]: ComputePilot '%s' state: %s." % (pilot.uid, state)
if state == rp.FAILED:
sys.exit (1)
#------------------------------------------------------------------------------
#
def unit_state_cb (unit, state):
if not unit:
return
global CNT
print "[Callback]: unit %s on %s: %s." % (unit.uid, unit.pilot_id, state)
if state == rp.FAILED:
print "stderr: %s" % unit.stderr
sys.exit(2)
#------------------------------------------------------------------------------
#
if __name__ == "__main__":
# we can optionally pass session name to RP
if len(sys.argv) > 1:
session_name = sys.argv[1]
else:
session_name = None
# Create a new session. No need to try/except this: if session creation
# fails, there is not much we can do anyways...
session = rp.Session(name=session_name)
print "session id: %s" % session.uid
# all other pilot code is now tried/excepted. If an exception is caught, we
# can rely on the session object to exist and be valid, and we can thus tear
# the whole RP stack down via a 'session.close()' call in the 'finally'
# clause...
try:
# ----- CHANGE THIS -- CHANGE THIS -- CHANGE THIS -- CHANGE THIS ------
#
# Change the user name below if you are using a remote resource
# and your username on that resource is different from the username
# on your local machine.
#
c = rp.Context('userpass')
#c.user_id = "tutorial_X"
#c.user_pass = "PutYourPasswordHere"
session.add_context(c)
# Add a Pilot Manager. Pilot managers manage one or more ComputePilots.
print "Initializing Pilot Manager ..."
pmgr = rp.PilotManager(session=session)
# Register our callback with the PilotManager. This callback will get
# called every time any of the pilots managed by the PilotManager
# change their state.
pmgr.register_callback(pilot_state_cb)
# ----- CHANGE THIS -- CHANGE THIS -- CHANGE THIS -- CHANGE THIS ------
#
# If you want to run this example on your local machine, you don't have
# to change anything here.
#
# Change the resource below if you want to run on a remote resource.
# You also might have to set the 'project' to your allocation ID if
# your remote resource does compute time accounting.
#
# A list of preconfigured resources can be found at:
# http://radicalpilot.readthedocs.org/en/latest/machconf.html#preconfigured-resources
#
pdesc = rp.ComputePilotDescription ()
pdesc.resource = "local.localhost" # NOTE: This is a "label", not a hostname
pdesc.runtime = 10 # minutes
pdesc.cores = 1
pdesc.cleanup = True
# submit the pilot.
print "Submitting Compute Pilot to Pilot Manager ..."
pilot = pmgr.submit_pilots(pdesc)
# Combine the ComputePilot, the ComputeUnits and a scheduler via
# a UnitManager object.
print "Initializing Unit Manager ..."
umgr = rp.UnitManager (session=session,
scheduler=rp.SCHED_DIRECT_SUBMISSION)
# Register our callback with the UnitManager. This callback will get
# called every time any of the units managed by the UnitManager
# change their state.
umgr.register_callback(unit_state_cb)
# Add the created ComputePilot to the UnitManager.
print "Registering Compute Pilot with Unit Manager ..."
umgr.add_pilots(pilot)
NUMBER_JOBS = 2 # the total number of CUs to chain
# submit A cus to pilot job
cudesc_list_A = []
for idx in range(NUMBER_JOBS):
# -------- BEGIN USER DEFINED CU 1 DESCRIPTION --------- #
cudesc = rp.ComputeUnitDescription()
cudesc.environment = {"CU_LIST": "A", "CU_NO": "%02d" % idx}
cudesc.executable = "/bin/echo"
cudesc.arguments = ['"$CU_LIST CU with id $CU_NO"']
cudesc.cores = 1
# -------- END USER DEFINED CU 1 DESCRIPTION --------- #
cudesc_list_A.append(cudesc)
# Submit the previously created ComputeUnit descriptions to the
# PilotManager. This will trigger the selected scheduler to start
# assigning ComputeUnits to the ComputePilots.
print "Submit Compute Units 'A' to Unit Manager ..."
cu_set_A = umgr.submit_units(cudesc_list_A)
# submit B cus to pilot job
cudesc_list_B = []
for idx in range(NUMBER_JOBS):
# -------- BEGIN USER DEFINED CU 2 DESCRIPTION --------- #
cudesc = rp.ComputeUnitDescription()
cudesc.environment = {"CU_LIST": "B", "CU_NO": "%02d" % idx}
cudesc.executable = "/bin/echo"
cudesc.arguments = ['"$CU_LIST CU with id $CU_NO"']
cudesc.cores = 1
# -------- END USER DEFINED CU 2 DESCRIPTION --------- #
cudesc_list_B.append(cudesc)
# Submit the previously created ComputeUnit descriptions to the
# PilotManager. This will trigger the selected scheduler to start
# assigning ComputeUnits to the ComputePilots.
print "Submit Compute Units 'B' to Unit Manager ..."
cu_set_B = umgr.submit_units(cudesc_list_B)
# ---------------------------------------------------------------------
print "Waiting for 'A' and 'B' CUs to complete..."
umgr.wait_units()
print "Executing 'C' tasks now..."
# ---------------------------------------------------------------------
# submit 'C' tasks to pilot job. each 'C' task takes the output of
# an 'A' and a 'B' task and puts them together.
cudesc_list_C = []
for idx in range(NUMBER_JOBS):
# -------- BEGIN USER DEFINED CU 3 DESCRIPTION --------- #
cudesc = rp.ComputeUnitDescription()
cudesc.environment = {"CU_SET": "C", "CU_NO": "%02d" % idx}
cudesc.executable = "/bin/echo"
cudesc.arguments = ['"$CU_SET CU with id $CU_NO"']
cudesc.cores = 1
# -------- END USER DEFINED CU 3 DESCRIPTION --------- #
cudesc_list_C.append(cudesc)
# Submit the previously created ComputeUnit descriptions to the
# PilotManager. This will trigger the selected scheduler to start
# assigning ComputeUnits to the ComputePilots.
print "Submit Compute Units 'C' to Unit Manager ..."
cu_set_C = umgr.submit_units(cudesc_list_C)
# ---------------------------------------------------------------------
print "Waiting for 'C' CUs to complete..."
umgr.wait_units()
print "All CUs completed successfully!"
except Exception as e:
# Something unexpected happened in the pilot code above
print "caught Exception: %s" % e
raise
except (KeyboardInterrupt, SystemExit) as e:
# the callback called sys.exit(), and we can here catch the
# corresponding KeyboardInterrupt exception for shutdown. We also catch
# SystemExit (which gets raised if the main threads exits for some other
# reason).
print "need to exit now: %s" % e
finally:
# always clean up the session, no matter if we caught an exception or
# not.
print "closing session"
session.close ()
# the above is equivalent to
#
# session.close (cleanup=True, terminate=True)
#
# it will thus both clean out the session's database record, and kill
# all remaining pilots (none in our example).
#-------------------------------------------------------------------------------
|
mit
| 2,146,378,430,429,013,200 | 35.813853 | 93 | 0.559384 | false |
googleapis/python-managed-identities
|
google/cloud/managedidentities/__init__.py
|
1
|
2912
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.cloud.managedidentities_v1.services.managed_identities_service.client import (
ManagedIdentitiesServiceClient,
)
from google.cloud.managedidentities_v1.services.managed_identities_service.async_client import (
ManagedIdentitiesServiceAsyncClient,
)
from google.cloud.managedidentities_v1.types.managed_identities_service import (
AttachTrustRequest,
)
from google.cloud.managedidentities_v1.types.managed_identities_service import (
CreateMicrosoftAdDomainRequest,
)
from google.cloud.managedidentities_v1.types.managed_identities_service import (
DeleteDomainRequest,
)
from google.cloud.managedidentities_v1.types.managed_identities_service import (
DetachTrustRequest,
)
from google.cloud.managedidentities_v1.types.managed_identities_service import (
GetDomainRequest,
)
from google.cloud.managedidentities_v1.types.managed_identities_service import (
ListDomainsRequest,
)
from google.cloud.managedidentities_v1.types.managed_identities_service import (
ListDomainsResponse,
)
from google.cloud.managedidentities_v1.types.managed_identities_service import (
OpMetadata,
)
from google.cloud.managedidentities_v1.types.managed_identities_service import (
ReconfigureTrustRequest,
)
from google.cloud.managedidentities_v1.types.managed_identities_service import (
ResetAdminPasswordRequest,
)
from google.cloud.managedidentities_v1.types.managed_identities_service import (
ResetAdminPasswordResponse,
)
from google.cloud.managedidentities_v1.types.managed_identities_service import (
UpdateDomainRequest,
)
from google.cloud.managedidentities_v1.types.managed_identities_service import (
ValidateTrustRequest,
)
from google.cloud.managedidentities_v1.types.resource import Domain
from google.cloud.managedidentities_v1.types.resource import Trust
__all__ = (
"ManagedIdentitiesServiceClient",
"ManagedIdentitiesServiceAsyncClient",
"AttachTrustRequest",
"CreateMicrosoftAdDomainRequest",
"DeleteDomainRequest",
"DetachTrustRequest",
"GetDomainRequest",
"ListDomainsRequest",
"ListDomainsResponse",
"OpMetadata",
"ReconfigureTrustRequest",
"ResetAdminPasswordRequest",
"ResetAdminPasswordResponse",
"UpdateDomainRequest",
"ValidateTrustRequest",
"Domain",
"Trust",
)
|
apache-2.0
| 7,330,119,701,366,065,000 | 33.666667 | 96 | 0.786058 | false |
trolldbois/python-haystack
|
test/haystack/test_argparse_utils.py
|
1
|
1657
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import unittest
import sys
import argparse
from haystack import argparse_utils
class Test(unittest.TestCase):
def test_readable(self):
"""test the readable helper."""
invalid = '/345678ui0d9t921giv9'
self.assertRaises(argparse.ArgumentTypeError, argparse_utils.readable, invalid)
valid = sys.modules[__name__].__file__
self.assertEqual(argparse_utils.readable(valid), valid)
return
def test_writeable(self):
"""test the writeable helper."""
invalid = '/345678ui0d9t921giv9/qwf89/2/4r/ef/23/23g/'
self.assertRaises(argparse.ArgumentTypeError, argparse_utils.writeable, invalid)
valid = sys.modules[__name__].__file__
self.assertEqual(argparse_utils.writeable(valid), valid)
return
def test_int16(self):
"""test the int16 helper."""
invalid = '/345678ui0d9t921giv9'
self.assertRaises(argparse.ArgumentTypeError, argparse_utils.int16, invalid)
invalid = sys.modules[__name__].__file__
self.assertRaises(argparse.ArgumentTypeError, argparse_utils.int16, invalid)
valid = '0x01293'
self.assertEqual(argparse_utils.int16(valid), 0x01293)
return
if __name__ == '__main__':
logging.basicConfig(level=logging.WARNING)
#logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# logging.getLogger('basicmodel').setLevel(level=logging.DEBUG)
# logging.getLogger('model').setLevel(level=logging.DEBUG)
# logging.getLogger('memory_mapping').setLevel(level=logging.INFO)
unittest.main(verbosity=2)
|
gpl-3.0
| 2,497,858,495,602,369,000 | 31.490196 | 88 | 0.675317 | false |
eengl/pytdlpack
|
test/test_create_new_grid.py
|
1
|
2265
|
#!/usr/bin/env python3
# ----------------------------------------------------------------------------------------
# Import Modules
# ----------------------------------------------------------------------------------------
import numpy as np
import setuptools
import sys
platform = setuptools.distutils.util.get_platform()
build_path = './build/lib.'+platform+'-'+str(sys.version_info.major)+'.'+str(sys.version_info.minor)
sys.path.insert(0,build_path)
import pytdlpack
# ----------------------------------------------------------------------------------------
# Create some data
# ----------------------------------------------------------------------------------------
nx = 2345
ny = 1597
date = 2019052900
id = [4210008,10,24,0]
grid_data = np.random.rand(nx,ny)*75.0
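# NOTE: the random field is immediately overwritten with NaN below, presumably to exercise missing-value handling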
grid_data.fill(np.nan)
# ----------------------------------------------------------------------------------------
# Grid Specs: CONUS Lambert-Conformal 2.5km 2345x1597
# ----------------------------------------------------------------------------------------
griddef = pytdlpack.create_grid_definition(proj=3,nx=nx,ny=ny,latll=19.2290,
lonll=233.7234,orientlon=265.,stdlat=25.,meshlength=2.539703)
# ----------------------------------------------------------------------------------------
# Create TDLPACK data record and pack
# ----------------------------------------------------------------------------------------
rec = pytdlpack.TdlpackRecord(date=date,id=id,lead=24,plain="GFS WIND SPEED",
data=grid_data,missing_value=9999.0,grid=griddef)
rec.pack(dec_scale=3)
# ----------------------------------------------------------------------------------------
# Open new sequential file and write the records
# ----------------------------------------------------------------------------------------
f = pytdlpack.open('new_grid.sq',mode='w',format='sequential')
f.write(rec)
f.close()
# ----------------------------------------------------------------------------------------
# Open new random-access file and write the records
# ----------------------------------------------------------------------------------------
fra = pytdlpack.open('new_grid.ra',mode='w',format='random-access',ra_template='large')
fra.write(rec)
fra.close()
|
gpl-3.0
| -6,735,055,586,359,847,000 | 44.3 | 100 | 0.368653 | false |
Jumpscale/jumpscale_portal8
|
apps/gridportal/base/Grid/.macros/page/adminjumpscripts/3_adminjumpscripts.py
|
1
|
1480
|
def main(j, args, params, tags, tasklet):
def _formatdata(jumpscripts):
aaData = list()
for name, jumpscript in jumpscripts.items():
itemdata = ['<a href=adminjumpscript?name=%s>%s</a>' % (name, name)]
for field in ['organization', 'version', 'descr']: #code
itemdata.append(str(jumpscript[field]))
aaData.append(itemdata)
return aaData
cl=j.clients.redis.getGeventRedisClient("localhost", 7770)
    if not j.application.config.exists("grid.watchdog.secret") or j.application.config.get("grid.watchdog.secret") == "":
page = args.page
page.addMessage('* no grid configured for watchdog: hrd:grid.watchdog.secret')
params.result = page
return params
key = "%s:admin:jscripts" % j.application.config.get("grid.watchdog.secret")
scripts = cl.hgetall(key)
jumpscripts = dict([(scripts[i], j.data.serializer.json.loads(scripts[i+1])) for i, _ in enumerate(scripts) if i % 2 == 0])
jscripts = _formatdata(jumpscripts)
page = args.page
modifier = j.portal.tools.html.getPageModifierGridDataTables(page)
fieldnames = ('Name', 'Organization', 'Version', 'Description')
tableid = modifier.addTableFromData(jscripts, fieldnames)
modifier.addSearchOptions('#%s' % tableid)
modifier.addSorting('#%s' % tableid, 0, 'desc')
params.result = page
return params
def match(j, args, params, tags, tasklet):
return True
|
apache-2.0
| 6,871,115,370,447,651,000 | 36.948718 | 127 | 0.651351 | false |
RyanDJLee/pyta
|
nodes/AsyncFunctionDef.py
|
1
|
1078
|
"""
AsyncFunctionDef astroid node
Subclass of FunctionDef astroid node. An async def function definition, used
for async astroid nodes like AsyncFor and AsyncWith.
Attributes:
- name (str)
- The function's name.
- args (Arguments)
- An arguments node. See Arguments.py for more details.
- doc (str)
- The docstring of the function.
- body (List[Node])
- The list of nodes inside the function.
- decorators (Decorator)
- The decorator to be applied on this function.
- returns (None)
- The return annotation.
Example:
- name -> animal
- args -> arguments(args=[], vararg=None, kwonlyargs=
[arg(arg='arg')],kw_defaults=[], kwarg=None, defaults=[])
- doc -> "This is function animal."
- body -> [Assign(dog, "an animal")]
- decorators -> @wrapper
- returns -> return dog
"""
@wrapper
async def animal(arg):
"""
This is function animal.
"""
dog = "an animal"
return dog
|
gpl-3.0
| -8,257,557,885,051,870,000 | 28.135135 | 79 | 0.573284 | false |
mjpost/sacreBLEU
|
sacrebleu/sacrebleu.py
|
1
|
17856
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
SacreBLEU provides hassle-free computation of shareable, comparable, and reproducible BLEU scores.
Inspired by Rico Sennrich's `multi-bleu-detok.perl`, it produces the official WMT scores but works with plain text.
It also knows all the standard test sets and handles downloading, processing, and tokenization for you.
See the [README.md] file for more information.
"""
import io
import sys
import logging
import pathlib
import argparse
# Allows calling the script as a standalone utility
# See: https://github.com/mjpost/sacrebleu/issues/86
if __package__ is None and __name__ == '__main__':
parent = pathlib.Path(__file__).absolute().parents[1]
sys.path.insert(0, str(parent))
__package__ = 'sacrebleu'
from .tokenizers import TOKENIZERS, DEFAULT_TOKENIZER
from .dataset import DATASETS, DOMAINS, COUNTRIES, SUBSETS
from .metrics import METRICS
from .utils import smart_open, filter_subset, get_available_origlangs, SACREBLEU_DIR
from .utils import get_langpairs_for_testset, get_available_testsets
from .utils import print_test_set, get_reference_files, download_test_set
from . import __version__ as VERSION
sacrelogger = logging.getLogger('sacrebleu')
try:
# SIGPIPE is not available on Windows machines, throwing an exception.
from signal import SIGPIPE
# If SIGPIPE is available, change behaviour to default instead of ignore.
from signal import signal, SIG_DFL
signal(SIGPIPE, SIG_DFL)
except ImportError:
sacrelogger.warning('Could not import signal.SIGPIPE (this is expected on Windows machines)')
def parse_args():
arg_parser = argparse.ArgumentParser(
description='sacreBLEU: Hassle-free computation of shareable BLEU scores.\n'
'Quick usage: score your detokenized output against WMT\'14 EN-DE:\n'
' cat output.detok.de | sacrebleu -t wmt14 -l en-de',
formatter_class=argparse.RawDescriptionHelpFormatter)
arg_parser.add_argument('--citation', '--cite', default=False, action='store_true',
help='dump the bibtex citation and quit.')
arg_parser.add_argument('--list', default=False, action='store_true',
help='print a list of all available test sets.')
arg_parser.add_argument('--test-set', '-t', type=str, default=None,
help='the test set to use (see also --list) or a comma-separated list of test sets to be concatenated')
arg_parser.add_argument('--language-pair', '-l', dest='langpair', default=None,
help='source-target language pair (2-char ISO639-1 codes)')
arg_parser.add_argument('--origlang', '-ol', dest='origlang', default=None,
help='use a subset of sentences with a given original language (2-char ISO639-1 codes), "non-" prefix means negation')
arg_parser.add_argument('--subset', dest='subset', default=None,
help='use a subset of sentences whose document annotation matches a give regex (see SUBSETS in the source code)')
arg_parser.add_argument('--download', type=str, default=None,
help='download a test set and quit')
arg_parser.add_argument('--echo', choices=['src', 'ref', 'both'], type=str, default=None,
help='output the source (src), reference (ref), or both (both, pasted) to STDOUT and quit')
# I/O related arguments
arg_parser.add_argument('--input', '-i', type=str, default='-',
help='Read input from a file instead of STDIN')
arg_parser.add_argument('refs', nargs='*', default=[],
help='optional list of references (for backwards-compatibility with older scripts)')
arg_parser.add_argument('--num-refs', '-nr', type=int, default=1,
help='Split the reference stream on tabs, and expect this many references. Default: %(default)s.')
arg_parser.add_argument('--encoding', '-e', type=str, default='utf-8',
help='open text files with specified encoding (default: %(default)s)')
# Metric selection
arg_parser.add_argument('--metrics', '-m', choices=METRICS.keys(), nargs='+', default=['bleu'],
help='metrics to compute (default: bleu)')
arg_parser.add_argument('--sentence-level', '-sl', action='store_true', help='Output metric on each sentence.')
# BLEU-related arguments
arg_parser.add_argument('-lc', action='store_true', default=False, help='Use case-insensitive BLEU (default: False)')
arg_parser.add_argument('--smooth-method', '-s', choices=METRICS['bleu'].SMOOTH_DEFAULTS.keys(), default='exp',
help='smoothing method: exponential decay (default), floor (increment zero counts), add-k (increment num/denom by k for n>1), or none')
arg_parser.add_argument('--smooth-value', '-sv', type=float, default=None,
help='The value to pass to the smoothing technique, only used for floor and add-k. Default floor: {}, add-k: {}.'.format(
METRICS['bleu'].SMOOTH_DEFAULTS['floor'], METRICS['bleu'].SMOOTH_DEFAULTS['add-k']))
arg_parser.add_argument('--tokenize', '-tok', choices=TOKENIZERS.keys(), default=None,
help='Tokenization method to use for BLEU. If not provided, defaults to `zh` for Chinese, `mecab` for Japanese and `mteval-v13a` otherwise.')
arg_parser.add_argument('--force', default=False, action='store_true',
help='insist that your tokenized input is actually detokenized')
# ChrF-related arguments
arg_parser.add_argument('--chrf-order', type=int, default=METRICS['chrf'].ORDER,
help='chrf character order (default: %(default)s)')
arg_parser.add_argument('--chrf-beta', type=int, default=METRICS['chrf'].BETA,
help='chrf BETA parameter (default: %(default)s)')
arg_parser.add_argument('--chrf-whitespace', action='store_true', default=False,
help='include whitespace in chrF calculation (default: %(default)s)')
# Reporting related arguments
arg_parser.add_argument('--quiet', '-q', default=False, action='store_true',
help='suppress informative output')
arg_parser.add_argument('--short', default=False, action='store_true',
help='produce a shorter (less human readable) signature')
arg_parser.add_argument('--score-only', '-b', default=False, action='store_true',
help='output only the BLEU score')
arg_parser.add_argument('--width', '-w', type=int, default=1,
help='floating point width (default: %(default)s)')
arg_parser.add_argument('--detail', '-d', default=False, action='store_true',
help='print extra information (split test sets based on origlang)')
arg_parser.add_argument('-V', '--version', action='version',
version='%(prog)s {}'.format(VERSION))
args = arg_parser.parse_args()
return args
def main():
args = parse_args()
# Explicitly set the encoding
sys.stdin = open(sys.stdin.fileno(), mode='r', encoding='utf-8', buffering=True, newline="\n")
sys.stdout = open(sys.stdout.fileno(), mode='w', encoding='utf-8', buffering=True)
if not args.quiet:
logging.basicConfig(level=logging.INFO, format='sacreBLEU: %(message)s')
if args.download:
download_test_set(args.download, args.langpair)
sys.exit(0)
if args.list:
if args.test_set:
print(' '.join(get_langpairs_for_testset(args.test_set)))
else:
print('The available test sets are:')
for testset in get_available_testsets():
print('%30s: %s' % (testset, DATASETS[testset].get('description', '').strip()))
sys.exit(0)
if args.sentence_level and len(args.metrics) > 1:
sacrelogger.error('Only one metric can be used with Sentence-level reporting.')
sys.exit(1)
if args.citation:
if not args.test_set:
sacrelogger.error('I need a test set (-t).')
sys.exit(1)
for test_set in args.test_set.split(','):
if 'citation' not in DATASETS[test_set]:
sacrelogger.error('No citation found for %s', test_set)
else:
print(DATASETS[test_set]['citation'])
sys.exit(0)
if args.num_refs != 1 and (args.test_set is not None or len(args.refs) > 1):
sacrelogger.error('The --num-refs argument allows you to provide any number of tab-delimited references in a single file.')
sacrelogger.error('You can only use it with externaly-provided references, however (i.e., not with `-t`),')
sacrelogger.error('and you cannot then provide multiple reference files.')
sys.exit(1)
if args.test_set is not None:
for test_set in args.test_set.split(','):
if test_set not in DATASETS:
sacrelogger.error('Unknown test set "%s"', test_set)
sacrelogger.error('Please run with --list to see the available test sets.')
sys.exit(1)
if args.test_set is None:
if len(args.refs) == 0:
sacrelogger.error('I need either a predefined test set (-t) or a list of references')
sacrelogger.error(get_available_testsets())
sys.exit(1)
elif len(args.refs) > 0:
sacrelogger.error('I need exactly one of (a) a predefined test set (-t) or (b) a list of references')
sys.exit(1)
elif args.langpair is None:
sacrelogger.error('I need a language pair (-l).')
sys.exit(1)
else:
for test_set in args.test_set.split(','):
langpairs = get_langpairs_for_testset(test_set)
if args.langpair not in langpairs:
sacrelogger.error('No such language pair "%s"', args.langpair)
sacrelogger.error('Available language pairs for test set "%s": %s', test_set,
', '.join(langpairs))
sys.exit(1)
if args.echo:
if args.langpair is None or args.test_set is None:
sacrelogger.warning("--echo requires a test set (--t) and a language pair (-l)")
sys.exit(1)
for test_set in args.test_set.split(','):
print_test_set(test_set, args.langpair, args.echo, args.origlang, args.subset)
sys.exit(0)
if args.test_set is not None and args.tokenize == 'none':
sacrelogger.warning("You are turning off sacrebleu's internal tokenization ('--tokenize none'), presumably to supply\n"
"your own reference tokenization. Published numbers will not be comparable with other papers.\n")
if 'ter' in args.metrics and args.tokenize is not None:
logging.warning("Your setting of --tokenize will be ignored when "
"computing TER")
# Internal tokenizer settings
if args.tokenize is None:
# set default
if args.langpair is not None and args.langpair.split('-')[1] == 'zh':
args.tokenize = 'zh'
elif args.langpair is not None and args.langpair.split('-')[1] == 'ja':
args.tokenize = 'ja-mecab'
else:
args.tokenize = DEFAULT_TOKENIZER
if args.langpair is not None and 'bleu' in args.metrics:
if args.langpair.split('-')[1] == 'zh' and args.tokenize != 'zh':
sacrelogger.warning('You should also pass "--tok zh" when scoring Chinese...')
if args.langpair.split('-')[1] == 'ja' and not args.tokenize.startswith('ja-'):
sacrelogger.warning('You should also pass "--tok ja-mecab" when scoring Japanese...')
# concat_ref_files is a list of list of reference filenames, for example:
# concat_ref_files = [[testset1_refA, testset1_refB], [testset2_refA, testset2_refB]]
if args.test_set is None:
concat_ref_files = [args.refs]
else:
concat_ref_files = []
for test_set in args.test_set.split(','):
ref_files = get_reference_files(test_set, args.langpair)
if len(ref_files) == 0:
sacrelogger.warning('No references found for test set {}/{}.'.format(test_set, args.langpair))
concat_ref_files.append(ref_files)
# Read references
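    # full_refs[i] collects the i-th reference translation for every segment, across all concatenated test sets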
full_refs = [[] for x in range(max(len(concat_ref_files[0]), args.num_refs))]
for ref_files in concat_ref_files:
for refno, ref_file in enumerate(ref_files):
for lineno, line in enumerate(smart_open(ref_file, encoding=args.encoding), 1):
if args.num_refs != 1:
splits = line.rstrip().split(sep='\t', maxsplit=args.num_refs-1)
if len(splits) != args.num_refs:
sacrelogger.error('FATAL: line {}: expected {} fields, but found {}.'.format(lineno, args.num_refs, len(splits)))
sys.exit(17)
for refno, split in enumerate(splits):
full_refs[refno].append(split)
else:
full_refs[refno].append(line)
# Decide on the number of final references, override the argument
args.num_refs = len(full_refs)
# Read hypotheses stream
if args.input == '-':
inputfh = io.TextIOWrapper(sys.stdin.buffer, encoding=args.encoding)
else:
inputfh = smart_open(args.input, encoding=args.encoding)
full_system = inputfh.readlines()
# Filter sentences according to a given origlang
system, *refs = filter_subset(
[full_system, *full_refs], args.test_set, args.langpair, args.origlang, args.subset)
if len(system) == 0:
message = 'Test set %s contains no sentence' % args.test_set
if args.origlang is not None or args.subset is not None:
message += ' with'
message += '' if args.origlang is None else ' origlang=' + args.origlang
message += '' if args.subset is None else ' subset=' + args.subset
sacrelogger.error(message)
sys.exit(1)
# Create metric inventory, let each metric consume relevant args from argparse
metrics = [METRICS[met](args) for met in args.metrics]
# Handle sentence level and quit
if args.sentence_level:
# one metric in use for sentence-level
metric = metrics[0]
for output, *references in zip(system, *refs):
score = metric.sentence_score(output, references)
print(score.format(args.width, args.score_only, metric.signature))
sys.exit(0)
# Else, handle system level
for metric in metrics:
try:
score = metric.corpus_score(system, refs)
except EOFError:
sacrelogger.error('The input and reference stream(s) were of different lengths.')
if args.test_set is not None:
sacrelogger.error('\nThis could be a problem with your system output or with sacreBLEU\'s reference database.\n'
'If the latter, you can clean out the references cache by typing:\n'
'\n'
' rm -r %s/%s\n'
'\n'
'They will be downloaded automatically again the next time you run sacreBLEU.', SACREBLEU_DIR,
args.test_set)
sys.exit(1)
else:
print(score.format(args.width, args.score_only, metric.signature))
if args.detail:
width = args.width
sents_digits = len(str(len(full_system)))
origlangs = args.origlang if args.origlang else get_available_origlangs(args.test_set, args.langpair)
for origlang in origlangs:
subsets = [None]
if args.subset is not None:
subsets += [args.subset]
elif all(t in SUBSETS for t in args.test_set.split(',')):
subsets += COUNTRIES + DOMAINS
for subset in subsets:
system, *refs = filter_subset([full_system, *full_refs], args.test_set, args.langpair, origlang, subset)
if len(system) == 0:
continue
if subset in COUNTRIES:
subset_str = '%20s' % ('country=' + subset)
elif subset in DOMAINS:
subset_str = '%20s' % ('domain=' + subset)
else:
subset_str = '%20s' % ''
for metric in metrics:
# FIXME: handle this in metrics
if metric.name == 'bleu':
_refs = refs
elif metric.name == 'chrf':
_refs = refs[0]
score = metric.corpus_score(system, _refs)
print('origlang={} {}: sentences={:{}} {}={:{}.{}f}'.format(
origlang, subset_str, len(system), sents_digits,
score.prefix, score.score, width+4, width))
if __name__ == '__main__':
main()
|
apache-2.0
| -2,981,217,303,946,219,000 | 49.157303 | 169 | 0.602431 | false |
KevinKazama/game
|
jeutennis/management/commands/tournoi2.py
|
1
|
12445
|
from django.core.management.base import BaseCommand, CommandError
from jeutennis.models import table_joueurs, table_match, table_tournoi
from django.utils import timezone
import datetime
import random
import time
from collections import OrderedDict
list_tournoi = []
id_tournoi = []
list_part = []
domicile = []
exterieur = []
date_time = datetime.datetime.now()
class Command(BaseCommand):
help = 'Gestion tournoi'
def handle(self, *args, **options):
if len(args) == 0:
print 'no args'
try:
req_part = table_tournoi.objects.filter(date_tournoi__gte=date_time).order_by('-date_tournoi')[:3]
#print("tournoi "+req_part.nom)
for s in req_part:
print(s.nom)
list_tournoi.append(s.nom)
id_tournoi.append(s.id)
fichier = open("/kevin/python/Projets/tennis/jeu/jeutennis/tournois/"+str(s.nom)+".txt","r")
line = fichier.read().splitlines()
#print(line)
for x in line:
#print(x)
list_part.append(x)
i = 0
while i < len(list_part):
if i % 2 == 0:
domicile.append(list_part[i])
else:
exterieur.append(list_part[i])
i += 1
j = 0
#print(domicile)
while j < len(domicile):
#print(str(domicile[j])+' vs '+str(exterieur[j]))
joueur1 = table_joueurs.objects.get(id = domicile[j])
joueur2 = table_joueurs.objects.get(id = exterieur[j])
j1 = str(joueur1.prenom+" "+joueur1.nom)
j2 = str(joueur2.prenom+" "+joueur2.nom)
#print(j1+" vs "+j2)
"""
while j < 1:
joueur1 = table_joueurs.objects.get(id = 1)
joueur2 = table_joueurs.objects.get(id = 11)
j1 = str(joueur1.prenom+" "+joueur1.nom)
j2 = str(joueur2.prenom+" "+joueur2.nom)
"""
                    # Games won in the current set
nb1 = 0
nb2 = 0
#Sets
sets = 0
set1 = 0
set2 = 0
sj1 = []
sj2 = []
s1j1 = []
s2j1 = []
s3j1 = []
s4j1 = []
s5j1 = []
s1j2 = []
s2j2 = []
s3j2 = []
s4j2 = []
s5j2 = []
#Scores
res1 = []
res2 = []
                    # Turn of play (used to alternate service)
tour = 0
                    # Player characteristics
serv1 = joueur1.service
serv2 = joueur2.service
ret1 = joueur1.retour
ret2 = joueur2.retour
end1 = joueur1.endurance
end2 = joueur2.endurance
con1 = joueur1.concentration
con2 = joueur2.concentration
diff = 0
comm = []
message = []
nbtour = []
comptset = 0
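                    # Best of five: keep playing sets until one player has won three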
while (set1 < 3) and (set2 < 3):
nb1 = 0
nb2 = 0
                        # Set loop: play games until the set is decided
while (nb1 < 6) and (nb2 < 6):
tour += 1
#print(tour)
nbtour.append(tour)
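                            # Player 1 serves on even turns, player 2 on odd turns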
if tour % 2 == 0:
diff = serv1 - ret2
else:
diff = ret1 - serv2
alea = int(random.randrange(0,100))
if alea < 50+diff:
nb1 += 1
else:
nb2 += 1
                            # Stats decrease (endurance effect)
if serv1 < 1:
serv1 = 0
else:
serv1 = serv1 - end2/100
if ret1 < 1:
ret1 = 0
else:
ret1 = ret1 - end2/100
if con1:
con1 = 0
else:
con1 = con1 - end2/100
if serv2 < 1:
serv2 = 0
else:
serv2 = serv2 - end1/100
if ret2 < 1:
ret2 = 0
else:
ret2 = ret2 - end1/100
if con2 < 1:
con2 = 0
else:
con2 = con2 - end1/100
sj1.append(str(nb1))
sj2.append(str(nb2))
#Tie-Break
if nb1 + nb2 == 11:
while ((nb1 < 7) and (nb2 < 7)) and (abs(nb1-nb2) != 2):
tour += 1
nbtour.append(tour)
if tour % 2 == 0:
diff = serv1 + con1 - ret2 - con2
else:
diff = ret1 + con1 - ret2 - con2
alea = int(random.randrange(100))
if alea < 50+diff:
nb1 += 1
else:
nb2 += 1
                                    # Stats decrease
if serv1 < 1:
serv1 = 0
else:
serv1 = serv1 - end2/100
if ret1 < 1:
ret1 = 0
else:
ret1 = ret1 - end2/100
if con1 < 1:
con1 = 0
else:
con1 = con1 - end2/100
if serv2 < 1:
serv2 = 0
else:
serv2 = serv2 - end1/100
if ret2 < 1:
ret2 = 0
else:
ret2 = ret2 - end1/100
if con2 < 1:
con2 = 0
else:
con2 = con2 - end1/100
rendu = j1+" : "+str(nb1)+" | "+j2+" : "+str(nb2)
sj1.append(str(nb1))
sj2.append(str(nb2))
comm.append(rendu)
                        # Add set scores
comm.append("")
res1.append(nb1)
res2.append(nb2)
                        # Number of sets played
sets += 1
#Add game number in set list
if sets == 1:
for x in sj1:
s1j1.append(x)
for x in sj2:
s1j2.append(x)
elif sets == 2:
for x in sj1:
s2j1.append(x)
for x in sj2:
s2j2.append(x)
elif sets == 3:
for x in sj1:
s3j1.append(x)
for x in sj2:
s3j2.append(x)
elif sets == 4:
for x in sj1:
s4j1.append(x)
for x in sj2:
s4j2.append(x)
elif sets == 5:
for x in sj1:
s5j1.append(x)
for x in sj2:
s5j2.append(x)
while comptset < len(sj1):
sj1[comptset] = "."
comptset += 1
comptset = 0
while comptset < len(sj2):
sj2[comptset] = "."
comptset += 1
comptset = 0
if nb1 > nb2:
set1 += 1
#print(j1+" gagne le set "+str(set1+set2)+" : "+str(nb1)+"/"+str(nb2))
mess = j1+" gagne le set "+str(set1+set2)+" : "+str(nb1)+"/"+str(nb2)
comm.append(mess)
else:
set2 += 1
#print(j2+" gagne le set "+str(set1+set2)+" : "+str(nb2)+"/"+str(nb1))
mess = j2+" gagne le set "+str(set1+set2)+" : "+str(nb2)+"/"+str(nb1)
comm.append(mess)
nset = len(res1)
#print('nset = '+str(nset))
i = 0
win = []
win2 = []
while i < nset:
win.append(str(res1[i])+"/"+str(res2[i]))
win2.append(str(res2[i])+"/"+str(res1[i]))
i += 1
strwin = ' - '.join(win)
strwin2 = ' - '.join(win2)
if set1 > set2:
context2 = j1+" gagne "+strwin+" !"
joueur1.victoire += 1
joueur2.defaite += 1
joueur1.points += 60
joueur2.points -= 50
winner = 1
else:
context2 = j2+" gagne "+strwin2+" !"
joueur1.defaite += 1
joueur2.victoire += 1
joueur1.points -= 60
joueur2.points += 50
winner = 2
joueur1.save()
joueur2.save()
res = []
tour = len(nbtour) + 1
score = len(nbtour) + 2
"""
context = RequestContext(request, {
'j1' : j1,
'j2' : j2,
'res1' : res1,
'res2' : res2,
'set1' : set1,
'set2' : set2,
'comm' : comm,
'message' : message,
'context2' : context2,
'tour' : tour,
'score' : score,
's1j1' : s1j1,
's1j2' : s1j2,
's2j1' : s2j1,
's2j2' : s2j2,
's3j1' : s3j1,
's3j2' : s3j2,
's4j1' : s4j1,
's4j2' : s4j2,
's5j1' : s5j1,
's5j2' : s5j2,
'nbtour' : nbtour,
})
"""
g1j1 = res1[0]
g2j1 = res1[1]
g3j1 = res1[2]
try:
g4j1 = res1[3]
except IndexError:
g4j1 = None
try:
g5j1 = res1[4]
except IndexError:
g5j1 = None
g1j2 = res2[0]
g2j2 = res2[1]
g3j2 = res2[2]
try:
g4j2 = res2[3]
except IndexError:
g4j2 = None
try:
g5j2 = res2[4]
except IndexError:
g5j2 = None
if g4j1 == None:
print(j1+" vs "+j2+" : "+str(g1j1)+"/"+str(g1j2)+" - "+str(g2j1)+"/"+str(g2j2)+" - "+str(g3j1)+"/"+str(g3j2))
elif g5j1 == None:
print(j1+" vs "+j2+" : "+str(g1j1)+"/"+str(g1j2)+" - "+str(g2j1)+"/"+str(g2j2)+" - "+str(g3j1)+"/"+str(g3j2)+" - "+str(g4j1)+"/"+str(g4j2))
else:
print(j1+" vs "+j2+" : "+str(g1j1)+"/"+str(g1j2)+" - "+str(g2j1)+"/"+str(g2j2)+" - "+str(g3j1)+"/"+str(g3j2)+" - "+str(g4j1)+"/"+str(g4j2)+" - "+str(g5j1)+"/"+str(g5j2))
add = table_match.objects.create(s1j1 = g1j1, s2j1 = g2j1, s3j1 = g3j1, s4j1 = g4j1, s5j1 = g5j1, s1j2 = g1j2, s2j2 = g2j2, s3j2 = g3j2, s4j2 = g4j2, s5j2 = g5j2, date_match = date_time, j1_id = joueur1.id, j2_id = joueur2.id, winner = winner, idtournoi_id = s.id)
add.save()
                    # Increment j (move on to the next match)
j += 1
except:
print("Pas de tournoi")
# def handle_noargs(self, **options):
# raise NotImplementedError('subclasses of NoArgsCommand must provide a handle_noargs() method')
# def handle_base(self, **options):
# raise NotImplementedError('subclasses of BaseCommand must provide a handle() method')
|
gpl-3.0
| 2,856,110,380,647,489,500 | 34.355114 | 266 | 0.346083 | false |
fireeye/flare-wmi
|
python-cim/samples/dump_keys.py
|
1
|
1204
|
import logging
from cim.common import LoggingObject
from cim import CIM
from cim import is_index_page_number_valid
logging.basicConfig(level=logging.DEBUG)
g_logger = logging.getLogger("cim.printer")
class Printer(LoggingObject):
def __init__(self, cim):
super(Printer, self).__init__()
self._cim = cim
def _printPageRec(self, page):
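        # Print every key on this index page, then recurse into each child page with a valid page number.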
for i in range(page.key_count):
key = page.get_key(i)
print(key.human_format)
keyCount = page.key_count
for i in range(keyCount + 1):
childIndex = page.get_child(i)
if not is_index_page_number_valid(childIndex):
continue
i = self._cim.logical_index_store
self._printPageRec(i.get_page(childIndex))
def printKeys(self):
i = self._cim.logical_index_store
self._printPageRec(i.root_page)
def main(type_, path, pageNum=None):
if type_ not in ("xp", "win7"):
raise RuntimeError("Invalid mapping type: {:s}".format(type_))
c = CIM(type_, path)
p = Printer(c)
p.printKeys()
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
import sys
main(*sys.argv[1:])
|
apache-2.0
| -2,232,003,725,080,394,800 | 24.617021 | 70 | 0.606312 | false |
CO600GOL/Game_of_life
|
GameEngine/game/data_structures/grid.py
|
1
|
1632
|
"""
This module contains the logic representing the grid on which a game is played. A grid, in this sense, is simply a
collection of cells set into rows and columns. The cells can, for the purposes of the project, only be square.
"""
from game.data_structures.cell import Cell
def create_empty_grid():
"""
This function creates an empty ten-by-ten grid for use in initialisation of a grid object.
@return The collection of cells to use in a grid.
"""
cells = []
for x in range(0, 10):
cells.append([])
for _y in range(0, 10):
cells[x].append(Cell())
# Cells is a 2-dimensional array
return cells
class Grid(object):
"""
This class represents a grid board on which a game can be played. The grid contains a number of cells that have
one state at any possible point.
"""
    def __init__(self, cell_pattern=None):
        """
        Ctor - Initialises the grid with a two-dimensional array of cells.
        @param cell_pattern If a cell pattern is passed in as a parameter, it is that cell pattern that is set. If not,
        all the cells are set to dead.
        """
        # Build the default grid here rather than in the signature: a default
        # argument is evaluated once at definition time, so every Grid created
        # without an explicit pattern would otherwise share the same cells.
        if cell_pattern is None:
            cell_pattern = create_empty_grid()
        self.set_cells(cell_pattern)
def get_cells(self):
"""
        This method retrieves the cells contained within this grid.
@return The grid cells.
"""
return self._cells
def set_cells(self, cells):
"""
This method sets the cells inside the grid to the given configuration.
@param cells The cell configuration to give to the grid.
"""
self._cells = cells
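# A minimal usage sketch (not part of the original module): it builds a default
# ten-by-ten grid and reports its dimensions. It assumes Cell() defaults to a
# "dead" state, which is what create_empty_grid() relies on.
if __name__ == "__main__":
    grid = Grid()
    cells = grid.get_cells()
    print("grid size: %d rows x %d columns" % (len(cells), len(cells[0])))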
|
mit
| 7,694,286,100,144,176,000 | 29.792453 | 118 | 0.626838 | false |
robmcmullen/peppy
|
peppy/major_modes/groovy.py
|
1
|
1612
|
# peppy Copyright (c) 2006-2009 Rob McMullen
# Licenced under the GPLv2; see http://peppy.flipturn.org for more info
"""Groovy programming language editing support.
Major mode for editing Groovy files.
Supporting actions and minor modes should go here only if they are uniquely
applicable to this major mode and can't be used in other major modes. If
actions can be used with multiple major modes, they should be put in a
separate plugin in the peppy/plugins directory.
"""
import os
import wx
import wx.stc
from peppy.lib.foldexplorer import *
from peppy.lib.autoindent import *
from peppy.yapsy.plugins import *
from peppy.major import *
from peppy.editra.style_specs import unique_keywords
from peppy.fundamental import FundamentalMode
class GroovyMode(FundamentalMode):
"""Stub major mode for editing Groovy files.
This major mode has been automatically generated and is a boilerplate/
placeholder major mode. Enhancements to this mode are appreciated!
"""
keyword = 'Groovy'
editra_synonym = 'Groovy'
stc_lexer_id = wx.stc.STC_LEX_CPP
start_line_comment = u'//'
end_line_comment = ''
icon = 'icons/page_white.png'
default_classprefs = (
StrParam('extensions', 'groovy', fullwidth=True),
StrParam('keyword_set_0', unique_keywords[77], hidden=False, fullwidth=True),
StrParam('keyword_set_1', unique_keywords[78], hidden=False, fullwidth=True),
)
class GroovyModePlugin(IPeppyPlugin):
"""Plugin to register modes and user interface for Groovy
"""
def getMajorModes(self):
yield GroovyMode
|
gpl-2.0
| 5,934,957,365,940,290,000 | 30.607843 | 85 | 0.722705 | false |
joonro/PyTables
|
tables/description.py
|
1
|
35728
|
# -*- coding: utf-8 -*-
########################################################################
#
# License: BSD
# Created: September 21, 2002
# Author: Francesc Alted
#
# $Id$
#
########################################################################
"""Classes for describing columns for ``Table`` objects."""
# Imports
# =======
from __future__ import print_function
import sys
import copy
import warnings
import numpy
from tables import atom
from tables.path import check_name_validity
# Public variables
# ================
__docformat__ = 'reStructuredText'
"""The format of documentation strings in this module."""
# Private functions
# =================
def same_position(oldmethod):
"""Decorate `oldmethod` to also compare the `_v_pos` attribute."""
def newmethod(self, other):
try:
other._v_pos
except AttributeError:
return False # not a column definition
return self._v_pos == other._v_pos and oldmethod(self, other)
newmethod.__name__ = oldmethod.__name__
newmethod.__doc__ = oldmethod.__doc__
return newmethod
# Column classes
# ==============
class Col(atom.Atom):
"""Defines a non-nested column.
Col instances are used as a means to declare the different properties of a
non-nested column in a table or nested column. Col classes are descendants
of their equivalent Atom classes (see :ref:`AtomClassDescr`), but their
instances have an additional _v_pos attribute that is used to decide the
position of the column inside its parent table or nested column (see the
IsDescription class in :ref:`IsDescriptionClassDescr` for more information
on column positions).
In the same fashion as Atom, you should use a particular Col descendant
class whenever you know the exact type you will need when writing your
code. Otherwise, you may use one of the Col.from_*() factory methods.
Each factory method inherited from the Atom class is available with the
same signature, plus an additional pos parameter (placed in last position)
which defaults to None and that may take an integer value. This parameter
might be used to specify the position of the column in the table.
    Besides, there are the following additional factory methods, available only for
Col objects.
The following parameters are available for most Col-derived constructors.
Parameters
----------
itemsize : int
For types with a non-fixed size, this sets the size in bytes of
individual items in the column.
shape : tuple
Sets the shape of the column. An integer shape of N is equivalent to
the tuple (N,).
dflt
Sets the default value for the column.
pos : int
Sets the position of column in table. If unspecified, the position
will be randomly selected.
"""
# Avoid mangling atom class data.
__metaclass__ = type
_class_from_prefix = {} # filled as column classes are created
"""Maps column prefixes to column classes."""
# Class methods
# ~~~~~~~~~~~~~
@classmethod
def prefix(class_):
"""Return the column class prefix."""
cname = class_.__name__
return cname[:cname.rfind('Col')]
@classmethod
def from_atom(class_, atom, pos=None):
"""Create a Col definition from a PyTables atom.
An optional position may be specified as the pos argument.
"""
prefix = atom.prefix()
kwargs = atom._get_init_args()
colclass = class_._class_from_prefix[prefix]
return colclass(pos=pos, **kwargs)
@classmethod
def from_sctype(class_, sctype, shape=(), dflt=None, pos=None):
"""Create a `Col` definition from a NumPy scalar type `sctype`.
Optional shape, default value and position may be specified as
the `shape`, `dflt` and `pos` arguments, respectively.
Information in the `sctype` not represented in a `Col` is
ignored.
"""
newatom = atom.Atom.from_sctype(sctype, shape, dflt)
return class_.from_atom(newatom, pos=pos)
@classmethod
def from_dtype(class_, dtype, dflt=None, pos=None):
"""Create a `Col` definition from a NumPy `dtype`.
Optional default value and position may be specified as the
`dflt` and `pos` arguments, respectively. The `dtype` must have
a byte order which is irrelevant or compatible with that of the
system. Information in the `dtype` not represented in a `Col`
is ignored.
"""
newatom = atom.Atom.from_dtype(dtype, dflt)
return class_.from_atom(newatom, pos=pos)
@classmethod
def from_type(class_, type, shape=(), dflt=None, pos=None):
"""Create a `Col` definition from a PyTables `type`.
Optional shape, default value and position may be specified as
the `shape`, `dflt` and `pos` arguments, respectively.
"""
newatom = atom.Atom.from_type(type, shape, dflt)
return class_.from_atom(newatom, pos=pos)
@classmethod
def from_kind(class_, kind, itemsize=None, shape=(), dflt=None, pos=None):
"""Create a `Col` definition from a PyTables `kind`.
Optional item size, shape, default value and position may be
specified as the `itemsize`, `shape`, `dflt` and `pos`
arguments, respectively. Bear in mind that not all columns
support a default item size.
"""
newatom = atom.Atom.from_kind(kind, itemsize, shape, dflt)
return class_.from_atom(newatom, pos=pos)
@classmethod
def _subclass_from_prefix(class_, prefix):
"""Get a column subclass for the given `prefix`."""
cname = '%sCol' % prefix
class_from_prefix = class_._class_from_prefix
if cname in class_from_prefix:
return class_from_prefix[cname]
atombase = getattr(atom, '%sAtom' % prefix)
class NewCol(class_, atombase):
"""Defines a non-nested column of a particular type.
The constructor accepts the same arguments as the equivalent
`Atom` class, plus an additional ``pos`` argument for
position information, which is assigned to the `_v_pos`
attribute.
"""
def __init__(self, *args, **kwargs):
pos = kwargs.pop('pos', None)
class_from_prefix = self._class_from_prefix
atombase.__init__(self, *args, **kwargs)
# The constructor of an abstract atom may have changed
# the class of `self` to something different of `NewCol`
# and `atombase` (that's why the prefix map is saved).
if self.__class__ is not NewCol:
colclass = class_from_prefix[self.prefix()]
self.__class__ = colclass
self._v_pos = pos
__eq__ = same_position(atombase.__eq__)
_is_equal_to_atom = same_position(atombase._is_equal_to_atom)
# XXX: API incompatible change for PyTables 3 line
# Overriding __eq__ blocks inheritance of __hash__ in 3.x
# def __hash__(self):
# return hash((self._v_pos, self.atombase))
if prefix == 'Enum':
_is_equal_to_enumatom = same_position(
atombase._is_equal_to_enumatom)
NewCol.__name__ = cname
class_from_prefix[prefix] = NewCol
return NewCol
# Special methods
# ~~~~~~~~~~~~~~~
def __repr__(self):
# Reuse the atom representation.
atomrepr = super(Col, self).__repr__()
lpar = atomrepr.index('(')
rpar = atomrepr.rindex(')')
atomargs = atomrepr[lpar + 1:rpar]
classname = self.__class__.__name__
return '%s(%s, pos=%s)' % (classname, atomargs, self._v_pos)
# Private methods
# ~~~~~~~~~~~~~~~
def _get_init_args(self):
"""Get a dictionary of instance constructor arguments."""
kwargs = dict((arg, getattr(self, arg)) for arg in ('shape', 'dflt'))
kwargs['pos'] = getattr(self, '_v_pos', None)
return kwargs
def _generate_col_classes():
"""Generate all column classes."""
# Abstract classes are not in the class map.
cprefixes = ['Int', 'UInt', 'Float', 'Time']
for (kind, kdata) in atom.atom_map.iteritems():
if hasattr(kdata, 'kind'): # atom class: non-fixed item size
atomclass = kdata
cprefixes.append(atomclass.prefix())
else: # dictionary: fixed item size
for atomclass in kdata.itervalues():
cprefixes.append(atomclass.prefix())
# Bottom-level complex classes are not in the type map, of course.
# We still want the user to get the compatibility warning, though.
cprefixes.extend(['Complex32', 'Complex64', 'Complex128'])
if hasattr(atom, 'Complex192Atom'):
cprefixes.append('Complex192')
if hasattr(atom, 'Complex256Atom'):
cprefixes.append('Complex256')
for cprefix in cprefixes:
newclass = Col._subclass_from_prefix(cprefix)
yield newclass
# Create all column classes.
#for _newclass in _generate_col_classes():
# exec('%s = _newclass' % _newclass.__name__)
#del _newclass
StringCol = Col._subclass_from_prefix('String')
BoolCol = Col._subclass_from_prefix('Bool')
EnumCol = Col._subclass_from_prefix('Enum')
IntCol = Col._subclass_from_prefix('Int')
Int8Col = Col._subclass_from_prefix('Int8')
Int16Col = Col._subclass_from_prefix('Int16')
Int32Col = Col._subclass_from_prefix('Int32')
Int64Col = Col._subclass_from_prefix('Int64')
UIntCol = Col._subclass_from_prefix('UInt')
UInt8Col = Col._subclass_from_prefix('UInt8')
UInt16Col = Col._subclass_from_prefix('UInt16')
UInt32Col = Col._subclass_from_prefix('UInt32')
UInt64Col = Col._subclass_from_prefix('UInt64')
FloatCol = Col._subclass_from_prefix('Float')
if hasattr(atom, 'Float16Atom'):
Float16Col = Col._subclass_from_prefix('Float16')
Float32Col = Col._subclass_from_prefix('Float32')
Float64Col = Col._subclass_from_prefix('Float64')
if hasattr(atom, 'Float96Atom'):
Float96Col = Col._subclass_from_prefix('Float96')
if hasattr(atom, 'Float128Atom'):
Float128Col = Col._subclass_from_prefix('Float128')
ComplexCol = Col._subclass_from_prefix('Complex')
Complex32Col = Col._subclass_from_prefix('Complex32')
Complex64Col = Col._subclass_from_prefix('Complex64')
Complex128Col = Col._subclass_from_prefix('Complex128')
if hasattr(atom, 'Complex192Atom'):
Complex192Col = Col._subclass_from_prefix('Complex192')
if hasattr(atom, 'Complex256Atom'):
Complex256Col = Col._subclass_from_prefix('Complex256')
TimeCol = Col._subclass_from_prefix('Time')
Time32Col = Col._subclass_from_prefix('Time32')
Time64Col = Col._subclass_from_prefix('Time64')
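# Illustrative sketch (not part of the original module): the generated column
# classes are used like their Atom counterparts, plus the extra ``pos`` keyword.
# For example:
#   idcol = Int32Col(pos=0)                    # 32-bit integer, first position
#   name = StringCol(itemsize=16, pos=1)       # fixed-width string column
#   temps = Float64Col(shape=(2, 3), dflt=0.)  # 2x3 float64 cells with a default
# Equivalently, a column may be derived from a NumPy dtype:
#   idcol = Col.from_dtype(numpy.dtype('int32'), pos=0)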
# Table description classes
# =========================
class Description(object):
"""This class represents descriptions of the structure of tables.
An instance of this class is automatically bound to Table (see
:ref:`TableClassDescr`) objects when they are created. It provides a
browseable representation of the structure of the table, made of non-nested
(Col - see :ref:`ColClassDescr`) and nested (Description) columns.
Column definitions under a description can be accessed as attributes of it
(*natural naming*). For instance, if table.description is a Description
    instance with a column named col1 under it, the latter can be accessed as
table.description.col1. If col1 is nested and contains a col2 column, this
can be accessed as table.description.col1.col2. Because of natural naming,
the names of members start with special prefixes, like in the Group class
(see :ref:`GroupClassDescr`).
.. rubric:: Description attributes
.. attribute:: _v_colobjects
A dictionary mapping the names of the columns hanging
directly from the associated table or nested column to their
respective descriptions (Col - see :ref:`ColClassDescr` or
Description - see :ref:`DescriptionClassDescr` instances).
.. versionchanged:: 3.0
        The *_v_colObjects* attribute has been renamed into
*_v_colobjects*.
.. attribute:: _v_dflts
A dictionary mapping the names of non-nested columns
hanging directly from the associated table or nested column
to their respective default values.
.. attribute:: _v_dtype
The NumPy type which reflects the structure of this
table or nested column. You can use this as the
dtype argument of NumPy array factories.
.. attribute:: _v_dtypes
A dictionary mapping the names of non-nested columns
hanging directly from the associated table or nested column
to their respective NumPy types.
.. attribute:: _v_is_nested
Whether the associated table or nested column contains
further nested columns or not.
.. attribute:: _v_itemsize
The size in bytes of an item in this table or nested column.
.. attribute:: _v_name
The name of this description group. The name of the
root group is '/'.
.. attribute:: _v_names
A list of the names of the columns hanging directly
from the associated table or nested column. The order of the
names matches the order of their respective columns in the
containing table.
.. attribute:: _v_nested_descr
A nested list of pairs of (name, format) tuples for all the columns
under this table or nested column. You can use this as the dtype and
descr arguments of NumPy array factories.
.. versionchanged:: 3.0
The *_v_nestedDescr* attribute has been renamed into
*_v_nested_descr*.
.. attribute:: _v_nested_formats
A nested list of the NumPy string formats (and shapes) of all the
columns under this table or nested column. You can use this as the
formats argument of NumPy array factories.
.. versionchanged:: 3.0
The *_v_nestedFormats* attribute has been renamed into
*_v_nested_formats*.
.. attribute:: _v_nestedlvl
The level of the associated table or nested column in the nested
datatype.
.. attribute:: _v_nested_names
A nested list of the names of all the columns under this table or
nested column. You can use this as the names argument of NumPy array
factories.
.. versionchanged:: 3.0
The *_v_nestedNames* attribute has been renamed into
*_v_nested_names*.
.. attribute:: _v_pathname
Pathname of the table or nested column.
.. attribute:: _v_pathnames
A list of the pathnames of all the columns under this table or nested
column (in preorder). If it does not contain nested columns, this is
exactly the same as the :attr:`Description._v_names` attribute.
.. attribute:: _v_types
A dictionary mapping the names of non-nested columns hanging directly
from the associated table or nested column to their respective PyTables
types.
"""
def __init__(self, classdict, nestedlvl=-1, validate=True):
if not classdict:
raise ValueError("cannot create an empty data type")
# Do a shallow copy of classdict just in case this is going to
# be shared by other instances
newdict = self.__dict__
newdict["_v_name"] = "/" # The name for root descriptor
newdict["_v_names"] = []
newdict["_v_dtypes"] = {}
newdict["_v_types"] = {}
newdict["_v_dflts"] = {}
newdict["_v_colobjects"] = {}
newdict["_v_is_nested"] = False
nestedFormats = []
nestedDType = []
if not hasattr(newdict, "_v_nestedlvl"):
newdict["_v_nestedlvl"] = nestedlvl + 1
        cols_with_pos = []  # column (position, name) pairs
cols_no_pos = [] # just column names
# Check for special variables and convert column descriptions
for (name, descr) in classdict.iteritems():
if name.startswith('_v_'):
if name in newdict:
# print("Warning!")
# special methods &c: copy to newdict, warn about conflicts
warnings.warn("Can't set attr %r in description class %r"
% (name, self))
else:
# print("Special variable!-->", name, classdict[name])
newdict[name] = descr
continue # This variable is not needed anymore
columns = None
if (type(descr) == type(IsDescription) and
issubclass(descr, IsDescription)):
# print("Nested object (type I)-->", name)
columns = descr().columns
elif (type(descr.__class__) == type(IsDescription) and
issubclass(descr.__class__, IsDescription)):
# print("Nested object (type II)-->", name)
columns = descr.columns
elif isinstance(descr, dict):
# print("Nested object (type III)-->", name)
columns = descr
else:
# print("Nested object (type IV)-->", name)
descr = copy.copy(descr)
# The copies above and below ensure that the structures
# provided by the user will remain unchanged even if we
# tamper with the values of ``_v_pos`` here.
if columns is not None:
descr = Description(copy.copy(columns), self._v_nestedlvl)
classdict[name] = descr
pos = getattr(descr, '_v_pos', None)
if pos is None:
cols_no_pos.append(name)
else:
cols_with_pos.append((pos, name))
# Sort field names:
#
# 1. Fields with explicit positions, according to their
# positions (and their names if coincident).
        # 2. Fields with no position, in alphabetical order.
cols_with_pos.sort()
cols_no_pos.sort()
keys = [name for (pos, name) in cols_with_pos] + cols_no_pos
pos = 0
# Get properties for compound types
for k in keys:
if validate:
# Check for key name validity
check_name_validity(k)
# Class variables
object = classdict[k]
newdict[k] = object # To allow natural naming
if not (isinstance(object, Col) or
isinstance(object, Description)):
raise TypeError('Passing an incorrect value to a table column.'
' Expected a Col (or subclass) instance and '
'got: "%s". Please make use of the Col(), or '
'descendant, constructor to properly '
'initialize columns.' % object)
object._v_pos = pos # Set the position of this object
object._v_parent = self # The parent description
pos += 1
newdict['_v_colobjects'][k] = object
newdict['_v_names'].append(k)
object.__dict__['_v_name'] = k
if not isinstance(k, str):
# numpy only accepts "str" for field names
if sys.version_info[0] < 3:
# Python 2.x: unicode --> str
kk = k.encode() # use the default encoding
else:
# Python 3.x: bytes --> str (unicode)
kk = k.decode()
else:
kk = k
if isinstance(object, Col):
dtype = object.dtype
newdict['_v_dtypes'][k] = dtype
newdict['_v_types'][k] = object.type
newdict['_v_dflts'][k] = object.dflt
nestedFormats.append(object.recarrtype)
baserecarrtype = dtype.base.str[1:]
nestedDType.append((kk, baserecarrtype, dtype.shape))
else: # A description
nestedFormats.append(object._v_nested_formats)
nestedDType.append((kk, object._v_dtype))
# Assign the format list to _v_nested_formats
newdict['_v_nested_formats'] = nestedFormats
newdict['_v_dtype'] = numpy.dtype(nestedDType)
# _v_itemsize is derived from the _v_dtype that already computes this
newdict['_v_itemsize'] = newdict['_v_dtype'].itemsize
if self._v_nestedlvl == 0:
# Get recursively nested _v_nested_names and _v_nested_descr attrs
self._g_set_nested_names_descr()
# Get pathnames for nested groups
self._g_set_path_names()
# Check the _v_byteorder has been used an issue an Error
if hasattr(self, "_v_byteorder"):
raise ValueError(
"Using a ``_v_byteorder`` in the description is obsolete. "
"Use the byteorder parameter in the constructor instead.")
def _g_set_nested_names_descr(self):
"""Computes the nested names and descriptions for nested datatypes."""
names = self._v_names
fmts = self._v_nested_formats
self._v_nested_names = names[:] # Important to do a copy!
self._v_nested_descr = list(zip(names, fmts))
for i, name in enumerate(names):
new_object = self._v_colobjects[name]
if isinstance(new_object, Description):
new_object._g_set_nested_names_descr()
# replace the column nested name by a correct tuple
self._v_nested_names[i] = (name, new_object._v_nested_names)
self._v_nested_descr[i] = (name, new_object._v_nested_descr)
# set the _v_is_nested flag
self._v_is_nested = True
def _g_set_path_names(self):
"""Compute the pathnames for arbitrary nested descriptions.
This method sets the ``_v_pathname`` and ``_v_pathnames``
attributes of all the elements (both descriptions and columns)
in this nested description.
"""
def get_cols_in_order(description):
return [description._v_colobjects[colname]
for colname in description._v_names]
def join_paths(path1, path2):
if not path1:
return path2
return '%s/%s' % (path1, path2)
# The top of the stack always has a nested description
# and a list of its child columns
# (be they nested ``Description`` or non-nested ``Col`` objects).
# In the end, the list contains only a list of column paths
# under this one.
#
# For instance, given this top of the stack::
#
# (<Description X>, [<Column A>, <Column B>])
#
# After computing the rest of the stack, the top is::
#
# (<Description X>, ['a', 'a/m', 'a/n', ... , 'b', ...])
stack = []
# We start by pushing the top-level description
# and its child columns.
self._v_pathname = ''
stack.append((self, get_cols_in_order(self)))
while stack:
desc, cols = stack.pop()
head = cols[0]
# What's the first child in the list?
if isinstance(head, Description):
# A nested description. We remove it from the list and
# push it with its child columns. This will be the next
# handled description.
head._v_pathname = join_paths(desc._v_pathname, head._v_name)
stack.append((desc, cols[1:])) # alter the top
stack.append((head, get_cols_in_order(head))) # new top
elif isinstance(head, Col):
# A non-nested column. We simply remove it from the
# list and append its name to it.
head._v_pathname = join_paths(desc._v_pathname, head._v_name)
cols.append(head._v_name) # alter the top
stack.append((desc, cols[1:])) # alter the top
else:
# Since paths and names are appended *to the end* of
# children lists, a string signals that no more children
# remain to be processed, so we are done with the
# description at the top of the stack.
assert isinstance(head, basestring)
# Assign the computed set of descendent column paths.
desc._v_pathnames = cols
if len(stack) > 0:
# Compute the paths with respect to the parent node
# (including the path of the current description)
# and append them to its list.
descName = desc._v_name
colPaths = [join_paths(descName, path) for path in cols]
colPaths.insert(0, descName)
parentCols = stack[-1][1]
parentCols.extend(colPaths)
# (Nothing is pushed, we are done with this description.)
def _f_walk(self, type='All'):
"""Iterate over nested columns.
If type is 'All' (the default), all column description objects (Col and
Description instances) are yielded in top-to-bottom order (preorder).
If type is 'Col' or 'Description', only column descriptions of that
type are yielded.
"""
if type not in ["All", "Col", "Description"]:
raise ValueError("""\
type can only take the parameters 'All', 'Col' or 'Description'.""")
stack = [self]
while stack:
object = stack.pop(0) # pop at the front so as to ensure the order
if type in ["All", "Description"]:
yield object # yield description
for name in object._v_names:
new_object = object._v_colobjects[name]
if isinstance(new_object, Description):
stack.append(new_object)
else:
if type in ["All", "Col"]:
yield new_object # yield column
def __repr__(self):
"""Gives a detailed Description column representation."""
rep = ['%s\"%s\": %r' %
(" " * self._v_nestedlvl, k, self._v_colobjects[k])
for k in self._v_names]
return '{\n %s}' % (',\n '.join(rep))
def __str__(self):
"""Gives a brief Description representation."""
return 'Description(%s)' % self._v_nested_descr
class MetaIsDescription(type):
"""Helper metaclass to return the class variables as a dictionary."""
def __new__(cls, classname, bases, classdict):
"""Return a new class with a "columns" attribute filled."""
newdict = {"columns": {}, }
if '__doc__' in classdict:
newdict['__doc__'] = classdict['__doc__']
for b in bases:
if "columns" in b.__dict__:
newdict["columns"].update(b.__dict__["columns"])
for k in classdict:
# if not (k.startswith('__') or k.startswith('_v_')):
# We let pass _v_ variables to configure class behaviour
if not (k.startswith('__')):
newdict["columns"][k] = classdict[k]
# Return a new class with the "columns" attribute filled
return type.__new__(cls, classname, bases, newdict)
class IsDescription(object):
"""Description of the structure of a table or nested column.
This class is designed to be used as an easy, yet meaningful way to
describe the structure of new Table (see :ref:`TableClassDescr`) datasets
or nested columns through the definition of *derived classes*. In order to
define such a class, you must declare it as descendant of IsDescription,
with as many attributes as columns you want in your table. The name of each
attribute will become the name of a column, and its value will hold a
description of it.
Ordinary columns can be described using instances of the Col class (see
:ref:`ColClassDescr`). Nested columns can be described by using classes
derived from IsDescription, instances of it, or name-description
dictionaries. Derived classes can be declared in place (in which case the
column takes the name of the class) or referenced by name.
Nested columns can have a _v_pos special attribute which sets the
*relative* position of the column among sibling columns *also having
explicit positions*. The pos constructor argument of Col instances is used
for the same purpose. Columns with no explicit position will be placed
afterwards in alphanumeric order.
Once you have created a description object, you can pass it to the Table
constructor, where all the information it contains will be used to define
the table structure.
.. rubric:: IsDescription attributes
.. attribute:: _v_pos
Sets the position of a possible nested column description among its
sibling columns. This attribute can be specified *when declaring*
an IsDescription subclass to complement its *metadata*.
.. attribute:: columns
Maps the name of each column in the description to its own descriptive
object. This attribute is *automatically created* when an IsDescription
subclass is declared. Please note that declared columns can no longer
be accessed as normal class variables after its creation.
"""
__metaclass__ = MetaIsDescription
def descr_from_dtype(dtype_):
"""Get a description instance and byteorder from a (nested) NumPy dtype."""
fields = {}
fbyteorder = '|'
for name in dtype_.names:
dtype, pos = dtype_.fields[name][:2]
kind = dtype.base.kind
byteorder = dtype.base.byteorder
if byteorder in '><=':
if fbyteorder not in ['|', byteorder]:
raise NotImplementedError(
"structured arrays with mixed byteorders "
"are not supported yet, sorry")
fbyteorder = byteorder
# Non-nested column
if kind in 'biufSUc':
col = Col.from_dtype(dtype, pos=pos)
# Nested column
elif kind == 'V' and dtype.shape in [(), (1,)]:
if dtype.shape != ():
warnings.warn(
"nested descriptions will be converted to scalar")
col, _ = descr_from_dtype(dtype.base)
col._v_pos = pos
else:
raise NotImplementedError(
"structured arrays with columns with type description ``%s`` "
"are not supported yet, sorry" % dtype)
fields[name] = col
return Description(fields), fbyteorder
def dtype_from_descr(descr, byteorder=None):
"""Get a (nested) NumPy dtype from a description instance and byteorder.
The descr parameter can be a Description or IsDescription
instance, sub-class of IsDescription or a dictionary.
"""
if isinstance(descr, dict):
descr = Description(descr)
elif (type(descr) == type(IsDescription)
and issubclass(descr, IsDescription)):
descr = Description(descr().columns)
elif isinstance(descr, IsDescription):
descr = Description(descr.columns)
elif not isinstance(descr, Description):
raise ValueError('invalid description: %r' % descr)
dtype_ = descr._v_dtype
if byteorder and byteorder != '|':
dtype_ = dtype_.newbyteorder(byteorder)
return dtype_
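# Usage sketch (illustrative only, not part of the original module):
#   dt = numpy.dtype([('x', numpy.int32), ('y', numpy.float64, (2,))])
#   desc, byteorder = descr_from_dtype(dt)   # Description plus detected byteorder
#   dt2 = dtype_from_descr(desc, byteorder)  # back to a structured dtype
# For a simple native-endian dtype like the one above, the round trip yields an
# equivalent structured dtype.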
if __name__ == "__main__":
"""Test code."""
class Info(IsDescription):
_v_pos = 2
Name = UInt32Col()
Value = Float64Col()
class Test(IsDescription):
"""A description that has several columns."""
x = Col.from_type("int32", 2, 0, pos=0)
y = Col.from_kind('float', dflt=1, shape=(2, 3))
z = UInt8Col(dflt=1)
color = StringCol(2, dflt=" ")
# color = UInt32Col(2)
Info = Info()
class info(IsDescription):
_v_pos = 1
name = UInt32Col()
value = Float64Col(pos=0)
y2 = Col.from_kind('float', dflt=1, shape=(2, 3), pos=1)
z2 = UInt8Col(dflt=1)
class info2(IsDescription):
y3 = Col.from_kind('float', dflt=1, shape=(2, 3))
z3 = UInt8Col(dflt=1)
name = UInt32Col()
value = Float64Col()
class info3(IsDescription):
name = UInt32Col()
value = Float64Col()
y4 = Col.from_kind('float', dflt=1, shape=(2, 3))
z4 = UInt8Col(dflt=1)
# class Info(IsDescription):
# _v_pos = 2
# Name = StringCol(itemsize=2)
# Value = ComplexCol(itemsize=16)
# class Test(IsDescription):
# """A description that has several columns"""
# x = Col.from_type("int32", 2, 0, pos=0)
# y = Col.from_kind('float', dflt=1, shape=(2,3))
# z = UInt8Col(dflt=1)
# color = StringCol(2, dflt=" ")
# Info = Info()
# class info(IsDescription):
# _v_pos = 1
# name = StringCol(itemsize=2)
# value = ComplexCol(itemsize=16, pos=0)
# y2 = Col.from_kind('float', dflt=1, shape=(2,3), pos=1)
# z2 = UInt8Col(dflt=1)
# class info2(IsDescription):
# y3 = Col.from_kind('float', dflt=1, shape=(2,3))
# z3 = UInt8Col(dflt=1)
# name = StringCol(itemsize=2)
# value = ComplexCol(itemsize=16)
# class info3(IsDescription):
# name = StringCol(itemsize=2)
# value = ComplexCol(itemsize=16)
# y4 = Col.from_kind('float', dflt=1, shape=(2,3))
# z4 = UInt8Col(dflt=1)
# example cases of class Test
klass = Test()
# klass = Info()
desc = Description(klass.columns)
print("Description representation (short) ==>", desc)
print("Description representation (long) ==>", repr(desc))
print("Column names ==>", desc._v_names)
print("Column x ==>", desc.x)
print("Column Info ==>", desc.Info)
print("Column Info.value ==>", desc.Info.Value)
print("Nested column names ==>", desc._v_nested_names)
print("Defaults ==>", desc._v_dflts)
print("Nested Formats ==>", desc._v_nested_formats)
print("Nested Descriptions ==>", desc._v_nested_descr)
print("Nested Descriptions (info) ==>", desc.info._v_nested_descr)
print("Total size ==>", desc._v_dtype.itemsize)
# check _f_walk
for object in desc._f_walk():
if isinstance(object, Description):
print("******begin object*************", end=' ')
print("name -->", object._v_name)
# print("name -->", object._v_dtype.name)
# print("object childs-->", object._v_names)
# print("object nested childs-->", object._v_nested_names)
print("totalsize-->", object._v_dtype.itemsize)
else:
# pass
print("leaf -->", object._v_name, object.dtype)
class testDescParent(IsDescription):
c = Int32Col()
class testDesc(testDescParent):
pass
assert 'c' in testDesc.columns
## Local Variables:
## mode: python
## py-indent-offset: 4
## tab-width: 4
## fill-column: 72
## End:
|
bsd-3-clause
| 1,684,387,528,308,238,800 | 36.608421 | 79 | 0.587886 | false |
tymofij/adofex
|
transifex/addons/cla/views.py
|
1
|
3292
|
from django.contrib import messages
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext
from django.shortcuts import get_object_or_404, render_to_response
from django.utils.translation import ugettext_lazy as _
from django.views.generic.list_detail import object_list
from transifex.projects.models import Project
from transifex.projects.permissions import pr_project_add_change
from transifex.projects.permissions.project import ProjectPermission
from transifex.txcommon.decorators import one_perm_required_or_403
from transifex.txcommon.views import permission_denied
from cla.forms import ClaForm
from cla.models import Cla
from cla.handlers import handle_pre_team
@login_required
def view(request, project_slug):
project = get_object_or_404(Project, slug=project_slug)
cla = get_object_or_404(Cla, project=project)
return render_to_response(
"view_cla.html",
{'project': project, 'cla': cla},
context_instance= RequestContext(request)
)
@login_required
def cla_project_sign(request, project_slug):
project = get_object_or_404(Project, slug=project_slug)
cla = get_object_or_404(Cla, project=project)
check = ProjectPermission(request.user)
if not check.submit_translations(project, any_team=True):
return permission_denied(request)
try:
signed_cla = request.user.cla_set.filter(project=project)[0]
except IndexError:
signed_cla = None
if request.method == 'POST' and not signed_cla:
form = ClaForm(request.POST)
if form.is_valid():
kwargs = {'cla_sign':True, 'project':project, 'user':request.user}
handle_pre_team(None, **kwargs)
messages.success(request, _("You have signed the CLA."))
return HttpResponseRedirect(reverse('cla_project_sign',
args=[project_slug]),)
else:
form = ClaForm()
return render_to_response(
"project_cla.html",
{'project': project,
'cla': cla,
'signed_cla': signed_cla,
'form': form},
context_instance= RequestContext(request)
)
@login_required
@one_perm_required_or_403(pr_project_add_change,
(Project, 'slug__exact', 'project_slug'))
def users(request, project_slug):
project = get_object_or_404(Project, slug=project_slug)
cla = get_object_or_404(Cla, project=project)
signed_user_ids = cla.users.values_list('id', flat=True).query
unsigned_user_list = User.objects.filter(
Q(team_coordinators__project=project) |
Q(team_members__project=project) |
Q(teamrequest__project=project) |
Q(teamaccessrequest__team__project=project)
).exclude(id__in=signed_user_ids).distinct()
return render_to_response(
"user_list.html",
{'project': project,
'cla': cla,
'signed_user_list': cla.users.all().order_by('username'),
'unsigned_user_list': unsigned_user_list.order_by('username')},
context_instance= RequestContext(request)
)
|
gpl-3.0
| -2,236,727,836,968,306,000 | 35.577778 | 78 | 0.681349 | false |
beav/pulp
|
server/pulp/server/managers/auth/role/cud.py
|
1
|
13478
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
"""
Contains the manager class and exceptions for operations surrounding the creation,
update, and deletion on a Pulp Role.
"""
import re
from gettext import gettext as _
from celery import task
from pulp.server.async.tasks import Task
from pulp.server.auth.authorization import CREATE, READ, UPDATE, DELETE, EXECUTE, \
_operations_not_granted_by_roles
from pulp.server.db.model.auth import Role, User
from pulp.server.exceptions import (DuplicateResource, InvalidValue, MissingResource,
PulpDataException)
from pulp.server.managers import factory
from pulp.server.util import Delta
SUPER_USER_ROLE = 'super-users'
_ROLE_NAME_REGEX = re.compile(r'^[\-_A-Za-z0-9]+$') # letters, numbers, underscore, hyphen
class RoleManager(object):
"""
Performs role related functions relating to CRUD operations.
"""
@staticmethod
def create_role(role_id, display_name=None, description=None):
"""
Creates a new Pulp role.
:param role_id: unique identifier for the role
:type role_id: str
:param display_name: user-readable name of the role
:type display_name: str
:param description: free form text used to describe the role
:type description: str
:raise DuplicateResource: if there is already a role with the requested name
:raise InvalidValue: if any of the fields are unacceptable
"""
existing_role = Role.get_collection().find_one({'id': role_id})
if existing_role is not None:
raise DuplicateResource(role_id)
if role_id is None or _ROLE_NAME_REGEX.match(role_id) is None:
raise InvalidValue(['role_id'])
# Use the ID for the display name if one was not specified
display_name = display_name or role_id
# Creation
create_me = Role(id=role_id, display_name=display_name, description=description)
Role.get_collection().save(create_me, safe=True)
# Retrieve the role to return the SON object
created = Role.get_collection().find_one({'id': role_id})
return created
@staticmethod
def update_role(role_id, delta):
"""
Updates a role object.
:param role_id: The role identifier.
:type role_id: str
:param delta: A dict containing update keywords.
:type delta: dict
:return: The updated object
:rtype: dict
:raise MissingResource: if the given role does not exist
:raise PulpDataException: if update keyword is not supported
"""
delta.pop('id', None)
role = Role.get_collection().find_one({'id': role_id})
if role is None:
raise MissingResource(role_id)
for key, value in delta.items():
# simple changes
if key in ('display_name', 'description',):
role[key] = value
continue
# unsupported
raise PulpDataException(_("Update Keyword [%s] is not supported" % key))
Role.get_collection().save(role, safe=True)
# Retrieve the user to return the SON object
updated = Role.get_collection().find_one({'id': role_id})
return updated
@staticmethod
def delete_role(role_id):
"""
Deletes the given role. This has the side-effect of revoking any permissions granted
to the role from the users in the role, unless those permissions are also granted
        through another role the user is a member of.
:param role_id: identifies the role being deleted
:type role_id: str
:raise InvalidValue: if any of the fields are unacceptable
:raise MissingResource: if the given role does not exist
"""
# Raise exception if role id is invalid
if role_id is None or not isinstance(role_id, basestring):
raise InvalidValue(['role_id'])
# Check whether role exists
role = Role.get_collection().find_one({'id': role_id})
if role is None:
raise MissingResource(role_id)
# Make sure role is not a superuser role
if role_id == SUPER_USER_ROLE:
raise PulpDataException(_('Role %s cannot be changed') % role_id)
# Remove respective roles from users
users = factory.user_query_manager().find_users_belonging_to_role(role_id)
for item in role['permissions']:
for user in users:
other_roles = factory.role_query_manager().get_other_roles(role, user['roles'])
user_ops = _operations_not_granted_by_roles(item['resource'],
item['permission'], other_roles)
factory.permission_manager().revoke(item['resource'], user['login'], user_ops)
for user in users:
user['roles'].remove(role_id)
factory.user_manager().update_user(user['login'], Delta(user, 'roles'))
Role.get_collection().remove({'id': role_id}, safe=True)
@staticmethod
def add_permissions_to_role(role_id, resource, operations):
"""
Add permissions to a role.
:param role_id: role identifier
:type role_id: str
:param resource: resource path to grant permissions to
:type resource: str
        :param operations: list of allowed operations being granted
        :type operations: list or tuple
:raise MissingResource: if the given role does not exist
"""
if role_id == SUPER_USER_ROLE:
raise PulpDataException(_('super-users role cannot be changed'))
role = Role.get_collection().find_one({'id': role_id})
if role is None:
raise MissingResource(role_id)
if not role['permissions']:
role['permissions'] = []
resource_permission = {}
current_ops = []
for item in role['permissions']:
if item['resource'] == resource:
resource_permission = item
current_ops = resource_permission['permission']
if not resource_permission:
resource_permission = dict(resource=resource, permission=current_ops)
role['permissions'].append(resource_permission)
for o in operations:
if o in current_ops:
continue
current_ops.append(o)
users = factory.user_query_manager().find_users_belonging_to_role(role_id)
for user in users:
factory.permission_manager().grant(resource, user['login'], operations)
Role.get_collection().save(role, safe=True)
@staticmethod
def remove_permissions_from_role(role_id, resource, operations):
"""
Remove permissions from a role.
:param role_id: role identifier
:type role_id: str
:param resource: resource path to revoke permissions from
:type resource: str
        :param operations: list of allowed operations being revoked
        :type operations: list or tuple
:raise MissingResource: if the given role does not exist
"""
if role_id == SUPER_USER_ROLE:
raise PulpDataException(_('super-users role cannot be changed'))
role = Role.get_collection().find_one({'id': role_id})
if role is None:
raise MissingResource(role_id)
resource_permission = {}
current_ops = []
for item in role['permissions']:
if item['resource'] == resource:
resource_permission = item
current_ops = resource_permission['permission']
if not current_ops:
return
for o in operations:
if o not in current_ops:
continue
current_ops.remove(o)
users = factory.user_query_manager().find_users_belonging_to_role(role_id)
for user in users:
other_roles = factory.role_query_manager().get_other_roles(role, user['roles'])
user_ops = _operations_not_granted_by_roles(resource,
operations,
other_roles)
factory.permission_manager().revoke(resource, user['login'], user_ops)
        # if no more allowed operations remain, remove the resource
if not current_ops:
role['permissions'].remove(resource_permission)
Role.get_collection().save(role, safe=True)
@staticmethod
def add_user_to_role(role_id, login):
"""
Add a user to a role. This has the side-effect of granting all the
permissions granted to the role to the user.
:param role_id: role identifier
:type role_id: str
:param login: login of user
:type login: str
:raise MissingResource: if the given role or user does not exist
"""
role = Role.get_collection().find_one({'id': role_id})
if role is None:
raise MissingResource(role_id)
user = User.get_collection().find_one({'login': login})
if user is None:
raise InvalidValue(['login'])
if role_id in user['roles']:
return
user['roles'].append(role_id)
User.get_collection().save(user, safe=True)
for item in role['permissions']:
factory.permission_manager().grant(item['resource'], login,
item.get('permission', []))
@staticmethod
def remove_user_from_role(role_id, login):
"""
Remove a user from a role. This has the side-effect of revoking all the
permissions granted to the role from the user, unless the permissions are
also granted by another role.
:param role_id: role identifier
:type role_id: str
:param login: name of user
:type login: str
:raise MissingResource: if the given role or user does not exist
"""
role = Role.get_collection().find_one({'id': role_id})
if role is None:
raise MissingResource(role_id)
user = User.get_collection().find_one({'login': login})
if user is None:
raise MissingResource(login)
if role_id == SUPER_USER_ROLE and factory.user_query_manager().is_last_super_user(login):
raise PulpDataException(
_('%(role)s cannot be empty, and %(login)s is the last member') %
{'role': SUPER_USER_ROLE, 'login': login})
if role_id not in user['roles']:
return
user['roles'].remove(role_id)
User.get_collection().save(user, safe=True)
for item in role['permissions']:
other_roles = factory.role_query_manager().get_other_roles(role, user['roles'])
user_ops = _operations_not_granted_by_roles(item['resource'],
item['permission'],
other_roles)
factory.permission_manager().revoke(item['resource'], login, user_ops)
def ensure_super_user_role(self):
"""
Ensure that the super user role exists.
"""
role = self.get_role(SUPER_USER_ROLE)
if role is None:
role = self.create_role(SUPER_USER_ROLE, 'Super Users',
'Role indicates users with admin privileges')
role['permissions'] = [{'resource': '/',
'permissions': [CREATE, READ, UPDATE, DELETE, EXECUTE]}]
Role.get_collection().save(role, safe=True)
@staticmethod
def get_role(role):
"""
Get a Role by id.
:param role: A role id to search for
:type role: str
:return: a Role object that have the given role id.
:rtype: Role or None
"""
return Role.get_collection().find_one({'id': role})
add_permissions_to_role = task(RoleManager.add_permissions_to_role, base=Task, ignore_result=True)
add_user_to_role = task(RoleManager.add_user_to_role, base=Task, ignore_result=True)
create_role = task(RoleManager.create_role, base=Task)
delete_role = task(RoleManager.delete_role, base=Task, ignore_result=True)
remove_permissions_from_role = task(RoleManager.remove_permissions_from_role, base=Task,
ignore_result=True)
remove_user_from_role = task(RoleManager.remove_user_from_role, base=Task, ignore_result=True)
update_role = task(RoleManager.update_role, base=Task)
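# Usage sketch (hypothetical identifiers; assumes a configured Pulp server with
# its MongoDB collections and manager factory initialized):
#   RoleManager.create_role('qa-team', display_name='QA Team')
#   RoleManager.add_permissions_to_role('qa-team', '/repositories/', [READ, EXECUTE])
#   RoleManager.add_user_to_role('qa-team', 'alice')
# The task wrappers above expose the same operations for asynchronous dispatch
# through Celery.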
|
gpl-2.0
| -2,383,789,678,807,386,000 | 38.291545 | 98 | 0.588484 | false |
JamesLinEngineer/RKMC
|
addons/plugin.video.salts/scrapers/hevcbluray_scraper.py
|
1
|
4696
|
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import scraper
import urlparse
import re
import kodi
import log_utils # @UnusedImport
import dom_parser
from salts_lib import scraper_utils
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import QUALITIES
BASE_URL = 'https://hevcbluray.com'
QUALITY_MAP = {'HD 720P': QUALITIES.HD720, 'HD 1080P': QUALITIES.HD1080, '1080P BLURAY': QUALITIES.HD1080}
class Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE])
@classmethod
def get_name(cls):
return 'HEVCBluRay'
def get_sources(self, video):
source_url = self.get_url(video)
sources = []
if source_url and source_url != FORCE_NO_MATCH:
url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(url, cache_limit=.5)
is_3d = False
page_quality = QUALITIES.HD720
title = dom_parser.parse_dom(html, 'title')
if title:
title = title[0]
match = re.search('(\d{3,})p', title)
if match:
page_quality = scraper_utils.height_get_quality(match.group(1))
is_3d = True if re.search('\s+3D\s+', title) else False
fragments = dom_parser.parse_dom(html, 'div', {'class': 'txt-block'}) + dom_parser.parse_dom(html, 'li', {'class': 'elemento'})
for fragment in fragments:
for match in re.finditer('href="([^"]+)', fragment):
stream_url = match.group(1)
host = urlparse.urlparse(stream_url).hostname
q_str = dom_parser.parse_dom(fragment, 'span', {'class': 'd'})
q_str = q_str[0].upper() if q_str else ''
base_quality = QUALITY_MAP.get(q_str, page_quality)
quality = scraper_utils.get_quality(video, host, base_quality)
source = {'multi-part': False, 'url': stream_url, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'direct': False}
source['format'] = 'x265'
source['3D'] = is_3d
sources.append(source)
return sources
def search(self, video_type, title, year, season=''): # @UnusedVariable
results = []
html = self._http_get(self.base_url, params={'s': title}, cache_limit=8)
for item in dom_parser.parse_dom(html, 'div', {'class': 'item'}):
match = re.search('href="([^"]+)', item)
match_title = dom_parser.parse_dom(item, 'span', {'class': 'tt'})
year_frag = dom_parser.parse_dom(item, 'span', {'class': 'year'})
if match and match_title:
url = match.group(1)
match_title = match_title[0]
if re.search('\d+\s*x\s*\d+', match_title): continue # exclude episodes
match_title, match_year = scraper_utils.extra_year(match_title)
if not match_year and year_frag:
match_year = year_frag[0]
match = re.search('(.*?)\s+\d{3,}p', match_title)
if match:
match_title = match.group(1)
extra = dom_parser.parse_dom(item, 'span', {'class': 'calidad2'})
if extra:
match_title += ' [%s]' % (extra[0])
if not year or not match_year or year == match_year:
result = {'title': scraper_utils.cleanse_title(match_title), 'year': match_year, 'url': scraper_utils.pathify_url(url)}
results.append(result)
return results
|
gpl-2.0
| -167,296,965,489,720,000 | 42.88785 | 166 | 0.567717 | false |
sidnarayanan/BAdNet
|
train/pf/adv/models/train_panda_0.py
|
1
|
9018
|
#!/usr/local/bin/python2.7
from sys import exit, stdout, argv
from os import environ, system
environ['KERAS_BACKEND'] = 'tensorflow'
import numpy as np
import utils
import signal
from keras.layers import Input, Dense, Dropout, concatenate, LSTM, BatchNormalization, Conv1D, concatenate
from keras.models import Model
from keras.callbacks import ModelCheckpoint, LambdaCallback, TensorBoard
from keras.optimizers import Adam, SGD
from keras.utils import np_utils
from keras import backend as K
K.set_image_data_format('channels_last')
from adversarial import Adversary
import obj
import config
#config.DEBUG = True
#config.n_truth = 5
#config.truth = 'resonanceType'
#config.adversary_mask = 0
'''
some global definitions
'''
LEARNMASS = True
LEARNRHO = True
LEARNPT = True
DECORRMASS = True
DECORRRHO = False
DECORRPT = False
adv_loss_weights = [0.0001, 50]
ADV = 2
NEPOCH = 2
APOSTLE = 'panda_0'
system('cp %s models/train_%s.py'%(argv[0], APOSTLE))
'''
instantiate data loaders
'''
def make_coll(fpath):
coll = obj.PFSVCollection()
coll.add_categories(['singletons', 'pf'], fpath)
return coll
top = make_coll('/fastscratch/snarayan/pandaarrays/v1//PARTITION/ZpTT_*_CATEGORY.npy')
qcd = make_coll('/fastscratch/snarayan/pandaarrays/v1//PARTITION/QCD_*_CATEGORY.npy')
data = [top, qcd]
# preload some data just to get the dimensions
data[0].objects['train']['pf'].load(memory=False)
dims = data[0].objects['train']['pf'].data.data.shape
obj.limit = 20
dims = (None, obj.limit, 9) # override
'''
first build the classifier!
'''
# set up data
opts = {'learn_mass':LEARNMASS,
'learn_pt':LEARNPT,
'learn_rho':LEARNRHO}
classifier_train_gen = obj.generatePF(data, partition='train', batch=1000, normalize=False, **opts)
classifier_validation_gen = obj.generatePF(data, partition='validate', batch=10000, **opts)
classifier_test_gen = obj.generatePF(data, partition='test', batch=2, **opts)
test_i, test_o, test_w = next(classifier_test_gen)
#print test_i
inputs = Input(shape=(dims[1], dims[2]), name='input')
mass_inputs = Input(shape=(1,), name='mass_input')
rho_inputs = Input(shape=(1,), name='rho_input')
pt_inputs = Input(shape=(1,), name='pt_input')
norm = BatchNormalization(momentum=0.6, name='input_bnorm') (inputs)
conv = Conv1D(32, 2, activation='relu', name='conv0', kernel_initializer='lecun_uniform', padding='same')(norm)
norm = BatchNormalization(momentum=0.6, name='conv0_bnorm') (conv)
conv = Conv1D(16, 4, activation='relu', name='conv1', kernel_initializer='lecun_uniform', padding='same')(norm)
norm = BatchNormalization(momentum=0.6, name='conv1_bnorm') (conv)
lstm = LSTM(100, go_backwards=True, implementation=2, name='lstm') (norm)
norm = BatchNormalization(momentum=0.6, name='lstm_norm') (lstm)
dense = Dense(100, activation='relu',name='lstmdense',kernel_initializer='lecun_uniform') (norm)
norm = BatchNormalization(momentum=0.6,name='lstmdense_norm') (dense)
for i in xrange(1,5):
dense = Dense(50, activation='relu',name='dense%i'%i)(norm)
norm = BatchNormalization(momentum=0.6,name='dense%i_norm'%i)(dense)
if LEARNMASS or LEARNPT or LEARNRHO:
to_merge = [norm]
if LEARNMASS:
to_merge.append(mass_inputs)
if LEARNRHO:
to_merge.append(rho_inputs)
if LEARNPT:
to_merge.append(pt_inputs)
merge = concatenate(to_merge)
dense = Dense(50, activation='tanh', name='dense5a')(merge)
norm = BatchNormalization(momentum=0.6,name='dense5a_norm')(dense)
# dense = Dense(50, activation='tanh', name='dense5')(norm)
# norm = BatchNormalization(momentum=0.6,name='dense5_norm')(dense)
else:
dense = Dense(50, activation='tanh',name='dense5')(norm)
norm = BatchNormalization(momentum=0.6,name='dense5_norm')(dense)
y_hat = Dense(config.n_truth, activation='softmax') (norm)
i = [inputs]
if LEARNMASS:
i.append(mass_inputs)
if LEARNRHO:
i.append(rho_inputs)
if LEARNPT:
i.append(pt_inputs)
classifier = Model(inputs=i, outputs=y_hat)
classifier.compile(optimizer=Adam(lr=0.0001),
loss='categorical_crossentropy',
metrics=['accuracy'])
# print '########### CLASSIFIER ############'
# classifier.summary()
# print '###################################'
pred = classifier.predict(test_i)
# ctrl+C now triggers a graceful exit
def save_classifier(name='classifier_conv', model=classifier):
model.save('models/%s_%s.h5'%(name, APOSTLE))
def save_and_exit(signal=None, frame=None, name='classifier_conv', model=classifier):
save_classifier(name, model)
flog.close()
exit(1)
signal.signal(signal.SIGINT, save_and_exit)
'''
now build the adversarial setup
'''
# set up data
opts = {'decorr_mass':DECORRMASS,
'decorr_rho':DECORRRHO,
'decorr_pt':DECORRPT,
'learn_mass':LEARNMASS,
'learn_pt':LEARNPT,
'learn_rho':LEARNRHO}
train_gen = obj.generatePF(data, partition='train', batch=1000, normalize=False, **opts)
validation_gen = obj.generatePF(data, partition='validate', batch=100, **opts)
test_gen = obj.generatePF(data, partition='test', batch=1, **opts)
# build the model
kin_hats = Adversary(config.n_decorr_bins, n_outputs=(int(DECORRMASS)+int(DECORRPT)+int(DECORRRHO)), scale=0.0001)(y_hat)
# kin_hats = Adversary(config.n_decorr_bins, n_outputs=2, scale=0.01)(y_hat)
i = [inputs]
if LEARNMASS:
i.append(mass_inputs)
if LEARNRHO:
i.append(rho_inputs)
if LEARNPT:
i.append(pt_inputs)
pivoter = Model(inputs=i,
outputs=[y_hat]+kin_hats)
pivoter.compile(optimizer=Adam(lr=0.001),
loss=['categorical_crossentropy'] + ['categorical_crossentropy' for _ in kin_hats],
loss_weights=adv_loss_weights)
print '############# ARCHITECTURE #############'
pivoter.summary()
print '###################################'
'''
Now we train both models
'''
if ADV > 0:
print 'TRAINING ADVERSARIAL NETWORK'
system('mv logs/train_conv_adv.log logs/train_conv_adv.log.old')
flog = open('logs/train_conv_adv.log','w')
callback = LambdaCallback(
on_batch_end=lambda batch, logs: flog.write('%i,%f,%f,%f,%f\n'%(batch,logs['loss'],logs['dense_6_loss'],logs['dense_7_loss'],logs['dense_1_loss'])),
on_epoch_end=lambda epoch, logs: save_classifier(name='regularized_conv')
)
tb = TensorBoard(
log_dir = './logs/conv_logs',
write_graph = True,
write_images = True
)
print ' -Pre-training the classifier'
# bit of pre-training to get the classifer in the right place
classifier.fit_generator(classifier_train_gen,
steps_per_epoch=1000,
epochs=2)
save_classifier(name='pretrained_conv')
# np.set_printoptions(threshold='nan')
# print test_o
# print classifier.predict(test_i)
def save_and_exit(signal=None, frame=None, name='regularized_conv', model=classifier):
save_classifier(name, model)
flog.close()
exit(1)
signal.signal(signal.SIGINT, save_and_exit)
print ' -Training the adversarial stack'
# now train the model for real
pivoter.fit_generator(train_gen,
steps_per_epoch=5000,
epochs=NEPOCH*2,
# callbacks=[callback],
# validation_data=validation_gen,
# validation_steps=100
)
save_classifier(name='regularized_conv')
save_classifier(name='pivoter_conv', model=pivoter)
flog.close()
if ADV % 2 == 0:
print 'TRAINING CLASSIFIER ONLY'
system('mv logs/train_conv.log logs/train_conv.log.old')
flog = open('logs/train_conv.log','w')
callback = LambdaCallback(
on_batch_end=lambda batch, logs: flog.write('%i,%f\n'%(batch,logs['loss'])),
on_epoch_end=lambda epoch, logs: save_classifier(name='classifier_conv')
)
tb = TensorBoard(
log_dir = './logs/lstmnoreg_logs',
write_graph = True,
write_images = True
)
n_epochs = 1 if (ADV == 2) else 2 # fewer epochs if network is pretrained
n_epochs *= NEPOCH
def save_and_exit(signal=None, frame=None, name='classifier_conv', model=classifier):
save_classifier(name, model)
flog.close()
exit(1)
signal.signal(signal.SIGINT, save_and_exit)
classifier.fit_generator(classifier_train_gen,
steps_per_epoch=5000,
epochs=n_epochs,
# callbacks=[callback],
# validation_data=classifier_validation_gen,
# validation_steps=100
)
save_classifier(name='classifier_conv')
|
mit
| 1,051,288,253,197,318,900 | 31.555957 | 156 | 0.626192 | false |
sdvillal/manysources
|
chemdeco/integration/smartsviewer_utils.py
|
1
|
5954
|
# coding=utf-8
"""Thin python wrapper over SmartsViewer."""
import os.path as op
import subprocess
from joblib import Parallel, delayed, cpu_count
# for joblib pickling...
def _depict(svr, smarts, dest_file):
return svr.depict(smarts, dest_file)
def as_pil(paths):
from PIL import Image
return map(Image.open, paths)
class SmartsViewerRunner(object):
"""Pythonic interface to smartsviewer:
http://www.smartsviewer.de/
http://www.biosolveit.de/SMARTStools/
Parameters
----------
Output of running "SmartsViewer -h" for version 0.9.0
-----------------------------------------------------
SYNOPSIS:
smartsviewer [OPTIONS]
OPTIONS:
-h : Print this help text.
-s <smarts> : The input smarts for visualization. Either -s or -f have to be given.
-f <file> : A file containing the smarts. Either -s or -f have to be given.
-o <outfile> : Prints the diagram to <outfile>
possible file formats: .pdf, .ps, .svg
-d <w> <h> : Dimension of the .svg output file. (100 <= w|h <= 1000)
-p : Set default parameter.
: Eight values have to be given, range and defaults:
: 1. Display options: 0-3 <0>
: (0=Complete Visualization, 1= IDs, 2= Element symbols, 3=Structure Diagram-like)
: 2. Default bond options: 0-1 <0>
: (0=Single bond, 1=Single or aromatic bond
: 3. Show Userlabels?: 0-1 <0>
: (0=No, 1=Yes)
: 4. Trim-errorcheck?: 0-1 <0>
: (0=Yes, 1=No)
: 5. Trim-simplification?: 0-1 <0>
: (0=Yes, 1=No)
: 6. Trim-interpretation?: 0-1 <0>
: (0=Yes, 1=No)
: 7. Show Legend?: 0-3 <0>
: (0=No, 1=Dynamic legend, 2=Static Legend 3=Both)
: 8. Print SMARTS string into picture?: 0-1 <0>
: (0=YES, 1=NO)
"""
def __init__(self,
sv_root=None,
w=500, h=500, # Dimension of the .svg output file. (100 <= w|h <= 1000)
display_style=0, # (0=Complete Visualization,
# 1= IDs, 2= Element symbols, 3=Structure Diagram-like)
also_aromatic_bonds=False, # (0=Single bond, 1=Single or aromatic bond)
user_labels=False, # (0=No, 1=Yes)
trim_errorcheck=True, # (0=Yes, 1=No)
trim_simplification=True, # (0=Yes, 1=No)
trim_interpretation=True, # (0=Yes, 1=No)
legend_style=0, # (0=No, 1=Dynamic legend, 2=Static Legend 3=Both)
show_smarts=True): # (0=YES, 1=NO)
super(SmartsViewerRunner, self).__init__()
self.sv_root = sv_root if sv_root is not None else \
op.abspath(op.join(op.dirname(__file__), 'thirdparty', 'smartsviewer'))
self.w = w
self.h = h
self.display_style = display_style
self.also_aromatic = also_aromatic_bonds
self.user_labels = user_labels
self.trim_errorcheck = trim_errorcheck
self.trim_simplification = trim_simplification
self.trim_interpretation = trim_interpretation
self.legend_style = legend_style
self.show_smarts = show_smarts
self.cl = op.join(self.sv_root, 'SmartsViewer -d %d %d -p %d %d %d %d %d %d %d %d' %
(self.w, self.h, self.display_style,
0 if not self.also_aromatic else 1,
0 if not self.user_labels else 1,
0 if self.trim_errorcheck else 1,
0 if self.trim_simplification else 1,
0 if self.trim_interpretation else 1,
self.legend_style,
0 if self.show_smarts else 1))
def depict(self, smarts, dest_file):
"""
Generates the image file for a smarts string using the object configuration.
Parameters
----------
smarts : string
The smiles or smarts to depict
dest_file : string
The path to the file where the depiction will happen (.pdf, .ps and .svg are supported in v. 0.9.0)
Returns
-------
        dest_file : string
            The path to the generated depiction (the same value that was passed in).
"""
cl = self.cl + ' -s \"%s\" -o \"%s\"' % (smarts, dest_file)
subprocess.call(cl, shell=True) # TODO: eat the output
return dest_file
def depict_all(self, smartss, root, ext='.png', n_jobs=1):
if n_jobs is None:
n_jobs = cpu_count()
# TODO: block all in n_jobs blocks and only create once the pool
return Parallel(n_jobs=n_jobs)(delayed(_depict)(self, smarts, op.join(root, '%d%s' % (i, ext)))
for i, smarts in enumerate(smartss))
if __name__ == '__main__':
svr = SmartsViewerRunner(w=500, h=500, legend_style=3, show_smarts=False)
svr.depict('CCCC#CC1=CC(=CC(=C1)C#CC2=CC(=C(C=C2C#CC(C)(C)C)C3OCCO3)C#CC(C)(C)C)C#CCCC',
op.join(op.expanduser('~'), 'example-smartsviewer.png'))
smartss = ['CCCC#CC1=CC(=CC(=C1)C#CC2=CC(=C(C=C2C#CC(C)(C)C)C3OCCO3)C#CC(C)(C)C)C#CCCC'] * 20
print as_pil(svr.depict_all(smartss, op.expanduser('~'), n_jobs=20))
|
bsd-3-clause
| -6,591,404,141,517,652,000 | 45.155039 | 112 | 0.490595 | false |
jamesleesaunders/python-xbee
|
xbee/tests/test_base.py
|
1
|
5818
|
#! /usr/bin/python
"""
test_base.py
By Paul Malmsten, 2010
pmalmsten@gmail.com
Tests the XBeeBase superclass module for XBee API conformance.
"""
import unittest
from xbee.base import XBeeBase
from xbee.tests.Fake import Serial
class TestWriteToDevice(unittest.TestCase):
"""
XBeeBase class should properly._write binary data in a valid API
frame to a given serial device.
"""
def test_write(self):
"""
_write method should write the expected data to the serial
device
"""
device = Serial()
xbee = XBeeBase(device)
xbee._write(b'\x00')
        # Check resulting state of fake device
result_frame = device.get_data_written()
expected_frame = b'\x7E\x00\x01\x00\xFF'
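        # Frame layout: 0x7E start delimiter, 0x0001 length, 0x00 payload,
        # and checksum 0xFF = 0xFF - (sum of payload bytes & 0xFF).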
self.assertEqual(result_frame, expected_frame)
def test_write_again(self):
"""
_write method should write the expected data to the serial
device
"""
device = Serial()
xbee = XBeeBase(device)
xbee._write(b'\x00\x01\x02')
        # Check resulting state of fake device
expected_frame = b'\x7E\x00\x03\x00\x01\x02\xFC'
result_frame = device.get_data_written()
self.assertEqual(result_frame, expected_frame)
def test_write_escaped(self):
"""
_write method should write the expected data to the serial
device
"""
device = Serial()
xbee = XBeeBase(device,escaped=True)
xbee._write(b'\x7E\x01\x7D\x11\x13')
        # Check resulting state of fake device
expected_frame = b'\x7E\x00\x05\x7D\x5E\x01\x7D\x5D\x7D\x31\x7D\x33\xDF'
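        # API mode 2 escapes 0x7E, 0x7D, 0x11 and 0x13 as 0x7D followed by (byte XOR 0x20),
        # e.g. 0x7E -> 0x7D 0x5E; length and checksum are computed over the unescaped payload.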
result_frame = device.get_data_written()
self.assertEqual(result_frame, expected_frame)
class TestReadFromDevice(unittest.TestCase):
"""
XBeeBase class should properly read and extract data from a valid
API frame
"""
def test_read(self):
"""
_wait_for_frame should properly read a frame of data
"""
device = Serial()
device.set_read_data(b'\x7E\x00\x01\x00\xFF')
xbee = XBeeBase(device)
frame = xbee._wait_for_frame()
self.assertEqual(frame.data, b'\x00')
def test_read_invalid_followed_by_valid(self):
"""
_wait_for_frame should skip invalid data
"""
device = Serial()
device.set_read_data(b'\x7E\x00\x01\x00\xFA' + b'\x7E\x00\x01\x05\xFA')
xbee = XBeeBase(device)
frame = xbee._wait_for_frame()
self.assertEqual(frame.data, b'\x05')
def test_read_escaped(self):
"""
_wait_for_frame should properly read a frame of data
Verify that API mode 2 escaped bytes are read correctly
"""
device = Serial()
device.set_read_data(b'\x7E\x00\x04\x7D\x5E\x7D\x5D\x7D\x31\x7D\x33\xE0')
xbee = XBeeBase(device,escaped=True)
frame = xbee._wait_for_frame()
self.assertEqual(frame.data, b'\x7E\x7D\x11\x13')
class TestNotImplementedFeatures(unittest.TestCase):
"""
In order to properly use the XBeeBase class for most situations,
    it must be subclassed with the proper attributes defined. If
this is not the case, then a NotImplemented exception should be
raised as appropriate.
"""
def setUp(self):
"""
Set up a base class XBeeBase object which does not have
api_commands or api_responses defined
"""
self.xbee = XBeeBase(None)
def test_build_command(self):
"""
_build_command should raise NotImplemented
"""
self.assertRaises(NotImplementedError, self.xbee._build_command, "at")
def test_split_response(self):
"""
split_command should raise NotImplemented
"""
self.assertRaises(NotImplementedError, self.xbee._split_response, b"\x00")
def test_shorthand(self):
"""
Shorthand calls should raise NotImplementedError
"""
try:
self.xbee.at
except NotImplementedError:
pass
else:
self.fail("Shorthand call on XBeeBase base class should raise NotImplementedError")
class TestAsyncCallback(unittest.TestCase):
"""
XBeeBase constructor should accept an optional callback function
argument. When provided, this will put the module into a threaded
mode, in which it will call the provided function with any API
frame data received.
    As it would be very difficult to sanely test an asynchronous callback
routine with a synchronous test process, proper callback behavior
is not tested automatically at this time. Theoretically, the
callback implementation logic is simple, but use it at your own risk.
"""
def setUp(self):
self.xbee = None
self.serial = Serial()
self.callback = lambda data: None
self.error_callback = lambda data: None
def tearDown(self):
# Ensure proper thread shutdown before continuing
self.xbee.halt()
def test_provide_callback(self):
"""
XBeeBase constructor should accept a callback function
"""
self.xbee = XBeeBase(self.serial,
callback=self.callback,
error_callback=self.error_callback)
class TestInitialization(unittest.TestCase):
"""
Ensures that XBeeBase objects are properly constructed
"""
def setUp(self):
self.base = XBeeBase(None)
def test_thread_always_initialized(self):
"""
Even when a callback method is not supplied to the XBeeBase
        constructor, it must be properly initialized as a
threading.Thread object
"""
self.assertFalse(self.base.is_alive())
if __name__ == '__main__':
unittest.main()
|
mit
| 4,733,193,037,075,674,000 | 29.460733 | 95 | 0.625473 | false |
boada/planckClusters
|
MOSAICpipe/bpz-1.99.3/bpz.py
|
1
|
52171
|
"""
bpz: Bayesian Photo-Z estimation
Reference: Benitez 2000, ApJ, 536, p.571
Usage:
python bpz.py catalog.cat
Needs a catalog.columns file which describes the contents of catalog.cat
"""
from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import map
from builtins import input
from builtins import range
from past.utils import old_div
from useful import *
rolex = watch()
rolex.set()
#from Numeric import *
from numpy import *
from bpz_tools import *
from string import *
import os, glob, sys
import time
import pickle
import shelve
from coetools import pause, params_cl
class Printer():
"""Print things to stdout on one line dynamically"""
def __init__(self, data):
sys.stdout.write("\r\x1b[K" + data.__str__())
sys.stdout.flush()
def seglist(vals, mask=None):
"""Split vals into lists based on mask > 0"""
    if mask is None:
mask = greater(vals, 0)
lists = []
i = 0
lastgood = False
list1 = []
for i in range(len(vals)):
if mask[i] == False:
if lastgood:
lists.append(list1)
list1 = []
lastgood = False
if mask[i]:
list1.append(vals[i])
lastgood = True
if lastgood:
lists.append(list1)
return lists
# Initialization and definitions#
#Current directory
homedir = os.getcwd()
#Parameter definition
pars = params()
pars.d = {
'SPECTRA': 'CWWSB4.list', # template list
#'PRIOR': 'hdfn_SB', # prior name
'PRIOR': 'hdfn_gen', # prior name
'NTYPES':
None, # Number of Elliptical, Spiral, and Starburst/Irregular templates Default: 1,2,n-3
'DZ': 0.01, # redshift resolution
'ZMIN': 0.01, # minimum redshift
'ZMAX': 10., # maximum redshift
'MAG': 'yes', # Data in magnitudes?
'MIN_MAGERR': 0.001, # minimum magnitude uncertainty --DC
'ODDS': 0.95, # Odds threshold: affects confidence limits definition
'INTERP':
0, # Number of interpolated templates between each of the original ones
'EXCLUDE': 'none', # Filters to be excluded from the estimation
'NEW_AB': 'no', # If yes, generate new AB files even if they already exist
'CHECK':
'yes', # Perform some checks, compare observed colors with templates, etc.
'VERBOSE': 'yes', # Print estimated redshifts to the standard output
'PROBS':
'no', # Save all the galaxy probability distributions (it will create a very large file)
'PROBS2':
'no', # Save all the galaxy probability distributions P(z,t) (but not priors) -- Compact
'PROBS_LITE': 'yes', # Save only the final probability distribution
'GET_Z': 'yes', # Actually obtain photo-z
'ONLY_TYPE': 'no', # Use spectroscopic redshifts instead of photo-z
'MADAU': 'yes', #Apply Madau correction to spectra
'Z_THR': 0, #Integrate probability for z>z_thr
'COLOR': 'no', #Use colors instead of fluxes
'PLOTS': 'no', #Don't produce plots
'INTERACTIVE': 'yes', #Don't query the user
'PHOTO_ERRORS':
'no', #Define the confidence interval using only the photometric errors
'MIN_RMS':
0.05, #"Intrinsic" photo-z rms in dz /(1+z) (Change to 0.05 for templates from Benitez et al. 2004
'N_PEAKS': 1,
'MERGE_PEAKS': 'no',
'CONVOLVE_P': 'yes',
'P_MIN': 1e-2,
'SED_DIR': sed_dir,
'AB_DIR': ab_dir,
'FILTER_DIR': fil_dir,
'DELTA_M_0': 0.,
'ZP_OFFSETS': 0.,
'ZC': None,
'FC': None,
"ADD_SPEC_PROB": None,
"ADD_CONTINUOUS_PROB": None,
"NMAX": None # Useful for testing
}
if pars.d['PLOTS'] == 'no': plots = 0
if plots:
# If pylab installed show plots
plots = 'pylab'
try:
import matplotlib
matplotlib.use('TkAgg')
from pylab import *
# from coeplot2a import *
plot([1])
title('KILL THIS WINDOW!')
show()
ioff()
except:
try:
from biggles import *
plots = 'biggles'
except:
plots = 0
#Define the default values of the parameters
pars.d['INPUT'] = sys.argv[1] # catalog with the photometry
obs_file = pars.d['INPUT']
root = os.path.splitext(pars.d['INPUT'])[0]
pars.d[
'COLUMNS'] = root + '.columns' # column information for the input catalog
pars.d['OUTPUT'] = root + '.bpz' # output
nargs = len(sys.argv)
ipar = 2
if nargs > 2: #Check for parameter file and update parameters
if sys.argv[2] == '-P':
pars.fromfile(sys.argv[3])
ipar = 4
# Update the parameters using command line additions
#pars.fromcommandline(sys.argv[ipar:])
#for key in pars.d:
# print key, pars.d[key]
#pause()
pars.d.update(
params_cl()) # allows for flag only (no value after), e.g., -CHECK
def updateblank(var, ext):
global pars
if pars.d[var] in [None, 'yes']:
pars.d[var] = root + '.' + ext
updateblank('CHECK', 'flux_comparison')
updateblank('PROBS_LITE', 'probs')
updateblank('PROBS', 'full_probs')
updateblank('PROBS2', 'chisq')
#if pars.d['CHECK'] in [None, 'yes']:
# pars.d['CHECK'] = root+'.flux_comparison'
#This allows to change the auxiliary directories used by BPZ
if pars.d['SED_DIR'] != sed_dir:
print("Changing sed_dir to ", pars.d['SED_DIR'])
sed_dir = pars.d['SED_DIR']
if sed_dir[-1] != '/': sed_dir += '/'
if pars.d['AB_DIR'] != ab_dir:
print("Changing ab_dir to ", pars.d['AB_DIR'])
ab_dir = pars.d['AB_DIR']
if ab_dir[-1] != '/': ab_dir += '/'
if pars.d['FILTER_DIR'] != fil_dir:
print("Changing fil_dir to ", pars.d['FILTER_DIR'])
fil_dir = pars.d['FILTER_DIR']
if fil_dir[-1] != '/': fil_dir += '/'
#Better safe than sorry
if pars.d['OUTPUT'] == obs_file or pars.d['PROBS'] == obs_file or pars.d[
'PROBS2'] == obs_file or pars.d['PROBS_LITE'] == obs_file:
print("This would delete the input file!")
sys.exit()
if pars.d['OUTPUT'] == pars.d['COLUMNS'] or pars.d['PROBS_LITE'] == pars.d[
'COLUMNS'] or pars.d['PROBS'] == pars.d['COLUMNS']:
print("This would delete the .columns file!")
sys.exit()
#Assign the intrinsic rms
if pars.d['SPECTRA'] == 'CWWSB.list':
print('Setting the intrinsic rms to 0.067(1+z)')
pars.d['MIN_RMS'] = 0.067
pars.d['MIN_RMS'] = float(pars.d['MIN_RMS'])
pars.d['MIN_MAGERR'] = float(pars.d['MIN_MAGERR'])
if pars.d['INTERACTIVE'] == 'no': interactive = 0
else: interactive = 1
if pars.d['VERBOSE'] == 'yes':
print("Current parameters")
view_keys(pars.d)
pars.d['N_PEAKS'] = int(pars.d['N_PEAKS'])
if pars.d["ADD_SPEC_PROB"] != None:
specprob = 1
specfile = pars.d["ADD_SPEC_PROB"]
spec = get_2Darray(specfile)
ns = spec.shape[1]
if old_div(ns, 2) != (old_div(ns, 2.)):
print("Number of columns in SPEC_PROB is odd")
sys.exit()
z_spec = spec[:, :old_div(ns, 2)]
p_spec = spec[:, old_div(ns, 2):]
# Write output file header
header = "#ID "
    header += ns // 2 * " z_spec%i"
    header += ns // 2 * " p_spec%i"
header += "\n"
header = header % tuple(list(range(old_div(ns, 2))) + list(range(old_div(
ns, 2))))
specout = open(specfile.split()[0] + ".p_spec", "w")
specout.write(header)
else:
specprob = 0
pars.d['DELTA_M_0'] = float(pars.d['DELTA_M_0'])
#Some misc. initialization info useful for the .columns file
#nofilters=['M_0','OTHER','ID','Z_S','X','Y']
nofilters = ['M_0', 'OTHER', 'ID', 'Z_S']
#Numerical codes for nondetection, etc. in the photometric catalog
unobs = -99. #Objects not observed
undet = 99. #Objects not detected
#Define the z-grid
zmin = float(pars.d['ZMIN'])
zmax = float(pars.d['ZMAX'])
if zmin > zmax: raise ValueError('ZMIN must be smaller than ZMAX!')
dz = float(pars.d['DZ'])
linear = 1
if linear:
z = arange(zmin, zmax + dz, dz)
else:
if zmax != 0.:
zi = zmin
z = []
while zi <= zmax:
z.append(zi)
zi = zi + dz * (1. + zi)
z = array(z)
else:
z = array([0.])
#Now check the contents of the FILTERS, SED and AB directories
#Get the filters in stock
filters_db = []
filters_db = glob.glob(fil_dir + '*.res')
for i in range(len(filters_db)):
filters_db[i] = os.path.basename(filters_db[i])
filters_db[i] = filters_db[i][:-4]
#Get the SEDs in stock
sed_db = []
sed_db = glob.glob(sed_dir + '*.sed')
for i in range(len(sed_db)):
sed_db[i] = os.path.basename(sed_db[i])
sed_db[i] = sed_db[i][:-4]
#Get the ABflux files in stock
ab_db = []
ab_db = glob.glob(ab_dir + '*.AB')
for i in range(len(ab_db)):
ab_db[i] = os.path.basename(ab_db[i])
ab_db[i] = ab_db[i][:-3]
#Get a list with the filter names and check whether they are in stock
col_file = pars.d['COLUMNS']
filters = get_str(col_file, 0)
for cosa in nofilters:
if filters.count(cosa): filters.remove(cosa)
if pars.d['EXCLUDE'] != 'none':
if type(pars.d['EXCLUDE']) == type(' '):
pars.d['EXCLUDE'] = [pars.d['EXCLUDE']]
for cosa in pars.d['EXCLUDE']:
if filters.count(cosa): filters.remove(cosa)
for filter in filters:
if filter[-4:] == '.res': filter = filter[:-4]
if filter not in filters_db:
print('filter ', filter, 'not in database at', fil_dir, ':')
if ask('Print filters in database?'):
for line in filters_db:
print(line)
sys.exit()
#Get a list with the spectrum names and check whether they're in stock
#Look for the list in the home directory first,
#if it's not there, look in the SED directory
spectra_file = os.path.join(homedir, pars.d['SPECTRA'])
if not os.path.exists(spectra_file):
spectra_file = os.path.join(sed_dir, pars.d['SPECTRA'])
spectra = get_str(spectra_file, 0)
for i in range(len(spectra)):
if spectra[i][-4:] == '.sed': spectra[i] = spectra[i][:-4]
nf = len(filters)
nt = len(spectra)
nz = len(z)
#Get the model fluxes
f_mod = zeros((nz, nt, nf)) * 0.
abfiles = []
for it in range(nt):
for jf in range(nf):
if filters[jf][-4:] == '.res': filtro = filters[jf][:-4]
else: filtro = filters[jf]
#model = join([spectra[it], filtro, 'AB'], '.')
model = '.'.join([spectra[it], filtro, 'AB'])
model_path = os.path.join(ab_dir, model)
abfiles.append(model)
#Generate new ABflux files if not present
# or if new_ab flag on
if pars.d['NEW_AB'] == 'yes' or model[:-3] not in ab_db:
if spectra[it] not in sed_db:
print('SED ', spectra[it], 'not in database at', sed_dir)
# for line in sed_db:
# print line
sys.exit()
#print spectra[it],filters[jf]
print(' Generating ', model, '....')
ABflux(spectra[it], filtro, madau=pars.d['MADAU'])
#z_ab=arange(0.,zmax_ab,dz_ab) #zmax_ab and dz_ab are def. in bpz_tools
# abflux=f_z_sed(spectra[it],filters[jf], z_ab,units='nu',madau=pars.d['MADAU'])
# abflux=clip(abflux,0.,1e400)
# buffer=join(['#',spectra[it],filters[jf], 'AB','\n'])
#for i in range(len(z_ab)):
# buffer=buffer+join([`z_ab[i]`,`abflux[i]`,'\n'])
#open(model_path,'w').write(buffer)
#zo=z_ab
#f_mod_0=abflux
#else:
#Read the data
zo, f_mod_0 = get_data(model_path, (0, 1))
#Rebin the data to the required redshift resolution
f_mod[:, it, jf] = match_resol(zo, f_mod_0, z)
#if sometrue(less(f_mod[:,it,jf],0.)):
if less(f_mod[:, it, jf], 0.).any():
print('Warning: some values of the model AB fluxes are <0')
print('due to the interpolation ')
print('Clipping them to f>=0 values')
#To avoid rounding errors in the calculation of the likelihood
f_mod[:, it, jf] = clip(f_mod[:, it, jf], 0., 1e300)
#We forbid f_mod to take values in the (0,1e-100) interval
#f_mod[:,it,jf]=where(less(f_mod[:,it,jf],1e-100)*greater(f_mod[:,it,jf],0.),0.,f_mod[:,it,jf])
#Here goes the interpolation between the colors
ninterp = int(pars.d['INTERP'])
ntypes = pars.d['NTYPES']
if ntypes == None:
nt0 = nt
else:
nt0 = list(ntypes)
for i, nt1 in enumerate(nt0):
print(i, nt1)
nt0[i] = int(nt1)
if (len(nt0) != 3) or (sum(nt0) != nt):
print()
    print('%d ellipticals + %d spirals + %d starbursts' % tuple(nt0))
print('does not add up to %d templates' % nt)
print('USAGE: -NTYPES nell,nsp,nsb')
print('nell = # of elliptical templates')
print('nsp = # of spiral templates')
print('nsb = # of starburst templates')
print(
'These must add up to the number of templates in the SPECTRA list')
print('Quitting BPZ.')
sys.exit()
if ninterp:
nti = nt + (nt - 1) * ninterp
buffer = zeros((nz, nti, nf)) * 1.
tipos = arange(0., float(nti), float(ninterp) + 1.)
xtipos = arange(float(nti))
for iz in arange(nz):
for jf in range(nf):
buffer[iz, :, jf] = match_resol(tipos, f_mod[iz, :, jf], xtipos)
nt = nti
f_mod = buffer
#for j in range(nf):
# plot=FramedPlot()
# for i in range(nt): plot.add(Curve(z,log(f_mod[:,i,j]+1e-40)))
# plot.show()
# ask('More?')
#Load all the parameters in the columns file to a dictionary
col_pars = params()
col_pars.fromfile(col_file)
# Read which filters are in which columns
flux_cols = []
eflux_cols = []
cals = []
zp_errors = []
zp_offsets = []
for filter in filters:
datos = col_pars.d[filter]
flux_cols.append(int(datos[0]) - 1)
eflux_cols.append(int(datos[1]) - 1)
cals.append(datos[2])
zp_errors.append(datos[3])
zp_offsets.append(datos[4])
zp_offsets = array(list(map(float, zp_offsets)))
if pars.d['ZP_OFFSETS']:
zp_offsets += array(list(map(float, pars.d['ZP_OFFSETS'])))
flux_cols = tuple(flux_cols)
eflux_cols = tuple(eflux_cols)
#READ the flux and errors from obs_file
f_obs = get_2Darray(obs_file, flux_cols)
ef_obs = get_2Darray(obs_file, eflux_cols)
#Convert them to arbitrary fluxes if they are in magnitudes
if pars.d['MAG'] == 'yes':
seen = greater(f_obs, 0.) * less(f_obs, undet)
no_seen = equal(f_obs, undet)
no_observed = equal(f_obs, unobs)
todo = seen + no_seen + no_observed
#The minimum photometric error is 0.01
#ef_obs=ef_obs+seen*equal(ef_obs,0.)*0.001
ef_obs = where(
greater_equal(ef_obs, 0.), clip(ef_obs, pars.d['MIN_MAGERR'], 1e10),
ef_obs)
if add.reduce(add.reduce(todo)) != todo.shape[0] * todo.shape[1]:
print('Objects with unexpected magnitudes!')
print("""Allowed values for magnitudes are
0<m<""" + repr(undet) + " m=" + repr(undet) + "(non detection), m=" + repr(
unobs) + "(not observed)")
for i in range(len(todo)):
if not alltrue(todo[i, :]):
print(i + 1, f_obs[i, :], ef_obs[i, :])
sys.exit()
#Detected objects
try:
f_obs = where(seen, 10.**(-.4 * f_obs), f_obs)
except OverflowError:
print(
'Some of the input magnitudes have values which are >700 or <-700')
print('Purge the input photometric catalog')
print('Minimum value', min(f_obs))
print('Maximum value', max(f_obs))
print('Indexes for minimum values', argmin(f_obs, 0.))
print('Indexes for maximum values', argmax(f_obs, 0.))
print('Bye.')
sys.exit()
try:
ef_obs = where(seen, (10.**(.4 * ef_obs) - 1.) * f_obs, ef_obs)
except OverflowError:
print(
'Some of the input magnitude errors have values which are >700 or <-700')
print('Purge the input photometric catalog')
print('Minimum value', min(ef_obs))
print('Maximum value', max(ef_obs))
print('Indexes for minimum values', argmin(ef_obs, 0.))
print('Indexes for maximum values', argmax(ef_obs, 0.))
print('Bye.')
sys.exit()
#print 'ef', ef_obs[0,:nf]
#print 'f', f_obs[1,:nf]
#print 'ef', ef_obs[1,:nf]
#Looked at, but not detected objects (mag=99.)
#We take the flux equal to zero, and the error in the flux equal to the 1-sigma detection error.
#If m=99, the corresponding error magnitude column is supposed to be dm=m_1sigma, to avoid errors
#with the sign we take the absolute value of dm
f_obs = where(no_seen, 0., f_obs)
ef_obs = where(no_seen, 10.**(-.4 * abs(ef_obs)), ef_obs)
#Objects not looked at (mag=-99.)
f_obs = where(no_observed, 0., f_obs)
ef_obs = where(no_observed, 0., ef_obs)
#Flux codes:
# If f>0 and ef>0 : normal objects
# If f==0 and ef>0 :object not detected
# If f==0 and ef==0: object not observed
#Everything else will crash the program
#Check that the observed error fluxes are reasonable
#if sometrue(less(ef_obs,0.)): raise 'Negative input flux errors'
if less(ef_obs, 0.).any():
raise ValueError('Negative input flux errors')
f_obs = where(less(f_obs, 0.), 0., f_obs) #Put non-detections to 0
ef_obs = where(
less(f_obs, 0.), maximum(1e-100, f_obs + ef_obs),
ef_obs) # Error equivalent to 1 sigma upper limit
#if sometrue(less(f_obs,0.)) : raise 'Negative input fluxes'
seen = greater(f_obs, 0.) * greater(ef_obs, 0.)
no_seen = equal(f_obs, 0.) * greater(ef_obs, 0.)
no_observed = equal(f_obs, 0.) * equal(ef_obs, 0.)
todo = seen + no_seen + no_observed
if add.reduce(add.reduce(todo)) != todo.shape[0] * todo.shape[1]:
print('Objects with unexpected fluxes/errors')
#Convert (internally) objects with zero flux and zero error (non observed)
#to objects with almost infinite (~1e108) error and still zero flux
#This will yield reasonable likelihoods (flat ones) for these objects
ef_obs = where(no_observed, 1e108, ef_obs)
#Include the zero point errors
zp_errors = array(list(map(float, zp_errors)))
zp_frac = e_mag2frac(zp_errors)
#zp_frac=10.**(.4*zp_errors)-1.
ef_obs = where(seen, sqrt(ef_obs * ef_obs + (zp_frac * f_obs)**2), ef_obs)
ef_obs = where(no_seen,
sqrt(ef_obs * ef_obs + (zp_frac * (old_div(ef_obs, 2.)))**2),
ef_obs)
#Add the zero-points offset
#The offsets are defined as m_new-m_old
zp_offsets = array(list(map(float, zp_offsets)))
zp_offsets = where(not_equal(zp_offsets, 0.), 10.**(-.4 * zp_offsets), 1.)
f_obs = f_obs * zp_offsets
ef_obs = ef_obs * zp_offsets
#Convert fluxes to AB if needed
for i in range(f_obs.shape[1]):
if cals[i] == 'Vega':
const = mag2flux(VegatoAB(0., filters[i]))
f_obs[:, i] = f_obs[:, i] * const
ef_obs[:, i] = ef_obs[:, i] * const
elif cals[i] == 'AB':
continue
else:
print('AB or Vega?. Check ' + col_file + ' file')
sys.exit()
#Get m_0 (if present)
if 'M_0' in col_pars.d:
m_0_col = int(col_pars.d['M_0']) - 1
m_0 = get_data(obs_file, m_0_col)
m_0 += pars.d['DELTA_M_0']
#Get the objects ID (as a string)
if 'ID' in col_pars.d:
# print col_pars.d['ID']
id_col = int(col_pars.d['ID']) - 1
id = get_str(obs_file, id_col)
else:
id = list(map(str, list(range(1, len(f_obs[:, 0]) + 1))))
#Get spectroscopic redshifts (if present)
if 'Z_S' in col_pars.d:
z_s_col = int(col_pars.d['Z_S']) - 1
z_s = get_data(obs_file, z_s_col)
#Get the X,Y coordinates
if 'X' in col_pars.d:
datos = col_pars.d['X']
if len(datos) == 1: # OTHERWISE IT'S A FILTER!
x_col = int(col_pars.d['X']) - 1
x = get_data(obs_file, x_col)
if 'Y' in col_pars.d:
datos = col_pars.d['Y']
if len(datos) == 1: # OTHERWISE IT'S A FILTER!
y_col = int(datos) - 1
y = get_data(obs_file, y_col)
#If 'check' on, initialize some variables
check = pars.d['CHECK']
# This generates a file with m,z,T and observed/expected colors
#if check=='yes': pars.d['FLUX_COMPARISON']=root+'.flux_comparison'
checkSED = check != 'no'
ng = f_obs.shape[0]
if checkSED:
# PHOTOMETRIC CALIBRATION CHECK
#r=zeros((ng,nf),float)+1.
#dm=zeros((ng,nf),float)+1.
#w=r*0.
# Defaults: r=1, dm=1, w=0
frat = ones((ng, nf), float)
dmag = ones((ng, nf), float)
fw = zeros((ng, nf), float)
#Visualize the colors of the galaxies and the templates
#When there are spectroscopic redshifts available
if interactive and 'Z_S' in col_pars.d and plots and checkSED and ask(
'Plot colors vs spectroscopic redshifts?'):
color_m = zeros((nz, nt, nf - 1)) * 1.
if plots == 'pylab':
figure(1)
nrows = 2
ncols = old_div((nf - 1), nrows)
if (nf - 1) % nrows: ncols += 1
for i in range(nf - 1):
##plot=FramedPlot()
# Check for overflows
fmu = f_obs[:, i + 1]
fml = f_obs[:, i]
good = greater(fml, 1e-100) * greater(fmu, 1e-100)
zz, fmu, fml = multicompress(good, (z_s, fmu, fml))
colour = old_div(fmu, fml)
colour = clip(colour, 1e-5, 1e5)
colour = 2.5 * log10(colour)
if plots == 'pylab':
subplot(nrows, ncols, i + 1)
plot(zz, colour, "bo")
elif plots == 'biggles':
d = Points(zz, colour, color='blue')
plot.add(d)
for it in range(nt):
#Prevent overflows
fmu = f_mod[:, it, i + 1]
fml = f_mod[:, it, i]
good = greater(fml, 1e-100)
zz, fmu, fml = multicompress(good, (z, fmu, fml))
colour = old_div(fmu, fml)
colour = clip(colour, 1e-5, 1e5)
colour = 2.5 * log10(colour)
if plots == 'pylab':
plot(zz, colour, "r")
elif plots == 'biggles':
d = Curve(zz, colour, color='red')
plot.add(d)
if plots == 'pylab':
xlabel(r'$z$')
ylabel('%s - %s' % (filters[i], filters[i + 1]))
elif plots == 'biggles':
plot.xlabel = r'$z$'
plot.ylabel = '%s - %s' % (filters[i], filters[i + 1])
plot.save_as_eps('%s-%s.eps' % (filters[i], filters[i + 1]))
plot.show()
if plots == 'pylab':
show()
        inp = input('Hit Enter to continue.')
#Get other information which will go in the output file (as strings)
if 'OTHER' in col_pars.d:
if col_pars.d['OTHER'] != 'all':
other_cols = col_pars.d['OTHER']
if type(other_cols) == type((2, )):
other_cols = tuple(map(int, other_cols))
else:
other_cols = (int(other_cols), )
other_cols = [x - 1 for x in other_cols]
n_other = len(other_cols)
else:
n_other = get_2Darray(obs_file, cols='all', nrows=1).shape[1]
other_cols = list(range(n_other))
others = get_str(obs_file, other_cols)
if len(other_cols) > 1:
other = []
for j in range(len(others[0])):
lista = []
for i in range(len(others)):
lista.append(others[i][j])
other.append(join(lista))
else:
other = others
if pars.d['GET_Z'] == 'no': get_z = 0
else: get_z = 1
#Prepare the output file
out_name = pars.d['OUTPUT']
if get_z:
if os.path.exists(out_name):
os.system('cp %s %s.bak' % (out_name, out_name))
print("File %s exists. Copying it to %s.bak" % (out_name, out_name))
output = open(out_name, 'w')
if pars.d['PROBS_LITE'] == 'no': save_probs = 0
else: save_probs = 1
if pars.d['PROBS'] == 'no': save_full_probs = 0
else: save_full_probs = 1
if pars.d['PROBS2'] == 'no': save_probs2 = 0
else: save_probs2 = 1
#Include some header information
# File name and the date...
time_stamp = time.ctime(time.time())
if get_z: output.write('## File ' + out_name + ' ' + time_stamp + '\n')
#and also the parameters used to run bpz...
if get_z: output.write("""##
##Parameters used to run BPZ:
##
""")
claves = list(pars.d.keys())
claves.sort()
for key in claves:
if type(pars.d[key]) == type((1, )):
cosa = join(list(pars.d[key]), ',')
else:
cosa = str(pars.d[key])
if get_z: output.write('##' + key.upper() + '=' + cosa + '\n')
if save_full_probs:
#Shelve some info on the run
full_probs = shelve.open(pars.d['PROBS'])
full_probs['TIME'] = time_stamp
full_probs['PARS'] = pars.d
if save_probs:
probs = open(pars.d['PROBS_LITE'], 'w')
probs.write('# ID p_bayes(z) where z=arange(%.4f,%.4f,%.4f) \n' %
(zmin, zmax + dz, dz))
if save_probs2:
probs2 = open(pars.d['PROBS2'], 'w')
probs2.write(
'# id t z1 P(z1) P(z1+dz) P(z1+2*dz) ... where dz = %.4f\n' % dz)
#probs2.write('# ID\n')
#probs2.write('# t z1 P(z1) P(z1+dz) P(z1+2*dz) ... where dz = %.4f\n' % dz)
#Use a empirical prior?
tipo_prior = pars.d['PRIOR']
useprior = 0
if 'M_0' in col_pars.d:
has_mags = 1
else:
has_mags = 0
if has_mags and tipo_prior != 'none' and tipo_prior != 'flat':
useprior = 1
#Add cluster 'spikes' to the prior?
cluster_prior = 0.
if pars.d['ZC']:
cluster_prior = 1
if type(pars.d['ZC']) == type(""): zc = array([float(pars.d['ZC'])])
else: zc = array(list(map(float, pars.d['ZC'])))
if type(pars.d['FC']) == type(""): fc = array([float(pars.d['FC'])])
else: fc = array(list(map(float, pars.d['FC'])))
fcc = add.reduce(fc)
if fcc > 1.:
        print(fcc)
        raise ValueError('Too many galaxies in clusters!')
pi_c = zeros((nz, nt)) * 1.
#Go over the different cluster spikes
for i in range(len(zc)):
#We define the cluster within dz=0.01 limits
cluster_range = less_equal(abs(z - zc[i]), .01) * 1.
#Clip values to avoid overflow
exponente = clip(-(z - zc[i])**2 / 2. / (0.00333)**2, -700., 0.)
#Outside the cluster range g is 0
g = exp(exponente) * cluster_range
norm = add.reduce(g)
pi_c[:, 0] = pi_c[:, 0] + g / norm * fc[i]
#Go over the different types
print('We only apply the cluster prior to the early type galaxies')
for i in range(1, 3 + 2 * ninterp):
pi_c[:, i] = pi_c[:, i] + pi_c[:, 0]
#Output format
format = '%' + repr(maximum(5, len(id[0]))) + 's' #ID format
format = format + pars.d[
'N_PEAKS'] * ' %.3f %.3f %.3f %.3f %.5f' + ' %.3f %.3f %10.3f'
#Add header with variable names to the output file
sxhdr = """##
##Column information
##
# 1 ID"""
k = 1
if pars.d['N_PEAKS'] > 1:
for j in range(pars.d['N_PEAKS']):
sxhdr += """
# %i Z_B_%i
# %i Z_B_MIN_%i
# %i Z_B_MAX_%i
# %i T_B_%i
# %i ODDS_%i""" % (k + 1, j + 1, k + 2, j + 1, k + 3, j + 1, k + 4, j + 1,
k + 5, j + 1)
k += 5
else:
sxhdr += """
# %i Z_B
# %i Z_B_MIN
# %i Z_B_MAX
# %i T_B
# %i ODDS""" % (k + 1, k + 2, k + 3, k + 4, k + 5)
k += 5
sxhdr += """
# %i Z_ML
# %i T_ML
# %i CHI-SQUARED\n""" % (k + 1, k + 2, k + 3)
nh = k + 4
if 'Z_S' in col_pars.d:
sxhdr = sxhdr + '# %i Z_S\n' % nh
format = format + ' %.3f'
nh += 1
if has_mags:
format = format + ' %.3f'
sxhdr = sxhdr + '# %i M_0\n' % nh
nh += 1
if 'OTHER' in col_pars.d:
sxhdr = sxhdr + '# %i OTHER\n' % nh
format = format + ' %s'
nh += n_other
#print sxhdr
if get_z: output.write(sxhdr + '##\n')
odds_i = float(pars.d['ODDS'])
oi = inv_gauss_int(odds_i)
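# oi is presumably the half-width, in sigmas, of a Gaussian interval enclosing a
# probability of odds_i; it sets the odds integration limits zo1/zo2 further below.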
print(odds_i, oi)
#Proceed to redshift estimation
if checkSED: buffer_flux_comparison = ""
if pars.d['CONVOLVE_P'] == 'yes':
# Will Convolve with a dz=0.03 gaussian to make probabilities smoother
# This is necessary; if not there are too many close peaks
sigma_g = 0.03
x = arange(-3. * sigma_g, 3. * sigma_g + old_div(dz, 10.),
dz) # made symmetric --DC
gaus = exp(-(old_div(x, sigma_g))**2)
if pars.d["NMAX"] != None: ng = int(pars.d["NMAX"])
for ig in range(ng):
currentPercent = ig / ng * 100
status = "{:.3f}% of {} completed.".format(currentPercent, ng)
Printer(status)
#Don't run BPZ on galaxies with have z_s > z_max
#if col_pars.d.has_key('Z_S'):
# if z_s[ig]<9.9 and z_s[ig]>zmax : continue
if not get_z: continue
if pars.d['COLOR'] == 'yes':
likelihood = p_c_z_t_color(f_obs[ig, :nf], ef_obs[ig, :nf],
f_mod[:nz, :nt, :nf])
else:
likelihood = p_c_z_t(f_obs[ig, :nf], ef_obs[ig, :nf],
f_mod[:nz, :nt, :nf])
if 0:
print(f_obs[ig, :nf])
print(ef_obs[ig, :nf])
iz_ml = likelihood.i_z_ml
t_ml = likelihood.i_t_ml
red_chi2 = old_div(likelihood.min_chi2, float(nf - 1.))
#p=likelihood.Bayes_likelihood
#likelihood.various_plots()
#print 'FULL BAYESAIN LIKELIHOOD'
p = likelihood.likelihood
if not ig:
print('ML * prior -- NOT QUITE BAYESIAN')
if pars.d[
'ONLY_TYPE'] == 'yes': #Use only the redshift information, no priors
p_i = zeros((nz, nt)) * 1.
j = searchsorted(z, z_s[ig])
#print j,nt,z_s[ig]
try:
p_i[j, :] = old_div(1., float(nt))
except IndexError:
pass
else:
if useprior:
if pars.d['PRIOR'] == 'lensing':
p_i = prior(z, m_0[ig], tipo_prior, nt0, ninterp, x[ig], y[ig])
else:
p_i = prior(z, m_0[ig], tipo_prior, nt0, ninterp)
else:
p_i = old_div(ones((nz, nt), float), float(nz * nt))
if cluster_prior: p_i = (1. - fcc) * p_i + pi_c
if save_full_probs:
full_probs[id[ig]] = [z, p_i[:nz, :nt], p[:nz, :nt], red_chi2]
#Multiply the prior by the likelihood to find the final probability
pb = p_i[:nz, :nt] * p[:nz, :nt]
#plo=FramedPlot()
#for i in range(p.shape[1]):
# plo.add(Curve(z,p_i[:nz,i]/sum(sum(p_i[:nz,:]))))
#for i in range(p.shape[1]):
# plo.add(Curve(z,p[:nz,i]/sum(sum(p[:nz,:])),color='red'))
#plo.add(Curve(z,pb[:nz,-1]/sum(pb[:nz,-1]),color='blue'))
#plo.show()
#ask('More?')
#Convolve with a gaussian of width \sigma(1+z) to take into
    #account the intrinsic scatter in the redshift estimation 0.06*(1+z)
#(to be done)
#Estimate the bayesian quantities
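    # Marginalize the joint posterior pb[z, t] over templates to obtain P(z)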
p_bayes = add.reduce(pb[:nz, :nt], -1)
#print p_bayes.shape
#print argmax(p_bayes)
#print p_bayes[300:310]
#Convolve with a gaussian
if pars.d['CONVOLVE_P'] == 'yes' and pars.d['ONLY_TYPE'] == 'no':
#print 'GAUSS CONV'
p_bayes = convolve(p_bayes, gaus, 1)
#print 'gaus', gaus
#print p_bayes.shape
#print argmax(p_bayes)
#print p_bayes[300:310]
# Eliminate all low level features in the prob. distribution
pmax = max(p_bayes)
p_bayes = where(
greater(p_bayes, pmax * float(pars.d['P_MIN'])), p_bayes, 0.)
norm = add.reduce(p_bayes)
p_bayes = old_div(p_bayes, norm)
if specprob:
p_spec[ig, :] = match_resol(z, p_bayes, z_spec[ig, :]) * p_spec[ig, :]
norma = add.reduce(p_spec[ig, :])
if norma == 0.: norma = 1.
p_spec[ig, :] /= norma
#vyjod=tuple([id[ig]]+list(z_spec[ig,:])+list(p_spec[ig,:])+[z_s[ig],
# int(float(other[ig]))])
vyjod = tuple([id[ig]] + list(z_spec[ig, :]) + list(p_spec[ig, :]))
formato = "%s " + 5 * " %.4f"
formato += 5 * " %.3f"
#formato+=" %4f %i"
formato += "\n"
print(formato % vyjod)
specout.write(formato % vyjod)
if pars.d['N_PEAKS'] > 1:
# Identify maxima and minima in the final probability
g_max = less(p_bayes[2:], p_bayes[1:-1]) * less(p_bayes[:-2],
p_bayes[1:-1])
g_min = greater(p_bayes[2:], p_bayes[1:-1]) * greater(p_bayes[:-2],
p_bayes[1:-1])
g_min += equal(p_bayes[1:-1], 0.) * greater(p_bayes[2:], 0.)
g_min += equal(p_bayes[1:-1], 0.) * greater(p_bayes[:-2], 0.)
i_max = compress(g_max, arange(nz - 2)) + 1
i_min = compress(g_min, arange(nz - 2)) + 1
# Check that the first point and the last one are not minima or maxima,
# if they are, add them to the index arrays
if p_bayes[0] > p_bayes[1]:
i_max = concatenate([[0], i_max])
i_min = concatenate([[0], i_min])
if p_bayes[-1] > p_bayes[-2]:
i_max = concatenate([i_max, [nz - 1]])
i_min = concatenate([i_min, [nz - 1]])
if p_bayes[0] < p_bayes[1]:
i_min = concatenate([[0], i_min])
if p_bayes[-1] < p_bayes[-2]:
i_min = concatenate([i_min, [nz - 1]])
p_max = take(p_bayes, i_max)
#p_min=take(p_bayes,i_min)
p_tot = []
z_peaks = []
t_peaks = []
# Sort them by probability values
p_max, i_max = multisort(old_div(1., p_max), (p_max, i_max))
# For each maximum, define the minima which sandwich it
# Assign minima to each maximum
jm = searchsorted(i_min, i_max)
p_max = list(p_max)
for i in range(len(i_max)):
z_peaks.append([z[i_max[i]], z[i_min[jm[i] - 1]], z[i_min[jm[i]]]])
t_peaks.append(argmax(pb[i_max[i], :nt]))
p_tot.append(sum(p_bayes[i_min[jm[i] - 1]:i_min[jm[i]]]))
# print z_peaks[-1][0],f_mod[i_max[i],t_peaks[-1]-1,:nf]
if ninterp:
t_peaks = list(old_div(array(t_peaks), (1. + ninterp)))
if pars.d['MERGE_PEAKS'] == 'yes':
# Merge peaks which are very close 0.03(1+z)
merged = []
for k in range(len(z_peaks)):
for j in range(len(z_peaks)):
if j > k and k not in merged and j not in merged:
if abs(z_peaks[k][0] - z_peaks[j][0]) < 0.06 * (
1. + z_peaks[j][0]):
# Modify the element which receives the accretion
z_peaks[k][1] = minimum(z_peaks[k][1],
z_peaks[j][1])
z_peaks[k][2] = maximum(z_peaks[k][2],
z_peaks[j][2])
p_tot[k] += p_tot[j]
# Put the merged element in the list
merged.append(j)
#print merged
# Clean up
copia = p_tot[:]
for j in merged:
p_tot.remove(copia[j])
copia = z_peaks[:]
for j in merged:
z_peaks.remove(copia[j])
copia = t_peaks[:]
for j in merged:
t_peaks.remove(copia[j])
copia = p_max[:]
for j in merged:
p_max.remove(copia[j])
if sum(array(p_tot)) != 1.:
p_tot = old_div(array(p_tot), sum(array(p_tot)))
# Define the peak
iz_b = argmax(p_bayes)
zb = z[iz_b]
# OKAY, NOW THAT GAUSSIAN CONVOLUTION BUG IS FIXED
# if pars.d['ONLY_TYPE']=='yes': zb=zb-dz/2. #This corrects a small bias
# else: zb=zb-dz #This corrects another small bias --DC
#Integrate within a ~ oi*sigma interval to estimate
# the odds. (based on a sigma=pars.d['MIN_RMS']*(1+z))
#Look for the number of sigma corresponding
#to the odds_i confidence limit
zo1 = zb - oi * pars.d['MIN_RMS'] * (1. + zb)
zo2 = zb + oi * pars.d['MIN_RMS'] * (1. + zb)
if pars.d['Z_THR'] > 0:
zo1 = float(pars.d['Z_THR'])
zo2 = float(pars.d['ZMAX'])
o = odds(p_bayes[:nz], z, zo1, zo2)
# Integrate within the same odds interval to find the type
# izo1=maximum(0,searchsorted(z,zo1)-1)
# izo2=minimum(nz,searchsorted(z,zo2))
# t_b=argmax(add.reduce(p[izo1:izo2,:nt],0))
it_b = argmax(pb[iz_b, :nt])
t_b = it_b + 1
if ninterp:
tt_b = old_div(float(it_b), (1. + ninterp))
tt_ml = old_div(float(t_ml), (1. + ninterp))
else:
tt_b = it_b
tt_ml = t_ml
if max(pb[iz_b, :]) < 1e-300:
print('NO CLEAR BEST t_b; ALL PROBABILITIES ZERO')
t_b = -1.
tt_b = -1.
#print it_b, t_b, tt_b, pb.shape
if 0:
print(f_mod[iz_b, it_b, :nf])
print(min(ravel(p_i)), max(ravel(p_i)))
print(min(ravel(p)), max(ravel(p)))
print(p_i[iz_b, :])
print(p[iz_b, :])
print(p_i[iz_b, it_b]) # prior
print(p[iz_b, it_b]) # chisq
print(likelihood.likelihood[iz_b, it_b])
print(likelihood.chi2[iz_b, it_b])
print(likelihood.ftt[iz_b, it_b])
print(likelihood.foo)
print()
print('t_b', t_b)
print('iz_b', iz_b)
print('nt', nt)
print(max(ravel(pb)))
impb = argmax(ravel(pb))
impbz = old_div(impb, nt)
impbt = impb % nt
print(impb, impbz, impbt)
print(ravel(pb)[impb])
print(pb.shape, (nz, nt))
print(pb[impbz, impbt])
print(pb[iz_b, it_b])
print('z, t', z[impbz], t_b)
print(t_b)
# Redshift confidence limits
z1, z2 = interval(p_bayes[:nz], z, odds_i)
if pars.d['PHOTO_ERRORS'] == 'no':
zo1 = zb - oi * pars.d['MIN_RMS'] * (1. + zb)
zo2 = zb + oi * pars.d['MIN_RMS'] * (1. + zb)
if zo1 < z1: z1 = maximum(0., zo1)
if zo2 > z2: z2 = zo2
# Print output
if pars.d['N_PEAKS'] == 1:
salida = [id[ig], zb, z1, z2, tt_b + 1, o, z[iz_ml], tt_ml + 1,
red_chi2]
else:
salida = [id[ig]]
for k in range(pars.d['N_PEAKS']):
if k <= len(p_tot) - 1:
salida = salida + list(z_peaks[k]) + [t_peaks[k] + 1, p_tot[k]]
else:
salida += [-1., -1., -1., -1., -1.]
salida += [z[iz_ml], tt_ml + 1, red_chi2]
if 'Z_S' in col_pars.d: salida.append(z_s[ig])
if has_mags: salida.append(m_0[ig] - pars.d['DELTA_M_0'])
if 'OTHER' in col_pars.d: salida.append(other[ig])
if get_z: output.write(format % tuple(salida) + '\n')
if pars.d['VERBOSE'] == 'yes': print(format % tuple(salida))
#try:
# if sometrue(greater(z_peaks,7.5)):
# connect(z,p_bayes)
# ask('More?')
#except:
# pass
odd_check = odds_i
if checkSED:
ft = f_mod[iz_b, it_b, :]
fo = f_obs[ig, :]
efo = ef_obs[ig, :]
dfosq = (old_div((ft - fo), efo))**2
if 0:
print(ft)
print(fo)
print(efo)
print(dfosq)
pause()
factor = ft / efo / efo
ftt = add.reduce(ft * factor)
fot = add.reduce(fo * factor)
am = old_div(fot, ftt)
ft = ft * am
if 0:
print(factor)
print(ftt)
print(fot)
print(am)
print(ft)
print()
pause()
flux_comparison = [id[ig], m_0[ig], z[iz_b], t_b, am] + list(
concatenate([ft, fo, efo]))
nfc = len(flux_comparison)
format_fc = '%s %.2f %.2f %i' + (nfc - 4) * ' %.3e' + '\n'
buffer_flux_comparison = buffer_flux_comparison + format_fc % tuple(
flux_comparison)
if o >= odd_check:
# PHOTOMETRIC CALIBRATION CHECK
# Calculate flux ratios, but only for objects with ODDS >= odd_check
# (odd_check = 0.95 by default)
# otherwise, leave weight w = 0 by default
eps = 1e-10
frat[ig, :] = divsafe(fo, ft, inf=eps, nan=eps)
#fw[ig,:] = greater(fo, 0)
fw[ig, :] = divsafe(fo, efo, inf=1e8, nan=0)
fw[ig, :] = clip(fw[ig, :], 0, 100)
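            # frat stores per-band flux ratios f_obs/f_model; fw weights each band by its
            # signal-to-noise f_obs/ef_obs (clipped to [0, 100]) for the calibration summary printed later.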
#print fw[ig,:]
#print
if 0:
bad = less_equal(ft, 0.)
#Avoid overflow by setting r to 0.
fo = where(bad, 0., fo)
ft = where(bad, 1., ft)
r[ig, :] = old_div(fo, ft)
try:
dm[ig, :] = -flux2mag(old_div(fo, ft))
except:
dm[ig, :] = -100
# Clip ratio between 0.01 & 100
r[ig, :] = where(greater(r[ig, :], 100.), 100., r[ig, :])
r[ig, :] = where(less_equal(r[ig, :], 0.), 0.01, r[ig, :])
#Weight by flux
w[ig, :] = where(greater(fo, 0.), 1, 0.)
#w[ig,:]=where(greater(fo,0.),fo,0.)
#print fo
#print r[ig,:]
#print
                # This is no good because r is always > 0 (has been clipped that way)
#w[ig,:]=where(greater(r[ig,:],0.),fo,0.)
# The is bad because it would include non-detections:
#w[ig,:]=where(greater(r[ig,:],0.),1.,0.)
if save_probs:
texto = '%s ' % str(id[ig])
texto += len(p_bayes) * '%.3e ' + '\n'
probs.write(texto % tuple(p_bayes))
# pb[z,t] -> p_bayes[z]
# 1. tb are summed over
# 2. convolved with Gaussian if CONVOLVE_P
# 3. Clipped above P_MIN * max(P), where P_MIN = 0.01 by default
# 4. normalized such that sum(P(z)) = 1
if save_probs2: # P = exp(-chisq / 2)
#probs2.write('%s\n' % id[ig])
pmin = pmax * float(pars.d['P_MIN'])
#pb = where(less(pb,pmin), 0, pb)
chisq = -2 * log(pb)
for itb in range(nt):
chisqtb = chisq[:, itb]
pqual = greater(pb[:, itb], pmin)
chisqlists = seglist(chisqtb, pqual)
if len(chisqlists) == 0:
continue
#print pb[:,itb]
#print chisqlists
zz = arange(zmin, zmax + dz, dz)
zlists = seglist(zz, pqual)
for i in range(len(zlists)):
probs2.write('%s %2d %.3f ' %
(id[ig], itb + 1, zlists[i][0]))
fmt = len(chisqlists[i]) * '%4.2f ' + '\n'
probs2.write(fmt % tuple(chisqlists[i]))
#fmt = len(chisqtb) * '%4.2f '+'\n'
#probs2.write('%d ' % itb)
#probs2.write(fmt % tuple(chisqtb))
#if checkSED: open(pars.d['FLUX_COMPARISON'],'w').write(buffer_flux_comparison)
if checkSED: open(pars.d['CHECK'], 'w').write(buffer_flux_comparison)
if get_z: output.close()
#if checkSED and get_z:
if checkSED:
#try:
if 1:
if interactive:
print("")
print("")
print("PHOTOMETRIC CALIBRATION TESTS")
# See PHOTOMETRIC CALIBRATION CHECK above
#ratios=add.reduce(w*r,0)/add.reduce(w,0)
#print "Average, weighted by flux ratios f_obs/f_model for objects with odds >= %g" % odd_check
#print len(filters)*' %s' % tuple(filters)
#print nf*' % 7.3f ' % tuple(ratios)
#print "Corresponding zero point shifts"
#print nf*' % 7.3f ' % tuple(-flux2mag(ratios))
#print
fratavg = old_div(sum(fw * frat, axis=0), sum(fw, axis=0))
dmavg = -flux2mag(fratavg)
fnobj = sum(greater(fw, 0), axis=0)
#print 'fratavg', fratavg
#print 'dmavg', dmavg
#print 'fnobj', fnobj
#fnobj = sum(greater(w[:,i],0))
print(
"If the dmag are large, add them to the .columns file (zp_offset), then re-run BPZ.")
print(
"(For better results, first re-run with -ONLY_TYPE yes to fit SEDs to known spec-z.)")
print()
print(' fo/ft dmag nobj filter')
#print nf
for i in range(nf):
print('% 7.3f % 7.3f %5d %s'\
% (fratavg[i], dmavg[i], fnobj[i], filters[i]))
#% (ratios[i], -flux2mag(ratios)[i], sum(greater(w[:,i],0)), filters[i])
#print ' fo/ft dmag filter'
#for i in range(nf):
# print '% 7.3f % 7.3f %s' % (ratios[i], -flux2mag(ratios)[i], filters[i])
print(
"fo/ft = Average f_obs/f_model weighted by f_obs/ef_obs for objects with ODDS >= %g"
% odd_check)
print(
"dmag = magnitude offset which should be applied (added) to the photometry (zp_offset)")
print(
"nobj = # of galaxies considered in that filter (detected and high ODDS >= %g)"
% odd_check)
# print r
# print w
#print
#print "Number of galaxies considered (with ODDS >= %g):" % odd_check
#print ' ', sum(greater(w,0)) / float(nf)
#print '(Note a galaxy detected in only 5 / 6 filters counts as 5/6 = 0.833)'
#print sum(greater(w,0))
#This part is experimental and may not work in the general case
#print "Median color offsets for objects with odds > "+`odd_check`+" (not weighted)"
#print len(filters)*' %s' % tuple(filters)
#r=flux2mag(r)
#print nf*' %.3f ' % tuple(-median(r))
#print nf*' %.3f ' % tuple(median(dm))
#rms=[]
#efobs=[]
#for j in range(nf):
# ee=where(greater(f_obs[:,j],0.),f_obs[:,j],2.)
# zz=e_frac2mag(ef_obs[:,j]/ee)
#
# xer=arange(0.,1.,.02)
# hr=hist(abs(r[:,j]),xer)
# hee=hist(zz,xer)
# rms.append(std_log(compress(less_equal(r[:,j],1.),r[:,j])))
# zz=compress(less_equal(zz,1.),zz)
# efobs.append(sqrt(mean(zz*zz)))
#print nf*' %.3f ' % tuple(rms)
#print nf*' %.3f ' % tuple(efobs)
#print nf*' %.3f ' % tuple(sqrt(abs(array(rms)**2-array(efobs)**2)))
#except: pass
if save_full_probs: full_probs.close()
if save_probs: probs.close()
if save_probs2: probs2.close()
if plots and checkSED:
zb, zm, zb1, zb2, o, tb = get_data(out_name, (1, 6, 2, 3, 5, 4))
#Plot the comparison between z_spec and z_B
if 'Z_S' in col_pars.d:
if not interactive or ask('Compare z_B vs z_spec?'):
good = less(z_s, 9.99)
print(
'Total initial number of objects with spectroscopic redshifts= ',
sum(good))
od_th = 0.
if ask('Select for galaxy characteristics?\n'):
od_th = eval(input('Odds threshold?\n'))
good *= greater_equal(o, od_th)
t_min = eval(input('Minimum spectral type\n'))
t_max = eval(input('Maximum spectral type\n'))
good *= less_equal(tb, t_max) * greater_equal(tb, t_min)
if has_mags:
mg_min = eval(input('Bright magnitude limit?\n'))
mg_max = eval(input('Faint magnitude limit?\n'))
good = good * less_equal(m_0, mg_max) * greater_equal(
m_0, mg_min)
zmo, zso, zbo, zb1o, zb2o, tb = multicompress(good, (zm, z_s, zb,
zb1, zb2, tb))
print('Number of objects with odds > %.2f= %i ' %
(od_th, len(zbo)))
deltaz = old_div((zso - zbo), (1. + zso))
sz = stat_robust(deltaz, 3., 3)
sz.run()
outliers = greater_equal(abs(deltaz), 3. * sz.rms)
print('Number of outliers [dz >%.2f*(1+z)]=%i' %
(3. * sz.rms, add.reduce(outliers)))
catastrophic = greater_equal(deltaz * (1. + zso), 1.)
n_catast = sum(catastrophic)
print('Number of catastrophic outliers [dz >1]=', n_catast)
print('Delta z/(1+z) = %.4f +- %.4f' % (sz.median, sz.rms))
if interactive and plots:
if plots == 'pylab':
figure(2)
subplot(211)
plot(
arange(
min(zso), max(zso) + 0.01, 0.01), arange(
min(zso), max(zso) + 0.01, 0.01), "r")
errorbar(zso,
zbo, [abs(zbo - zb1o), abs(zb2o - zbo)],
fmt="bo")
xlabel(r'$z_{spec}$')
ylabel(r'$z_{bpz}$')
subplot(212)
plot(zso, zmo, "go", zso, zso, "r")
xlabel(r'$z_{spec}$')
ylabel(r'$z_{ML}$')
show()
elif plots == 'biggles':
plot = FramedPlot()
if len(zso) > 2000: symbol = 'dot'
else: symbol = 'circle'
plot.add(Points(zso, zbo, symboltype=symbol, color='blue'))
plot.add(Curve(zso, zso, linewidth=2., color='red'))
plot.add(ErrorBarsY(zso, zb1o, zb2o))
plot.xlabel = r'$z_{spec}$'
plot.ylabel = r'$z_{bpz}$'
# plot.xrange=0.,1.5
# plot.yrange=0.,1.5
plot.show()
#
plot_ml = FramedPlot()
if len(zso) > 2000: symbol = 'dot'
else: symbol = 'circle'
plot_ml.add(Points(
zso, zmo, symboltype=symbol,
color='blue'))
plot_ml.add(Curve(zso, zso, linewidth=2., color='red'))
plot_ml.xlabel = r"$z_{spec}$"
plot_ml.ylabel = r"$z_{ML}$"
plot_ml.show()
if interactive and plots and ask('Plot Bayesian photo-z histogram?'):
if plots == 'biggles':
dz = eval(input('Redshift interval?\n'))
od_th = eval(input('Odds threshold?\n'))
good = greater_equal(o, od_th)
if has_mags:
mg_min = eval(input('Bright magnitude limit?\n'))
mg_max = eval(input('Faint magnitude limit?\n'))
good = good * less_equal(m_0, mg_max) * greater_equal(m_0,
mg_min)
z = compress(good, zb)
xz = arange(zmin, zmax, dz)
hz = hist(z, xz)
plot = FramedPlot()
h = Histogram(hz, 0., dz, color='blue')
plot.add(h)
plot.xlabel = r'$z_{bpz}$'
plot.ylabel = r'$N(z_{bpz})$'
plot.show()
if ask('Want to save plot as eps file?'):
                file = input('File name?\n')
if file[-2:] != 'ps': file = file + '.eps'
plot.save_as_eps(file)
if interactive and plots and ask(
'Compare colors with photometric redshifts?'):
if plots == 'biggles':
color_m = zeros((nz, nt, nf - 1)) * 1.
for i in range(nf - 1):
plot = FramedPlot()
# Check for overflows
fmu = f_obs[:, i + 1]
fml = f_obs[:, i]
good = greater(fml, 1e-100) * greater(fmu, 1e-100)
zz, fmu, fml = multicompress(good, (zb, fmu, fml))
colour = old_div(fmu, fml)
colour = clip(colour, 1e-5, 1e5)
colour = 2.5 * log10(colour)
d = Points(zz, colour, color='blue')
plot.add(d)
for it in range(nt):
#Prevent overflows
fmu = f_mod[:, it, i + 1]
fml = f_mod[:, it, i]
good = greater(fml, 1e-100)
zz, fmu, fml = multicompress(good, (z, fmu, fml))
colour = old_div(fmu, fml)
colour = clip(colour, 1e-5, 1e5)
colour = 2.5 * log10(colour)
d = Curve(zz, colour, color='red')
plot.add(d)
plot.xlabel = r'$z$'
plot.ylabel = '%s - %s' % (filters[i], filters[i + 1])
plot.save_as_eps('%s-%s.eps' % (filters[i], filters[i + 1]))
plot.show()
rolex.check()
|
mit
| -4,512,998,537,696,326,700 | 34.037609 | 107 | 0.519695 | false |
xupingmao/xnote
|
handlers/search/note.py
|
1
|
1364
|
# -*- coding:utf-8 -*-
# Created by xupingmao on 2017/06/11
# @modified 2020/02/18 00:20:12
"""搜索知识库文件"""
import re
import sys
import six
import xutils
import xauth
import xmanager
import xconfig
import xtables
from xutils import textutil
from xutils import SearchResult, text_contains
NOTE_DAO = xutils.DAO("note")
def to_sqlite_obj(text):
if text is None:
return "NULL"
if not isinstance(text, six.string_types):
return repr(text)
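    # Escape embedded single quotes by doubling them (SQLite string-literal convention),
    # e.g. to_sqlite_obj("it's") -> "'it''s'"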
text = text.replace("'", "''")
return "'" + text + "'"
def filter_symbols(words):
new_words = []
for word in words:
word = re.sub("。", "", word)
if word == "":
continue
new_words.append(word)
return new_words
def search(ctx, expression=None):
words = ctx.words
files = []
words = filter_symbols(words)
if len(words) == 0:
return files
if ctx.search_note_content:
files += NOTE_DAO.search_content(words, xauth.current_name())
if ctx.search_note:
files += NOTE_DAO.search_name(words, xauth.current_name())
for item in files:
item.category = 'note'
    # put group results first
groups = list(filter(lambda x: x.type == "group", files))
text_files = list(filter(lambda x: x.type != "group", files))
files = groups + text_files
return files
|
gpl-3.0
| -1,848,475,807,109,575,400 | 22.137931 | 69 | 0.608793 | false |
michaelBenin/sqlalchemy
|
lib/sqlalchemy/testing/plugin/noseplugin.py
|
1
|
2735
|
# plugin/noseplugin.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Enhance nose with extra options and behaviors for running SQLAlchemy tests.
Must be run via ./sqla_nose.py so that it is imported in the expected
way (e.g. as a package-less import).
"""
import os
import sys
from nose.plugins import Plugin
fixtures = None
# no package imports yet! this prevents us from tripping coverage
# too soon.
path = os.path.join(os.path.dirname(__file__), "plugin_base.py")
if sys.version_info >= (3,3):
from importlib import machinery
plugin_base = machinery.SourceFileLoader("plugin_base", path).load_module()
else:
import imp
plugin_base = imp.load_source("plugin_base", path)
class NoseSQLAlchemy(Plugin):
enabled = True
name = 'sqla_testing'
score = 100
def options(self, parser, env=os.environ):
Plugin.options(self, parser, env)
opt = parser.add_option
def make_option(name, **kw):
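            # plugin_base supplies callbacks expecting (opt_str, value, parser); wrap them to
            # match optparse's (option, opt_str, value, parser) callback signature.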
callback_ = kw.pop("callback", None)
if callback_:
def wrap_(option, opt_str, value, parser):
callback_(opt_str, value, parser)
kw["callback"] = wrap_
opt(name, **kw)
plugin_base.setup_options(make_option)
plugin_base.read_config()
def configure(self, options, conf):
super(NoseSQLAlchemy, self).configure(options, conf)
plugin_base.pre_begin(options)
plugin_base.set_coverage_flag(options.enable_plugin_coverage)
global fixtures
from sqlalchemy.testing import fixtures
def begin(self):
plugin_base.post_begin()
def describeTest(self, test):
return ""
def wantFunction(self, fn):
if fn.__module__ is None:
return False
if fn.__module__.startswith('sqlalchemy.testing'):
return False
def wantClass(self, cls):
return plugin_base.want_class(cls)
def beforeTest(self, test):
plugin_base.before_test(test,
test.test.cls.__module__,
test.test.cls, test.test.method.__name__)
def afterTest(self, test):
plugin_base.after_test(test)
def startContext(self, ctx):
if not isinstance(ctx, type) \
or not issubclass(ctx, fixtures.TestBase):
return
plugin_base.start_test_class(ctx)
def stopContext(self, ctx):
if not isinstance(ctx, type) \
or not issubclass(ctx, fixtures.TestBase):
return
plugin_base.stop_test_class(ctx)
|
mit
| 1,261,401,323,931,881,700 | 28.095745 | 84 | 0.626325 | false |
gammu/wammu
|
Wammu/Logger.py
|
1
|
5021
|
# -*- coding: UTF-8 -*-
#
# Copyright © 2003 - 2018 Michal Čihař <michal@cihar.com>
#
# This file is part of Wammu <https://wammu.eu/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Wammu - Phone manager
Logging window and thread for log reading
'''
import threading
import wx
import os
import sys
import time
import Wammu.Events
from Wammu.Locales import ugettext as _
class LoggerDebug(threading.Thread):
'''
Thread which reads defined files and prints it to stderr.
'''
def __init__(self, filename):
'''
Initializes reader on filename, text will be printed to stderr.
'''
threading.Thread.__init__(self)
self.file_descriptor = open(filename, 'r')
self.filename = filename
self.canceled = False
def run(self):
"""
This is basically tail -f reimplementation
"""
while not self.canceled:
where = self.file_descriptor.tell()
txt = self.file_descriptor.readlines()
if len(txt) == 0:
fd_results = os.fstat(self.file_descriptor.fileno())
try:
st_results = os.stat(self.filename)
except OSError:
st_results = fd_results
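                # st_results[1] / fd_results[1] are st_ino: if the inode is
                # unchanged (inode checks are meaningless on win32) keep waiting
                # at the current offset; otherwise the log was rotated, so reopen.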
if st_results[1] == fd_results[1] or sys.platform == 'win32':
time.sleep(1)
self.file_descriptor.seek(where)
else:
self.file_descriptor = open(self.filename, 'r')
else:
sys.stderr.write(''.join(txt))
self.file_descriptor.close()
class Logger(threading.Thread):
'''
Thread which reads defined files and posts events on change.
'''
def __init__(self, win, filename):
'''
Initializes reader on filename, events will be sent to win.
'''
threading.Thread.__init__(self)
self.win = win
self.file_descriptor = open(filename, 'r')
self.filename = filename
self.canceled = False
def run(self):
"""
This is basically tail -f reimplementation
"""
while not self.canceled:
where = self.file_descriptor.tell()
txt = self.file_descriptor.readlines()
if len(txt) == 0:
fd_results = os.fstat(self.file_descriptor.fileno())
try:
st_results = os.stat(self.filename)
except OSError:
st_results = fd_results
if st_results[1] == fd_results[1] or sys.platform == 'win32':
time.sleep(1)
self.file_descriptor.seek(where)
else:
self.file_descriptor = open(self.filename, 'r')
else:
evt = Wammu.Events.LogEvent(txt=''.join(txt))
wx.PostEvent(self.win, evt)
self.file_descriptor.close()
class LogFrame(wx.Frame):
'''
Window with debug log.
'''
def __init__(self, parent, cfg):
'''
Creates window and initializes event handlers.
'''
self.cfg = cfg
if cfg.HasEntry('/Debug/X') and cfg.HasEntry('/Debug/Y'):
pos = wx.Point(
cfg.ReadInt('/Debug/X'),
cfg.ReadInt('/Debug/Y'))
else:
pos = wx.DefaultPosition
size = wx.Size(
cfg.ReadInt('/Debug/Width'),
cfg.ReadInt('/Debug/Height')
)
wx.Frame.__init__(
self,
parent,
-1,
_('Wammu debug log'),
pos,
size,
wx.DEFAULT_FRAME_STYLE | wx.RESIZE_BORDER
)
self.txt = wx.TextCtrl(
self,
-1,
_('Here will appear debug messages from Gammu…\n'),
style=wx.TE_MULTILINE | wx.TE_READONLY
)
self.txt.SetFont(wx.Font(9, wx.MODERN, wx.NORMAL, wx.NORMAL))
Wammu.Events.EVT_LOG(self, self.OnLog)
wx.EVT_SIZE(self, self.OnSize)
self.OnSize(None)
def OnLog(self, evt):
'''
Event handler for text events from Logger.
'''
self.txt.AppendText(evt.txt)
def OnSize(self, evt):
'''
Resize handler to correctly resize text area.
'''
width, height = self.GetClientSizeTuple()
self.txt.SetDimensions(0, 0, width, height)
|
gpl-3.0
| 47,544,733,082,102,024 | 30.746835 | 77 | 0.552033 | false |
inveniosoftware/invenio-migrator
|
tests/legacy/test_access_dump.py
|
1
|
2580
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016, 2017 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
from click.testing import CliRunner
from invenio_migrator.legacy.cli import dump as dump_cli
from invenio_migrator.legacy.access import (dump as dump_access, get as
get_access)
def test_dump_action():
"""Test single action dump."""
action = get_access('viewrestrcoll')
action_dump = dump_access(action[1][0])
assert action_dump
assert action_dump['name'] == 'viewrestrcoll'
assert len(action_dump['roles']) == 5
roles = action_dump['roles']
assert roles[0] == {
'users': {'jekyll@cds.cern.ch'},
'parameters': {'collection': {'Theses', 'Drafts'}, },
'name': 'thesesviewer',
'firerole_def': 'allow group "Theses and Drafts viewers"',
'id': 16L,
'description': 'Theses and Drafts viewer'
}
def test_get_action():
"""Test get action."""
action = get_access('viewrestrcoll')
assert action[0] == 1
assert action[1] == [
{'allowedkeywords': 'view restricted collection',
'optional': 'collection',
'id': 38L,
'name': 'viewrestrcoll'}
]
actions = get_access('%')
assert actions[0] == 63
assert actions[0] == len(actions[1])
actions = get_access('viewrestr%')
assert actions[0] == 3
assert [a['name'] for a in actions[1]
] == ['viewrestrcoll', 'viewrestrcomment', 'viewrestrdoc']
def test_cli():
"""Test CLI."""
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(dump_cli, ['access', '-q', '%'])
assert result.exit_code == 0
|
gpl-2.0
| -2,848,630,737,792,007,000 | 31.658228 | 76 | 0.646899 | false |
sebMathieu/dsima
|
simulator/src/agent/example.py
|
1
|
2631
|
##@package Example
# Example of agent-based system based on a supply and a demand curve
#@author Sebastien MATHIEU
from .agent import Agent
from .abstractSystem import AbstractSystem
from .layer import Layer
from .data import Data
import math
## Supply with the target supply function \f$\pi = 0.1 q^2 +2\f$
class Supply(Agent):
def initialize(self, data):
data.general['pi']=10.0 # Set a starting price
def act(self, data, layer):
# Compute the target price from the supply function
targetPrice=0.1*(data.general['q']**2)+2
# Take the mean between the last price and the target price.
data.general['pi']=(data.general['pi']+targetPrice)/2
        print("\tSupply proposes the price " + str(data.general['pi']))
## Demand with the inverse demand function \f$\pi = 40 - 0.05 q^2\f$
class Demand(Agent):
def initialize(self, data):
data.general['q']=0 # Initial quantity bought
def act(self, data, layer):
pi=data.general['pi']
if pi > 40.0: # Price to high, no demand
data.general['q']=0
else: # Demand function
data.general['q']=math.sqrt((40.0-data.general['pi'])/0.05)
        print("\tDemand buys the quantity " + str(data.general['q']))
## Agent based system definition
class System(AbstractSystem):
def __init__(self):
AbstractSystem.__init__(self)
self._lastPrice=None
self.generate()
## Generate the example system.
def generate(self):
# Create actors
supply=Supply()
demand=Demand()
# Create two layers with one actor in each.
layerSupply=Layer([supply])
layerDemand=Layer([demand])
# Add the layers to the layer list
        self.layerList.append(layerDemand) # First the system calls the demand side
        self.layerList.append(layerSupply) # Then the system calls the supply side
def hasConverged(self):
oldPrice=self._lastPrice
self._lastPrice=self.data.general['pi']
if oldPrice == None:
return None
elif abs(oldPrice - self._lastPrice) < 0.001: # Convergence if the price does not change.
return "System has converged."
else:
return None
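# Note (not part of the original example): the fixed point of these two curves is
# 0.1*q**2 + 2 == 40 - 0.05*q**2, i.e. q = sqrt(38/0.15) ~= 15.92 and pi = 82/3 ~= 27.33,
# which is where the price iteration above settles.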
# Starting point from python #
if __name__ == "__main__":
system=System()
    print("Starting the agent-based simulation...")
convergence=system.run()
print("\nNumber of iterations : "+str(system.iterations))
print(convergence)
|
bsd-3-clause
| -5,392,300,815,870,196,000 | 33.106667 | 97 | 0.600912 | false |
odoo-arg/odoo_l10n_ar
|
l10n_ar_invoice_presentation/models/presentation_header.py
|
1
|
2454
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.exceptions import Warning
from openerp import models
import l10n_ar_api.presentations.presentation as presentation_builder
class AccountInvoicePresentation(models.Model):
_inherit = 'account.invoice.presentation'
def validate_header(self):
"""
        Validate that the company has the required data.
"""
if not self.company_id.partner_id.vat:
raise Warning(
"ERROR\nLa presentacion no pudo ser generada porque la compania no tiene CUIT\n"
)
def generate_header_file(self):
"""
        Generate the header file. It uses the presentations API and tools to
        create the files and format the data.
        :return: API object (generator) with the presentation lines created.
"""
self.validate_header()
cabecera = presentation_builder.Presentation("ventasCompras", "cabecera")
line = cabecera.create_line()
line.cuit = self.company_id.vat
line.periodo = self.get_period()
line.secuencia = self.sequence
line.sinMovimiento = 'S'
if self.with_prorate:
line.prorratearCFC = 'S'
line.cFCGlobal = '1'
else:
line.prorratearCFC = 'N'
line.cFCGlobal = '2'
line.importeCFCG = 0
line.importeCFCAD = 0
line.importeCFCP = 0
line.importeCFnCG = 0
line.cFCSSyOC = 0
line.cFCCSSyOC = 0
return cabecera
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| 4,813,481,864,961,278,000 | 35.626866 | 112 | 0.614914 | false |
ninadpdr/python-saml
|
setup.py
|
1
|
1286
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, OneLogin, Inc.
# All rights reserved.
from setuptools import setup
setup(
name='python-saml',
version='2.1.5',
description='Onelogin Python Toolkit. Add SAML support to your Python software using this library',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
],
author='OneLogin',
author_email='support@onelogin.com',
license='MIT',
url='https://github.com/onelogin/python-saml',
packages=['onelogin','onelogin/saml2'],
include_package_data=True,
package_data = {
'onelogin/saml2/schemas': ['*.xsd'],
},
package_dir={
'': 'src',
},
test_suite='tests',
install_requires=[
'dm.xmlsec.binding==1.3.2',
'isodate==0.5.0',
'defusedxml==0.4.1',
],
extras_require={
'test': (
'coverage==3.7.1',
'pylint==1.3.1',
'pep8==1.5.7',
'pyflakes==0.8.1',
'coveralls==0.4.4',
),
},
keywords='saml saml2 xmlsec django flask',
)
|
bsd-3-clause
| -1,276,905,179,418,649,900 | 25.244898 | 103 | 0.5521 | false |
google/assetMG
|
app/backend/helpers.py
|
1
|
1488
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions to be used by the backend."""
from google.ads.googleads.client import GoogleAdsClient
def populate_adgroup_details(client, account, ag_id):
"""Gets an adgroup ID and returns an adgroup object including
adgroup id, adgroup name and campaign name."""
ga_service = client.get_service('GoogleAdsService', version='v7')
query = '''
SELECT
campaign.name,
ad_group.name,
ad_group.id
FROM
ad_group
WHERE
ad_group.id = %s
''' % (ag_id)
request = client.get_type("SearchGoogleAdsStreamRequest")
request.customer_id = account
request.query = query
response = ga_service.search_stream(request=request)
for batch in response:
for row in batch.results:
return {
'adgroup_id': row.ad_group.id,
'adgroup_name': row.ad_group.name,
'campaign_name': row.campaign.name
}
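# Hypothetical usage (IDs below are placeholders, not real accounts):
#   client = GoogleAdsClient.load_from_storage()
#   details = populate_adgroup_details(client, '1234567890', 9876543210)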
|
apache-2.0
| -277,142,481,400,671,940 | 30.020833 | 74 | 0.686156 | false |
aaltinisik/OCBAltinkaya
|
addons/email_template/email_template.py
|
1
|
31022
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009 Sharoon Thomas
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import base64
import datetime
import dateutil.relativedelta as relativedelta
import logging
import lxml
import urlparse
import openerp
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp import tools, api
from openerp.tools.translate import _
from urllib import urlencode, quote as quote
_logger = logging.getLogger(__name__)
def format_tz(pool, cr, uid, dt, tz=False, format=False, context=None):
context = dict(context or {})
if tz:
context['tz'] = tz or pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz'] or "UTC"
timestamp = datetime.datetime.strptime(dt, tools.DEFAULT_SERVER_DATETIME_FORMAT)
ts = fields.datetime.context_timestamp(cr, uid, timestamp, context)
# Babel allows to format datetime in a specific language without change locale
# So month 1 = January in English, and janvier in French
# Be aware that the default value for format is 'medium', instead of 'short'
# medium: Jan 5, 2016, 10:20:31 PM | 5 janv. 2016 22:20:31
# short: 1/5/16, 10:20 PM | 5/01/16 22:20
if context.get('use_babel'):
# Formatting available here : http://babel.pocoo.org/en/latest/dates.html#date-fields
from babel.dates import format_datetime
return format_datetime(ts, format or 'medium', locale=context.get("lang") or 'en_US')
if format:
return ts.strftime(format)
else:
lang = context.get("lang")
lang_params = {}
if lang:
res_lang = pool.get('res.lang')
ids = res_lang.search(cr, uid, [("code", "=", lang)])
if ids:
lang_params = res_lang.read(cr, uid, ids[0], ["date_format", "time_format"])
format_date = lang_params.get("date_format", '%B-%d-%Y')
format_time = lang_params.get("time_format", '%I-%M %p')
fdate = ts.strftime(format_date).decode('utf-8')
ftime = ts.strftime(format_time).decode('utf-8')
return "%s %s%s" % (fdate, ftime, (' (%s)' % tz) if tz else '')
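# Illustrative template usage (assumed, not taken from a shipped template):
#   ${format_tz(object.create_date, tz='UTC', format='%Y-%m-%d %H:%M')}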
try:
# We use a jinja2 sandboxed environment to render mako templates.
# Note that the rendering does not cover all the mako syntax, in particular
# arbitrary Python statements are not accepted, and not all expressions are
# allowed: only "public" attributes (not starting with '_') of objects may
# be accessed.
# This is done on purpose: it prevents incidental or malicious execution of
# Python code that may break the security of the server.
from jinja2.sandbox import SandboxedEnvironment
mako_template_env = SandboxedEnvironment(
block_start_string="<%",
block_end_string="%>",
variable_start_string="${",
variable_end_string="}",
comment_start_string="<%doc>",
comment_end_string="</%doc>",
line_statement_prefix="%",
line_comment_prefix="##",
trim_blocks=True, # do not output newline after blocks
autoescape=True, # XML/HTML automatic escaping
)
mako_template_env.globals.update({
'str': str,
'quote': quote,
'urlencode': urlencode,
'datetime': tools.wrap_module(datetime, []),
'len': len,
'abs': abs,
'min': min,
'max': max,
'sum': sum,
'filter': filter,
'reduce': reduce,
'map': map,
'round': round,
# dateutil.relativedelta is an old-style class and cannot be directly
    # instantiated within a jinja2 expression, so a lambda "proxy"
    # is needed, apparently.
'relativedelta': lambda *a, **kw : relativedelta.relativedelta(*a, **kw),
})
except ImportError:
_logger.warning("jinja2 not available, templating features will not work!")
class email_template(osv.osv):
"Templates for sending email"
_name = "email.template"
_description = 'Email Templates'
_order = 'name'
def default_get(self, cr, uid, fields, context=None):
res = super(email_template, self).default_get(cr, uid, fields, context)
if res.get('model'):
res['model_id'] = self.pool['ir.model'].search(cr, uid, [('model', '=', res.pop('model'))], context=context)[0]
return res
def _replace_local_links(self, cr, uid, html, context=None):
""" Post-processing of html content to replace local links to absolute
links, using web.base.url as base url. """
if not html:
return html
# form a tree
root = lxml.html.fromstring(html)
if not len(root) and root.text is None and root.tail is None:
html = '<div>%s</div>' % html
root = lxml.html.fromstring(html)
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
(base_scheme, base_netloc, bpath, bparams, bquery, bfragment) = urlparse.urlparse(base_url)
def _process_link(url):
new_url = url
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
if not scheme and not netloc:
new_url = urlparse.urlunparse((base_scheme, base_netloc, path, params, query, fragment))
return new_url
# check all nodes, replace :
# - img src -> check URL
# - a href -> check URL
for node in root.iter():
if node.tag == 'a' and node.get('href'):
node.set('href', _process_link(node.get('href')))
elif node.tag == 'img' and not node.get('src', 'data').startswith('data'):
node.set('src', _process_link(node.get('src')))
html = lxml.html.tostring(root, pretty_print=False, method='html',encoding='unicode')
# this is ugly, but lxml/etree tostring want to put everything in a 'div' that breaks the editor -> remove that
if html.startswith('<div>') and html.endswith('</div>'):
html = html[5:-6]
return html
def render_post_process(self, cr, uid, html, context=None):
html = self._replace_local_links(cr, uid, html, context=context)
return html
def render_template_batch(self, cr, uid, template, model, res_ids, context=None, post_process=False):
"""Render the given template text, replace mako expressions ``${expr}``
with the result of evaluating these expressions with
an evaluation context containing:
* ``user``: browse_record of the current user
* ``object``: browse_record of the document record this mail is
related to
* ``context``: the context passed to the mail composition wizard
:param str template: the template text to render
:param str model: model name of the document record this mail is related to.
:param int res_ids: list of ids of document records those mails are related to.
"""
if context is None:
context = {}
res_ids = filter(None, res_ids) # to avoid browsing [None] below
results = dict.fromkeys(res_ids, u"")
# try to load the template
try:
template = mako_template_env.from_string(tools.ustr(template))
except Exception:
_logger.exception("Failed to load template %r", template)
return results
# prepare template variables
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
records = self.pool[model].browse(cr, uid, res_ids, context=context) or [None]
variables = {
'format_tz': lambda dt, tz=False, format=False, context=context: format_tz(self.pool, cr, uid, dt, tz, format, context),
'user': user,
'ctx': context, # context kw would clash with mako internals
}
for record in records:
res_id = record.id if record else None
variables['object'] = record
try:
render_result = template.render(variables)
except Exception:
_logger.exception("Failed to render template %r using values %r" % (template, variables))
render_result = u""
if render_result == u"False":
render_result = u""
results[res_id] = render_result
if post_process:
for res_id, result in results.iteritems():
results[res_id] = self.render_post_process(cr, uid, result, context=context)
return results
def get_email_template_batch(self, cr, uid, template_id=False, res_ids=None, context=None):
if context is None:
context = {}
if res_ids is None:
res_ids = [None]
results = dict.fromkeys(res_ids, False)
if not template_id:
return results
template = self.browse(cr, uid, template_id, context)
langs = self.render_template_batch(cr, uid, template.lang, template.model, res_ids, context)
for res_id, lang in langs.iteritems():
if lang:
# Use translated template if necessary
ctx = context.copy()
ctx['lang'] = lang
template = self.browse(cr, uid, template.id, ctx)
else:
template = self.browse(cr, uid, int(template_id), context)
results[res_id] = template
return results
def onchange_model_id(self, cr, uid, ids, model_id, context=None):
mod_name = False
if model_id:
mod_name = self.pool.get('ir.model').browse(cr, uid, model_id, context).model
return {'value': {'model': mod_name}}
_columns = {
'name': fields.char('Name'),
        'model_id': fields.many2one('ir.model', 'Applies to', help="The kind of document with which this template can be used"),
'model': fields.related('model_id', 'model', type='char', string='Related Document Model',
select=True, store=True, readonly=True),
'lang': fields.char('Language',
help="Optional translation language (ISO code) to select when sending out an email. "
"If not set, the english version will be used. "
"This should usually be a placeholder expression "
"that provides the appropriate language, e.g. "
"${object.partner_id.lang}.",
placeholder="${object.partner_id.lang}"),
'user_signature': fields.boolean('Add Signature',
help="If checked, the user's signature will be appended to the text version "
"of the message"),
'subject': fields.char('Subject', translate=True, help="Subject (placeholders may be used here)",),
'email_from': fields.char('From',
help="Sender address (placeholders may be used here). If not set, the default "
"value will be the author's email alias if configured, or email address."),
'use_default_to': fields.boolean(
'Default recipients',
help="Default recipients of the record:\n"
"- partner (using id on a partner or the partner_id field) OR\n"
"- email (using email_from or email field)"),
'email_to': fields.char('To (Emails)', help="Comma-separated recipient addresses (placeholders may be used here)"),
'partner_to': fields.char('To (Partners)',
help="Comma-separated ids of recipient partners (placeholders may be used here)",
oldname='email_recipients'),
'email_cc': fields.char('Cc', help="Carbon copy recipients (placeholders may be used here)"),
'reply_to': fields.char('Reply-To', help="Preferred response address (placeholders may be used here)"),
'mail_server_id': fields.many2one('ir.mail_server', 'Outgoing Mail Server', readonly=False,
help="Optional preferred server for outgoing mails. If not set, the highest "
"priority one will be used."),
'body_html': fields.html('Body', translate=True, sanitize=False, help="Rich-text/HTML version of the message (placeholders may be used here)"),
'report_name': fields.char('Report Filename', translate=True,
help="Name to use for the generated report file (may contain placeholders)\n"
"The extension can be omitted and will then come from the report type."),
'report_template': fields.many2one('ir.actions.report.xml', 'Optional report to print and attach'),
'ref_ir_act_window': fields.many2one('ir.actions.act_window', 'Sidebar action', readonly=True, copy=False,
help="Sidebar action to make this template available on records "
"of the related document model"),
'ref_ir_value': fields.many2one('ir.values', 'Sidebar Button', readonly=True, copy=False,
help="Sidebar button to open the sidebar action"),
'attachment_ids': fields.many2many('ir.attachment', 'email_template_attachment_rel', 'email_template_id',
'attachment_id', 'Attachments',
help="You may attach files to this template, to be added to all "
"emails created from this template"),
'auto_delete': fields.boolean('Auto Delete', help="Permanently delete this email after sending it, to save space"),
# Fake fields used to implement the placeholder assistant
'model_object_field': fields.many2one('ir.model.fields', string="Field",
help="Select target field from the related document model.\n"
"If it is a relationship field you will be able to select "
"a target field at the destination of the relationship."),
'sub_object': fields.many2one('ir.model', 'Sub-model', readonly=True,
help="When a relationship field is selected as first field, "
"this field shows the document model the relationship goes to."),
'sub_model_object_field': fields.many2one('ir.model.fields', 'Sub-field',
help="When a relationship field is selected as first field, "
"this field lets you select the target field within the "
"destination document model (sub-model)."),
'null_value': fields.char('Default Value', help="Optional value to use if the target field is empty"),
'copyvalue': fields.char('Placeholder Expression', help="Final placeholder expression, to be copy-pasted in the desired template field."),
}
_defaults = {
'auto_delete': True,
}
def create_action(self, cr, uid, ids, context=None):
action_obj = self.pool.get('ir.actions.act_window')
data_obj = self.pool.get('ir.model.data')
for template in self.browse(cr, uid, ids, context=context):
src_obj = template.model_id.model
model_data_id = data_obj._get_id(cr, uid, 'mail', 'email_compose_message_wizard_form')
res_id = data_obj.browse(cr, uid, model_data_id, context=context).res_id
button_name = _('Send Mail (%s)') % template.name
act_id = action_obj.create(cr, uid, {
'name': button_name,
'type': 'ir.actions.act_window',
'res_model': 'mail.compose.message',
'src_model': src_obj,
'view_type': 'form',
'context': "{'default_composition_mode': 'mass_mail', 'default_template_id' : %d, 'default_use_template': True}" % (template.id),
'view_mode':'form,tree',
'view_id': res_id,
'target': 'new',
'auto_refresh':1
}, context)
ir_values_id = self.pool.get('ir.values').create(cr, uid, {
'name': button_name,
'model': src_obj,
'key2': 'client_action_multi',
'value': "ir.actions.act_window,%s" % act_id,
'object': True,
}, context)
template.write({
'ref_ir_act_window': act_id,
'ref_ir_value': ir_values_id,
})
return True
def unlink_action(self, cr, uid, ids, context=None):
for template in self.browse(cr, uid, ids, context=context):
try:
if template.ref_ir_act_window:
self.pool.get('ir.actions.act_window').unlink(cr, uid, template.ref_ir_act_window.id, context)
if template.ref_ir_value:
ir_values_obj = self.pool.get('ir.values')
ir_values_obj.unlink(cr, uid, template.ref_ir_value.id, context)
except Exception:
raise osv.except_osv(_("Warning"), _("Deletion of the action record failed."))
return True
def unlink(self, cr, uid, ids, context=None):
self.unlink_action(cr, uid, ids, context=context)
return super(email_template, self).unlink(cr, uid, ids, context=context)
def copy(self, cr, uid, id, default=None, context=None):
template = self.browse(cr, uid, id, context=context)
default = dict(default or {},
name=_("%s (copy)") % template.name)
return super(email_template, self).copy(cr, uid, id, default, context)
def build_expression(self, field_name, sub_field_name, null_value):
"""Returns a placeholder expression for use in a template field,
based on the values provided in the placeholder assistant.
:param field_name: main field name
:param sub_field_name: sub field name (M2O)
:param null_value: default value if the target value is empty
:return: final placeholder expression
"""
expression = ''
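        # e.g. build_expression('partner_id', 'name', 'n/a')
        #   -> "${object.partner_id.name or '''n/a'''}"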
if field_name:
expression = "${object." + field_name
if sub_field_name:
expression += "." + sub_field_name
if null_value:
expression += " or '''%s'''" % null_value
expression += "}"
return expression
def onchange_sub_model_object_value_field(self, cr, uid, ids, model_object_field, sub_model_object_field=False, null_value=None, context=None):
result = {
'sub_object': False,
'copyvalue': False,
'sub_model_object_field': False,
'null_value': False
}
if model_object_field:
fields_obj = self.pool.get('ir.model.fields')
field_value = fields_obj.browse(cr, uid, model_object_field, context)
if field_value.ttype in ['many2one', 'one2many', 'many2many']:
res_ids = self.pool.get('ir.model').search(cr, uid, [('model', '=', field_value.relation)], context=context)
sub_field_value = False
if sub_model_object_field:
sub_field_value = fields_obj.browse(cr, uid, sub_model_object_field, context)
if res_ids:
result.update({
'sub_object': res_ids[0],
'copyvalue': self.build_expression(field_value.name, sub_field_value and sub_field_value.name or False, null_value or False),
'sub_model_object_field': sub_model_object_field or False,
'null_value': null_value or False
})
else:
result.update({
'copyvalue': self.build_expression(field_value.name, False, null_value or False),
'null_value': null_value or False
})
return {'value': result}
    def generate_recipients_batch(self, cr, uid, results, template_id, res_ids, context=None):
        """Generates the recipients of the template. Default values can be generated
instead of the template values if requested by template or context.
Emails (email_to, email_cc) can be transformed into partners if requested
in the context. """
if context is None:
context = {}
template = self.browse(cr, uid, template_id, context=context)
if template.use_default_to or context.get('tpl_force_default_to'):
ctx = dict(context, thread_model=template.model)
default_recipients = self.pool['mail.thread'].message_get_default_recipients(cr, uid, res_ids, context=ctx)
for res_id, recipients in default_recipients.iteritems():
results[res_id].pop('partner_to', None)
results[res_id].update(recipients)
for res_id, values in results.iteritems():
partner_ids = values.get('partner_ids', list())
if context and context.get('tpl_partners_only'):
mails = tools.email_split(values.pop('email_to', '')) + tools.email_split(values.pop('email_cc', ''))
for mail in mails:
partner_id = self.pool.get('res.partner').find_or_create(cr, uid, mail, context=context)
partner_ids.append(partner_id)
partner_to = values.pop('partner_to', '')
if partner_to:
# placeholders could generate '', 3, 2 due to some empty field values
tpl_partner_ids = [int(pid) for pid in partner_to.split(',') if pid]
partner_ids += self.pool['res.partner'].exists(cr, SUPERUSER_ID, tpl_partner_ids, context=context)
results[res_id]['partner_ids'] = partner_ids
return results
    def generate_email_batch(self, cr, uid, template_id, res_ids, context=None, fields=None):
        """Generates an email from the template for the given model based on
records given by res_ids.
:param template_id: id of the template to render.
:param res_id: id of the record to use for rendering the template (model
is taken from template definition)
:returns: a dict containing all relevant fields for creating a new
mail.mail entry, with one extra key ``attachments``, in the
format [(report_name, data)] where data is base64 encoded.
"""
if context is None:
context = {}
if fields is None:
fields = ['subject', 'body_html', 'email_from', 'email_to', 'partner_to', 'email_cc', 'reply_to']
report_xml_pool = self.pool.get('ir.actions.report.xml')
res_ids_to_templates = self.get_email_template_batch(cr, uid, template_id, res_ids, context)
# templates: res_id -> template; template -> res_ids
templates_to_res_ids = {}
for res_id, template in res_ids_to_templates.iteritems():
templates_to_res_ids.setdefault(template, []).append(res_id)
results = dict()
for template, template_res_ids in templates_to_res_ids.iteritems():
# generate fields value for all res_ids linked to the current template
ctx = context.copy()
if template.lang:
ctx['lang'] = template._context.get('lang')
for field in fields:
generated_field_values = self.render_template_batch(
cr, uid, getattr(template, field), template.model, template_res_ids,
post_process=(field == 'body_html'),
context=ctx)
for res_id, field_value in generated_field_values.iteritems():
results.setdefault(res_id, dict())[field] = field_value
# compute recipients
results = self.generate_recipients_batch(cr, uid, results, template.id, template_res_ids, context=context)
# update values for all res_ids
for res_id in template_res_ids:
values = results[res_id]
# body: add user signature, sanitize
if 'body_html' in fields and template.user_signature:
signature = self.pool.get('res.users').browse(cr, uid, uid, context).signature
if signature:
values['body_html'] = tools.append_content_to_html(values['body_html'], signature, plaintext=False)
if values.get('body_html'):
values['body'] = tools.html_sanitize(values['body_html'])
# technical settings
values.update(
mail_server_id=template.mail_server_id.id or False,
auto_delete=template.auto_delete,
model=template.model,
res_id=res_id or False,
attachment_ids=[attach.id for attach in template.attachment_ids],
)
# Add report in attachments: generate once for all template_res_ids
if template.report_template:
# Fix : Force report to use res ids and not active_ids
if ctx and 'active_ids' in ctx:
del ctx['active_ids']
for res_id in template_res_ids:
attachments = []
report_name = self.render_template(cr, uid, template.report_name, template.model, res_id, context=ctx)
report = report_xml_pool.browse(cr, uid, template.report_template.id, context)
report_service = report.report_name
if report.report_type in ['qweb-html', 'qweb-pdf']:
result, format = self.pool['report'].get_pdf(cr, uid, [res_id], report_service, context=ctx), 'pdf'
else:
result, format = openerp.report.render_report(cr, uid, [res_id], report_service, {'model': template.model}, ctx)
# TODO in trunk, change return format to binary to match message_post expected format
result = base64.b64encode(result)
if not report_name:
report_name = 'report.' + report_service
ext = "." + format
if not report_name.endswith(ext):
report_name += ext
attachments.append((report_name, result))
results[res_id]['attachments'] = attachments
return results
@api.cr_uid_id_context
def send_mail(self, cr, uid, template_id, res_id, force_send=False, raise_exception=False, context=None):
"""Generates a new mail message for the given template and record,
and schedules it for delivery through the ``mail`` module's scheduler.
:param int template_id: id of the template to render
:param int res_id: id of the record to render the template with
(model is taken from the template)
:param bool force_send: if True, the generated mail.message is
immediately sent after being created, as if the scheduler
was executed for this message only.
:returns: id of the mail.message that was created
"""
if context is None:
context = {}
mail_mail = self.pool.get('mail.mail')
ir_attachment = self.pool.get('ir.attachment')
# create a mail_mail based on values, without attachments
values = self.generate_email(cr, uid, template_id, res_id, context=context)
if not values.get('email_from'):
raise osv.except_osv(_('Warning!'), _("Sender email is missing or empty after template rendering. Specify one to deliver your message"))
values['recipient_ids'] = [(4, pid) for pid in values.get('partner_ids', list())]
attachment_ids = values.pop('attachment_ids', [])
attachments = values.pop('attachments', [])
msg_id = mail_mail.create(cr, uid, values, context=context)
mail = mail_mail.browse(cr, uid, msg_id, context=context)
# manage attachments
for attachment in attachments:
attachment_data = {
'name': attachment[0],
'datas_fname': attachment[0],
'datas': attachment[1],
'res_model': 'mail.message',
'res_id': mail.mail_message_id.id,
}
context = dict(context)
context.pop('default_type', None)
attachment_ids.append(ir_attachment.create(cr, uid, attachment_data, context=context))
if attachment_ids:
values['attachment_ids'] = [(6, 0, attachment_ids)]
mail_mail.write(cr, uid, msg_id, {'attachment_ids': [(6, 0, attachment_ids)]}, context=context)
if force_send:
mail_mail.send(cr, uid, [msg_id], raise_exception=raise_exception, context=context)
return msg_id
# Compatibility method
def render_template(self, cr, uid, template, model, res_id, context=None):
return self.render_template_batch(cr, uid, template, model, [res_id], context)[res_id]
def get_email_template(self, cr, uid, template_id=False, record_id=None, context=None):
return self.get_email_template_batch(cr, uid, template_id, [record_id], context)[record_id]
def generate_email(self, cr, uid, template_id, res_id, context=None):
return self.generate_email_batch(cr, uid, template_id, [res_id], context=context)[res_id]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| -3,462,888,602,468,852,700 | 50.703333 | 151 | 0.573786 | false |
pboechat/OperatorGraph
|
auto_tuner/Configuration.py
|
1
|
9246
|
import xml.sax
import ParseUtils
from Enums import *
####################################################################################################
class BaseConfiguration(xml.sax.ContentHandler):
def loadFromDisk(self, filename):
xml.sax.parse(filename, self)
####################################################################################################
class Pipeline(BaseConfiguration):
def __init__(self):
#self.globalSeed = -1
self.deviceId = 0
self.pgaBasePath = ""
self.analyzerBin = ""
self.partitionBin = ""
self.binTemplatePath = ""
self.workspaceTemplatePath = ""
self.databaseScript = ""
self.compileScript = ""
self.compileDependenciesScript = ""
self.cachePath = ""
self.generatePartitions = False
self.prepareWorkspaces = False
self.compile = False
self.findMaxNumAxioms = False
self.run = False
self.processResults = False
self.saveToDatabase = False
self.decorateDotFiles = False
self.analyzerExecutionTimeout = 300
self.dependenciesCompilationTimeout = 600
self.partitionCompilationTimeout = 3600
self.partitionExecutionTimeout = 600
self.logLevel = LogLevel.DEBUG
self.keepTemp = False
self.rebuild = False
self.compileDependencies = False
self.useMultiprocess = False
self.automaticPoolSize = False
self.poolSize = 0
self.cudaComputeCapability = None
def startElement(self, name, attributes):
if name == "Pipeline":
#self.globalSeed = ParseUtils.try_to_parse_int(attributes.getValue("globalSeed"), -1)
self.deviceId = ParseUtils.try_to_parse_int(attributes.getValue("deviceId"))
self.pgaBasePath = attributes.getValue("pgaBasePath")
self.analyzerBin = attributes.getValue("analyzerBin")
self.partitionBin = attributes.getValue("partitionBin")
self.binTemplatePath = attributes.getValue("binTemplatePath")
self.workspaceTemplatePath = attributes.getValue("workspaceTemplatePath")
self.databaseScript = attributes.getValue("databaseScript")
self.compileScript = attributes.getValue("compileScript")
self.compileDependenciesScript = attributes.getValue("compileDependenciesScript")
self.cachePath = attributes.getValue("cachePath")
self.generatePartitions = bool(ParseUtils.try_to_parse_int(attributes.getValue("generatePartitions")))
self.prepareWorkspaces = bool(ParseUtils.try_to_parse_int(attributes.getValue("prepareWorkspaces")))
self.compile = bool(ParseUtils.try_to_parse_int(attributes.getValue("compile")))
self.findMaxNumAxioms = bool(ParseUtils.try_to_parse_int(attributes.getValue("findMaxNumAxioms")))
self.run = bool(ParseUtils.try_to_parse_int(attributes.getValue("run")))
self.processResults = bool(ParseUtils.try_to_parse_int(attributes.getValue("processResults")))
self.saveToDatabase = bool(ParseUtils.try_to_parse_int(attributes.getValue("saveToDatabase")))
self.decorateDotFiles = bool(ParseUtils.try_to_parse_int(attributes.getValue("decorateDotFiles")))
self.analyzerExecutionTimeout = ParseUtils.try_to_parse_int(attributes.getValue("analyzerExecutionTimeout"))
self.dependenciesCompilationTimeout = ParseUtils.try_to_parse_int(attributes.getValue("dependenciesCompilationTimeout"))
self.partitionCompilationTimeout = ParseUtils.try_to_parse_int(attributes.getValue("partitionCompilationTimeout"))
self.partitionExecutionTimeout = ParseUtils.try_to_parse_int(attributes.getValue("partitionExecutionTimeout"))
# NOTE: if a valid log level is not found, the default value (LogLevel.DEBUG) is used
if "logLevel" in attributes.keys():
log_level_name = attributes.getValue("logLevel")
for log_level, log_level_name_ in LogLevel.NAMES.items():
if log_level_name_ == log_level_name:
self.logLevel = log_level
self.keepTemp = bool(ParseUtils.try_to_parse_int(attributes.getValue("keepTemp")))
self.rebuild = bool(ParseUtils.try_to_parse_int(attributes.getValue("rebuild")))
self.compileDependencies = bool(ParseUtils.try_to_parse_int(attributes.getValue("compileDependencies")))
self.useMultiprocess = bool(ParseUtils.try_to_parse_int(attributes.getValue("useMultiprocess")))
self.automaticPoolSize = bool(ParseUtils.try_to_parse_int(attributes.getValue("automaticPoolSize")))
self.poolSize = ParseUtils.try_to_parse_int(attributes.getValue("poolSize"))
self.cudaComputeCapability = attributes.getValue("cudaComputeCapability")
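# Illustrative element only (attribute values are placeholders, not shipped defaults):
#   <Pipeline deviceId="0" pgaBasePath="C:/pga" analyzerBin="analyzer.exe" ...
#             logLevel="DEBUG" keepTemp="0" rebuild="1" useMultiprocess="1"
#             automaticPoolSize="1" poolSize="4" cudaComputeCapability="30"/>
# Boolean flags are encoded as 0/1 integers; logLevel, when present, should match a LogLevel name.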
####################################################################################################
class Scene(BaseConfiguration):
def __init__(self):
self.runSeed = -1
self.grammarFile = ""
self.templateFile = ""
self.outputPath = ""
self.gridX = 0
self.gridY = 0
self.itemSize = 0
self.maxNumVertices = 0
self.maxNumIndices = 0
self.maxNumElements = 0
self.queuesMem = 0
self.numRuns = 0
self.minNumAxioms = 0
self.maxNumAxioms = 0
self.axiomStep = 0
self.globalMinNumAxioms = -1
self.optimization = 0
self.instrumentation = False
self.whippletreeTechnique = WhippletreeTechnique.MEGAKERNEL
self.H1TO1 = False
self.HRR = False
self.HSTO = -1
self.HTSPECS = ""
self.RSAMPL = 0
self.analyzerSeed = -1
def startElement(self, name, attributes):
if name == "Scene":
if "runSeed" in attributes.keys():
self.runSeed = ParseUtils.try_to_parse_int(attributes.getValue("runSeed"), -1)
self.grammarFile = attributes.getValue("grammarFile")
self.templateFile = attributes.getValue("templateFile")
self.outputPath = attributes.getValue("outputPath")
self.gridX = ParseUtils.try_to_parse_int(attributes.getValue("gridX"))
self.gridY = ParseUtils.try_to_parse_int(attributes.getValue("gridY"))
self.itemSize = ParseUtils.try_to_parse_int(attributes.getValue("itemSize"))
if "maxNumVertices" in attributes.keys():
self.maxNumVertices = ParseUtils.try_to_parse_int(attributes.getValue("maxNumVertices"))
if "maxNumIndices" in attributes.keys():
self.maxNumIndices = ParseUtils.try_to_parse_int(attributes.getValue("maxNumIndices"))
if "maxNumElements" in attributes.keys():
self.maxNumElements = ParseUtils.try_to_parse_int(attributes.getValue("maxNumElements"))
self.queuesMem = ParseUtils.try_to_parse_int(attributes.getValue("queuesMem"))
self.numRuns = ParseUtils.try_to_parse_int(attributes.getValue("numRuns"))
self.minNumAxioms = ParseUtils.try_to_parse_int(attributes.getValue("minNumAxioms"))
self.maxNumAxioms = ParseUtils.try_to_parse_int(attributes.getValue("maxNumAxioms"))
self.axiomStep = ParseUtils.try_to_parse_int(attributes.getValue("axiomStep"))
if "globalMinNumAxioms" in attributes.keys():
self.globalMinNumAxioms = ParseUtils.try_to_parse_int(attributes.getValue("globalMinNumAxioms"))
if "optimization" in attributes.keys():
self.optimization = ParseUtils.try_to_parse_int(attributes.getValue("optimization"))
if "instrumentation" in attributes.keys():
self.instrumentation = ParseUtils.try_to_parse_bool(attributes.getValue("instrumentation"))
            # NOTE: if a valid whippletree technique is not found, the default value (WhippletreeTechnique.MEGAKERNEL) is used
if "whippletreeTechnique" in attributes.keys():
whippletree_technique_name = attributes.getValue("whippletreeTechnique")
for whippletree_technique, whippletree_technique_name_ in WhippletreeTechnique.NAMES.items():
if whippletree_technique_name_ == whippletree_technique_name:
self.whippletreeTechnique = whippletree_technique
if "H1TO1" in attributes.keys():
self.H1TO1 = ParseUtils.try_to_parse_int(attributes.getValue("H1TO1"))
if "HRR" in attributes.keys():
self.HRR = ParseUtils.try_to_parse_int(attributes.getValue("HRR"))
if "HSTO" in attributes.keys():
self.HSTO = ParseUtils.try_to_parse_int(attributes.getValue("HSTO"))
if "HTSPECS" in attributes.keys():
self.HTSPECS = attributes.getValue("HTSPECS")
if "RSAMPL" in attributes.keys():
self.RSAMPL = ParseUtils.try_to_parse_int(attributes.getValue("RSAMPL"))
if "analyzerSeed" in attributes.keys():
self.analyzerSeed = ParseUtils.try_to_parse_int(attributes.getValue("analyzerSeed"))
|
mit
| 9,163,868,085,743,371,000 | 57.878981 | 132 | 0.636413 | false |
cgarrard/osgeopy-code
|
Chapter4/listing4_2.py
|
1
|
1083
|
import os
from osgeo import ogr
def layers_to_feature_dataset(ds_name, gdb_fn, dataset_name):
"""Copy layers to a feature dataset in a file geodatabase."""
# Open the input datasource.
in_ds = ogr.Open(ds_name)
if in_ds is None:
raise RuntimeError('Could not open datasource')
# Open the geodatabase or create it if it doesn't exist.
gdb_driver = ogr.GetDriverByName('FileGDB')
if os.path.exists(gdb_fn):
gdb_ds = gdb_driver.Open(gdb_fn, 1)
else:
gdb_ds = gdb_driver.CreateDataSource(gdb_fn)
if gdb_ds is None:
raise RuntimeError('Could not open file geodatabase')
# Create an option list so the feature classes will be
# saved in a feature dataset.
options = ['FEATURE_DATASET=' + dataset_name]
# Loop through the layers in the input datasource and copy
# each one into the geodatabase.
for i in range(in_ds.GetLayerCount()):
lyr = in_ds.GetLayer(i)
lyr_name = lyr.GetName()
print('Copying ' + lyr_name + '...')
gdb_ds.CopyLayer(lyr, lyr_name, options)
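# Hypothetical usage (paths are placeholders; requires the FileGDB driver):
#   layers_to_feature_dataset(r'D:\osgeopy-data\global', r'D:\temp\global.gdb', 'global')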
|
mit
| -5,060,772,019,536,975,000 | 33.935484 | 65 | 0.650046 | false |
pylada/pylada-light
|
src/pylada/config/vasp.py
|
1
|
1440
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
""" VASP parameters for pylada. """
vasp_program = None
""" Path of vasp binary executable (if launching as external program). """
vasp_has_nlep = False
""" Should be set to True if one wants to use NLEP. """
is_vasp_4 = False
""" Set to True to use vasp4-style POSCARS and INCARS. """
|
gpl-3.0
| 4,206,401,852,313,639,000 | 48.655172 | 103 | 0.711806 | false |
jennywoites/MUSSA
|
MUSSA_Flask/app/API_Rest/Services/DocenteServices/DocenteService.py
|
1
|
5919
|
from app.API_Rest.codes import *
from app.models.docentes_models import Docente
from app import db
from flask_user import roles_accepted
from app.API_Rest.Services.BaseService import BaseService
from app.models.generadorJSON.docentes_generadorJSON import generarJSON_docente
from app.models.docentes_models import CursosDocente
from app.models.horarios_models import Curso
class DocenteService(BaseService):
def getNombreClaseServicio(self):
return "Docente Service"
##########################################
    ##              Services              ##
##########################################
def get(self, idDocente):
return self.servicio_get_base(idDocente, "idDocente", Docente, generarJSON_docente)
@roles_accepted('admin')
def delete(self, idDocente):
self.logg_parametros_recibidos()
parametros_son_validos, msj, codigo = self.validar_parametros(dict([
self.get_validaciones_entidad_basica("idDocente", idDocente, Docente)
]))
if not parametros_son_validos:
self.logg_error(msj)
return {'Error': msj}, codigo
        # Logical (soft) delete
docente = Docente.query.get(idDocente)
docente.eliminado = True
db.session.commit()
cursos = CursosDocente.query.filter_by(docente_id=docente.id).all()
for curso in cursos:
curso.eliminado = True
db.session.commit()
result = SUCCESS_NO_CONTENT
self.logg_resultado(result)
return result
@roles_accepted('admin')
    def put(self, idDocente):
self.logg_parametros_recibidos()
apellido = self.obtener_texto('apellido')
nombre = self.obtener_texto('nombre')
l_ids_cursos = self.obtener_lista('l_ids_cursos')
parametros_son_validos, msj, codigo = self.validar_parametros(dict([
self.get_validaciones_entidad_basica("idDocente", idDocente, Docente),
("apellido", {
self.PARAMETRO: apellido,
self.ES_OBLIGATORIO: True,
self.FUNCIONES_VALIDACION: [
(self.validar_contenido_y_longitud_texto, [3, 35])
]
}),
("nombre", {
self.PARAMETRO: nombre,
self.ES_OBLIGATORIO: True,
self.FUNCIONES_VALIDACION: [
(self.validar_contenido_y_longitud_texto, [0, 40])
]
}),
("l_ids_cursos", {
self.PARAMETRO: l_ids_cursos,
self.ES_OBLIGATORIO: True,
self.FUNCIONES_VALIDACION: [
(self.id_es_valido, []),
(self.existe_id, [Curso])
]
})
]))
if not parametros_son_validos:
self.logg_error(msj)
return {'Error': msj}, codigo
docente = Docente(
apellido=apellido,
nombre=nombre
)
db.session.add(docente)
db.session.commit()
self.actualizar_cursos_que_dicta_el_docente(docente.id, l_ids_cursos)
result = SUCCESS_OK
self.logg_resultado(result)
return result
@roles_accepted('admin')
def post(self, idDocente):
self.logg_parametros_recibidos()
apellido = self.obtener_texto('apellido')
nombre = self.obtener_texto('nombre')
l_ids_cursos = self.obtener_lista('l_ids_cursos')
parametros_son_validos, msj, codigo = self.validar_parametros(dict([
self.get_validaciones_entidad_basica("idDocente", idDocente, Docente),
("apellido", {
self.PARAMETRO: apellido,
self.ES_OBLIGATORIO: True,
self.FUNCIONES_VALIDACION: [
(self.validar_contenido_y_longitud_texto, [3, 35])
]
}),
("nombre", {
self.PARAMETRO: nombre,
self.ES_OBLIGATORIO: True,
self.FUNCIONES_VALIDACION: [
(self.validar_contenido_y_longitud_texto, [0, 40])
]
}),
("l_ids_cursos", {
self.PARAMETRO: l_ids_cursos,
self.ES_OBLIGATORIO: True,
self.FUNCIONES_VALIDACION: [
(self.id_es_valido, []),
(self.existe_id, [Curso])
]
})
]))
if not parametros_son_validos:
self.logg_error(msj)
return {'Error': msj}, codigo
self.actualizar_datos_docente(idDocente, apellido, nombre)
self.actualizar_cursos_que_dicta_el_docente(idDocente, l_ids_cursos)
result = SUCCESS_OK
self.logg_resultado(result)
return result
def actualizar_datos_docente(self, idDocente, apellido, nombre):
docente = Docente.query.get(idDocente)
docente.apellido = apellido
docente.nombre = nombre
db.session.commit()
def actualizar_cursos_que_dicta_el_docente(self, idDocente, l_ids_cursos):
        # Mark as deleted the courses that exist but were not in l_ids
for curso_docente in CursosDocente.query.filter_by(docente_id=idDocente).all():
if not curso_docente.curso_id in l_ids_cursos:
curso_docente.eliminado = True
db.session.commit()
for id_curso in l_ids_cursos:
curso = CursosDocente.query.filter_by(docente_id=idDocente) \
.filter_by(curso_id=id_curso).first()
if not curso:
curso = CursosDocente(docente_id=idDocente, curso_id=id_curso)
db.session.add(curso)
curso.eliminado = False
db.session.commit()
#########################################
CLASE = DocenteService
URLS_SERVICIOS = (
'/api/docente/<int:idDocente>',
)
#########################################
|
gpl-3.0
| 8,846,627,085,232,308,000 | 32.822857 | 91 | 0.551445 | false |
lovelysystems/pyjamas
|
library/pyjamas/ui/HorizontalPanel.py
|
1
|
2463
|
# Copyright 2006 James Tauber and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import DOM
from pyjamas.ui.CellPanel import CellPanel
from pyjamas.ui import HasHorizontalAlignment
from pyjamas.ui import HasVerticalAlignment
class HorizontalPanel(CellPanel):
def __init__(self, **kwargs):
if not kwargs.has_key('Spacing'): kwargs['Spacing'] = 0
if not kwargs.has_key('Padding'): kwargs['Padding'] = 0
self.horzAlign = HasHorizontalAlignment.ALIGN_LEFT
self.vertAlign = HasVerticalAlignment.ALIGN_TOP
CellPanel.__init__(self, **kwargs)
self.tableRow = DOM.createTR()
DOM.appendChild(self.getBody(), self.tableRow)
def add(self, widget):
self.insert(widget, self.getWidgetCount())
def getHorizontalAlignment(self):
return self.horzAlign
def getVerticalAlignment(self):
return self.vertAlign
def getWidget(self, index):
return self.children[index]
def getWidgetCount(self):
return len(self.children)
def getWidgetIndex(self, child):
return self.children.index(child)
def insert(self, widget, beforeIndex):
widget.removeFromParent()
td = DOM.createTD()
DOM.insertChild(self.tableRow, td, beforeIndex)
CellPanel.insert(self, widget, td, beforeIndex)
self.setCellHorizontalAlignment(widget, self.horzAlign)
self.setCellVerticalAlignment(widget, self.vertAlign)
def remove(self, widget):
if isinstance(widget, int):
widget = self.getWidget(widget)
if widget.getParent() != self:
return False
td = DOM.getParent(widget.getElement())
DOM.removeChild(self.tableRow, td)
CellPanel.remove(self, widget)
return True
def setHorizontalAlignment(self, align):
self.horzAlign = align
def setVerticalAlignment(self, align):
self.vertAlign = align
|
apache-2.0
| -5,496,431,016,375,003,000 | 28.674699 | 74 | 0.688591 | false |
seocam/mirror-test
|
colab/search/forms.py
|
1
|
7726
|
# -*- coding: utf-8 -*-
import unicodedata
from django import forms
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from haystack.forms import SearchForm
from haystack.inputs import AltParser
from haystack.inputs import AutoQuery
from colab.super_archives.models import MailingList
class ColabSearchForm(SearchForm):
q = forms.CharField(label=_('Search'), required=False)
order = forms.CharField(widget=forms.HiddenInput(), required=False)
type = forms.CharField(required=False, label=_(u'Type'))
author = forms.CharField(required=False, label=_(u'Author'))
modified_by = forms.CharField(required=False, label=_(u'Modified by'))
# ticket status
tag = forms.CharField(required=False, label=_(u'Status'))
# mailinglist tag
list = forms.MultipleChoiceField(
required=False,
label=_(u'Mailinglist'),
choices=[(v, v) for v in MailingList.objects.values_list(
'name', flat=True)]
)
milestone = forms.CharField(required=False, label=_(u'Milestone'))
priority = forms.CharField(required=False, label=_(u'Priority'))
component = forms.CharField(required=False, label=_(u'Component'))
severity = forms.CharField(required=False, label=_(u'Severity'))
reporter = forms.CharField(required=False, label=_(u'Reporter'))
keywords = forms.CharField(required=False, label=_(u'Keywords'))
collaborators = forms.CharField(required=False, label=_(u'Collaborators'))
repository_name = forms.CharField(required=False, label=_(u'Repository'))
username = forms.CharField(required=False, label=_(u'Username'))
name = forms.CharField(required=False, label=_(u'Name'))
institution = forms.CharField(required=False, label=_(u'Institution'))
role = forms.CharField(required=False, label=_(u'Role'))
since = forms.DateField(required=False, label=_(u'Since'))
until = forms.DateField(required=False, label=_(u'Until'))
filename = forms.CharField(required=False, label=_(u'Filename'))
used_by = forms.CharField(required=False, label=_(u'Used by'))
mimetype = forms.CharField(required=False, label=_(u'File type'))
size = forms.CharField(required=False, label=_(u'Size'))
def search(self):
if not self.is_valid():
return self.no_query_found()
# filter_or goes here
sqs = self.searchqueryset.all()
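        # The mimetype and size facets below are OR-ed into the query with
        # filter_or; when both are set, each size bucket is paired with the
        # selected mimetype groups so a hit only needs to match one pairing.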
mimetype = self.cleaned_data['mimetype']
if mimetype:
filter_mimetypes = {'mimetype__in': []}
for type_, display, mimelist in settings.FILE_TYPE_GROUPINGS:
if type_ in mimetype:
filter_mimetypes['mimetype__in'] += mimelist
if not self.cleaned_data['size']:
sqs = sqs.filter_or(mimetype__in=mimelist)
if self.cleaned_data['size']:
# Size thresholds used below:
#   500 KB = (1024 * 1024) / 2  = 524288 bytes
#   10 MB  = (1024 * 1024) * 10 = 10485760 bytes
filter_sizes = {}
filter_sizes_exp = {}
if '<500KB' in self.cleaned_data['size']:
filter_sizes['size__lt'] = 524288
if '500KB__10MB' in self.cleaned_data['size']:
filter_sizes_exp['size__gte'] = 524288
filter_sizes_exp['size__lte'] = 10485760
if '>10MB' in self.cleaned_data['size']:
filter_sizes['size__gt'] = 10485760
if self.cleaned_data['mimetype']:
# Add the mimetypes filters to this dict and filter it
if filter_sizes_exp:
filter_sizes_exp.update(filter_mimetypes)
sqs = sqs.filter_or(**filter_sizes_exp)
for filter_or in filter_sizes.items():
filter_or = dict((filter_or, ))
filter_or.update(filter_mimetypes)
sqs = sqs.filter_or(**filter_or)
else:
for filter_or in filter_sizes.items():
filter_or = dict((filter_or, ))
sqs = sqs.filter_or(**filter_or)
sqs = sqs.filter_or(**filter_sizes_exp)
if self.cleaned_data['used_by']:
sqs = sqs.filter_or(used_by__in=self.cleaned_data['used_by']
.split())
if self.cleaned_data['q']:
q = unicodedata.normalize(
'NFKD', self.cleaned_data.get('q')
).encode('ascii', 'ignore')
dismax_opts = {
'q.alt': '*:*',
'pf': 'title^2.1 author^1.9 description^1.7',
'mm': '2<70%',
# Date boosting:
# http://wiki.apache.org/solr/FunctionQuery#Date_Boosting
'bf': 'recip(ms(NOW/HOUR,modified),3.16e-11,1,1)^10',
}
hayString = 'haystack.backends.whoosh_backend.WhooshEngine'
if settings.HAYSTACK_CONNECTIONS['default']['ENGINE'] != hayString:
sqs = sqs.filter(content=AltParser(
'edismax', q, **dismax_opts))
else:
sqs = sqs.filter(content=AutoQuery(q))
if self.cleaned_data['type']:
sqs = sqs.filter(type=self.cleaned_data['type'])
if self.cleaned_data['order']:
for option, dict_order in settings.ORDERING_DATA.items():
if self.cleaned_data['order'] == option:
if dict_order['fields']:
sqs = sqs.order_by(*dict_order['fields'])
if self.cleaned_data['author']:
sqs = sqs.filter(
fullname_and_username__contains=self.cleaned_data['author']
)
if self.cleaned_data['modified_by']:
modified_by_data = self.cleaned_data['modified_by']
sqs = sqs.filter(
fullname_and_username__contains=modified_by_data
)
if self.cleaned_data['milestone']:
sqs = sqs.filter(milestone=self.cleaned_data['milestone'])
if self.cleaned_data['priority']:
sqs = sqs.filter(priority=self.cleaned_data['priority'])
if self.cleaned_data['severity']:
sqs = sqs.filter(severity=self.cleaned_data['severity'])
if self.cleaned_data['reporter']:
sqs = sqs.filter(reporter=self.cleaned_data['reporter'])
if self.cleaned_data['keywords']:
sqs = sqs.filter(keywords=self.cleaned_data['keywords'])
if self.cleaned_data['collaborators']:
sqs = sqs.filter(collaborators=self.cleaned_data['collaborators'])
if self.cleaned_data['repository_name']:
sqs = sqs.filter(
repository_name=self.cleaned_data['repository_name']
)
if self.cleaned_data['username']:
sqs = sqs.filter(username=self.cleaned_data['username'])
if self.cleaned_data['name']:
sqs = sqs.filter(name=self.cleaned_data['name'])
if self.cleaned_data['institution']:
sqs = sqs.filter(institution=self.cleaned_data['institution'])
if self.cleaned_data['role']:
sqs = sqs.filter(role=self.cleaned_data['role'])
if self.cleaned_data['tag']:
sqs = sqs.filter(tag=self.cleaned_data['tag'])
if self.cleaned_data['list']:
sqs = sqs.filter(tag__in=self.cleaned_data['list'])
if self.cleaned_data['since']:
sqs = sqs.filter(modified__gte=self.cleaned_data['since'])
if self.cleaned_data['until']:
sqs = sqs.filter(modified__lte=self.cleaned_data['until'])
if self.cleaned_data['filename']:
sqs = sqs.filter(filename=self.cleaned_data['filename'])
return sqs
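# --- Hedged usage sketch (editorial addition, not part of the original colab module). ---
# It illustrates how this form is typically driven from a Django view; the view name,
# template path and context keys below are assumptions for illustration only.
def _example_search_view(request):  # pragma: no cover - illustrative sketch
    from django.shortcuts import render  # local import keeps the sketch self-contained
    form = ColabSearchForm(request.GET or None)
    results = form.search() if form.is_valid() else []
    return render(request, 'search/search.html', {'form': form, 'results': results})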
|
gpl-2.0
| 8,863,627,971,120,281,000 | 42.897727 | 79 | 0.576107 | false |
hjanime/VisTrails
|
vistrails/gui/variable_dropbox.py
|
1
|
22113
|
###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
""" This file contains widgets that can be used for dropping Constant class
variables. It will construct an input form for the value.
QVariableDropBox
QVerticalWidget
QVariableInputWidget
QVariableInputForm
QDragVariableLabel
QHoverVariableLabel
"""
from __future__ import division
from PyQt4 import QtCore, QtGui
from vistrails.core import debug
from vistrails.core.vistrail.module_function import ModuleFunction
from vistrails.core.vistrail.module_param import ModuleParam
from vistrails.core.modules import module_registry
from vistrails.core.modules.basic_modules import Constant
from vistrails.core.vistrail.vistrailvariable import VistrailVariable
from vistrails.gui.common_widgets import QPromptWidget
from vistrails.gui.modules.utils import get_widget_class
from vistrails.gui.modules.constant_configuration import StandardConstantWidget
from vistrails.gui.module_palette import QModuleTreeWidget
from vistrails.gui.theme import CurrentTheme
from vistrails.gui.utils import show_question, YES_BUTTON, NO_BUTTON
import uuid
################################################################################
class QVariableDropBox(QtGui.QScrollArea):
"""
QVariableDropBox is just a widget such that items that subclass
Constant from the module palette can be dropped into its client rect.
It then constructs an input form based on the type of handling widget
"""
def __init__(self, parent=None):
""" QVariableDropBox(parent: QWidget) -> QVariableDropBox
Initialize widget constraints
"""
QtGui.QScrollArea.__init__(self, parent)
self.setAcceptDrops(True)
self.setWidgetResizable(True)
self.vWidget = QVerticalWidget()
self.setWidget(self.vWidget)
self.updateLocked = False
self.controller = None
def dragEnterEvent(self, event):
""" dragEnterEvent(event: QDragEnterEvent) -> None
Set to accept drops from the module palette
"""
if isinstance(event.source(), QModuleTreeWidget):
data = event.mimeData()
if hasattr(data, 'items'):
event.accept()
else:
event.ignore()
def dragMoveEvent(self, event):
""" dragMoveEvent(event: QDragMoveEvent) -> None
Set to accept drag move event from the module palette
"""
if isinstance(event.source(), QModuleTreeWidget):
data = event.mimeData()
if hasattr(data, 'items'):
event.accept()
def dropEvent(self, event):
""" dropEvent(event: QDragMoveEvent) -> None
Accept drop event to add a new variable
"""
if isinstance(event.source(), QModuleTreeWidget):
data = event.mimeData()
if hasattr(data, 'items'):
event.accept()
assert len(data.items) == 1
item = data.items[0]
if issubclass(item.descriptor.module, Constant):
if item.descriptor and self.controller:
self.lockUpdate()
(text, ok) = QtGui.QInputDialog.getText(self,
'Set Variable Name',
'Enter the variable name',
QtGui.QLineEdit.Normal,
'')
var_name = str(text).strip()
while ok and self.controller.check_vistrail_variable(var_name):
msg =" This variable name is already being used.\
Please enter a different variable name "
(text, ok) = QtGui.QInputDialog.getText(self,
'Set Variable Name',
msg,
QtGui.QLineEdit.Normal,
text)
var_name = str(text).strip()
if ok:
self.vWidget.addVariable(str(uuid.uuid1()), var_name, item.descriptor)
self.scrollContentsBy(0, self.viewport().height())
self.unlockUpdate()
#self.emit(QtCore.SIGNAL("paramsAreaChanged"))
def updateController(self, controller):
""" updateController(controller: VistrailController) -> None
Construct input forms for a controller's variables
"""
# we shouldn't do this whenever the controller changes...
if self.controller != controller:
self.controller = controller
if self.updateLocked: return
self.vWidget.clear()
if controller:
reg = module_registry.get_module_registry()
for var in [v for v in controller.vistrail.vistrail_vars]:
try:
descriptor = reg.get_descriptor_by_name(var.package,
var.module,
var.namespace)
except module_registry.ModuleRegistryException:
debug.critical("Missing Module Descriptor for vistrail"
" variable %s\nPackage: %s\nType: %s"
"\nNamespace: %s" % \
(var.name, var.package, var.module,
var.namespace))
continue
self.vWidget.addVariable(var.uuid, var.name, descriptor,
var.value)
self.vWidget.showPromptByChildren()
else:
self.vWidget.showPrompt(False)
def lockUpdate(self):
""" lockUpdate() -> None
Do not allow updateModule()
"""
self.updateLocked = True
def unlockUpdate(self):
""" unlockUpdate() -> None
Allow updateModule()
"""
self.updateLocked = False
class QVerticalWidget(QPromptWidget):
"""
QVerticalWidget is a widget holding other variable widgets
vertically
"""
def __init__(self, parent=None):
""" QVerticalWidget(parent: QWidget) -> QVerticalWidget
Initialize with a vertical layout
"""
QPromptWidget.__init__(self, parent)
self.setPromptText("Drag a constant from the Modules panel to create a variable")
self.setLayout(QtGui.QVBoxLayout())
self.layout().setMargin(0)
self.layout().setSpacing(5)
self.layout().setAlignment(QtCore.Qt.AlignTop)
self.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
self.setMinimumHeight(20)
self._variable_widgets = []
def addVariable(self, uuid, name, descriptor, value=''):
""" addVariable(uuid:str, name: str, descriptor: ModuleDescriptor, value: str) -> None
Add an input form for the variable
"""
inputForm = QVariableInputWidget(uuid, name, descriptor, value, self)
self.connect(inputForm, QtCore.SIGNAL('deleted(QWidget*)'),
self.delete_form)
self.layout().addWidget(inputForm)
inputForm.show()
self.setMinimumHeight(self.layout().minimumSize().height())
self.showPrompt(False)
self._variable_widgets.append(inputForm)
def clear(self):
""" clear() -> None
Clear and delete all widgets in the layout
"""
self.setEnabled(False)
for v in self._variable_widgets:
self.disconnect(v, QtCore.SIGNAL('deleted(QWidget*)'),
self.delete_form)
self.layout().removeWidget(v)
v.setParent(None)
v.deleteLater()
self._variable_widgets = []
self.setEnabled(True)
def delete_form(self, input_form):
self.disconnect(input_form, QtCore.SIGNAL('deleted(QWidget*)'),
self.delete_form)
var_name = input_form.var_name
variableBox = self.parent().parent()
self.layout().removeWidget(input_form)
self._variable_widgets.remove(input_form)
input_form.setParent(None)
input_form.deleteLater()
self.showPromptByChildren()
if variableBox.controller:
variableBox.lockUpdate()
variableBox.controller.set_vistrail_variable(var_name, None)
variableBox.unlockUpdate()
self.setMinimumHeight(self.layout().minimumSize().height())
class QVariableInputWidget(QtGui.QDockWidget):
def __init__(self, uuid, name, descriptor, value='', parent=None):
QtGui.QDockWidget.__init__(self, parent)
self.var_uuid = uuid
self.var_name = name
self.descriptor = descriptor
self.setFeatures(QtGui.QDockWidget.DockWidgetClosable)
# Create group and titlebar widgets for input widget
self.group_box = QVariableInputForm(descriptor, value, self)
self.setWidget(self.group_box)
title_widget = QtGui.QWidget()
title_layout = QtGui.QHBoxLayout()
self.closeButton = QtGui.QToolButton()
self.closeButton.setAutoRaise(True)
self.closeButton.setIcon(QtGui.QIcon(self.style().standardPixmap(QtGui.QStyle.SP_TitleBarCloseButton)))
self.closeButton.setIconSize(QtCore.QSize(13, 13))
self.closeButton.setFixedWidth(16)
self.label = QHoverVariableLabel(name)
title_layout.addWidget(self.label)
title_layout.addWidget(self.closeButton)
title_widget.setLayout(title_layout)
self.setTitleBarWidget(title_widget)
self.connect(self.closeButton, QtCore.SIGNAL('clicked()'), self.close)
def renameVariable(self, var_name):
# First delete old var entry
variableBox = self.parent().parent().parent()
if variableBox.controller:
variableBox.lockUpdate()
variableBox.controller.set_vistrail_variable(self.var_name, None, False)
variableBox.unlockUpdate()
# Create var entry with new name, but keeping the same uuid
self.var_name = var_name
self.label.setText(var_name)
self.group_box.updateMethod()
def closeEvent(self, event):
choice = show_question('Delete %s?'%self.var_name,
'Are you sure you want to permanently delete the VisTrail variable '
'"%s"?\n\nNote: Any workflows using this variable will be left in an invalid state.' % self.var_name,
[NO_BUTTON,YES_BUTTON],
NO_BUTTON)
if choice == NO_BUTTON:
event.ignore()
return
self.emit(QtCore.SIGNAL('deleted(QWidget*)'), self)
def keyPressEvent(self, e):
if e.key() in [QtCore.Qt.Key_Delete, QtCore.Qt.Key_Backspace]:
self.close()
else:
QtGui.QDockWidget.keyPressEvent(self, e)
def check_variable(self, name):
""" check_variable(name: str) -> Boolean
Returns True if the vistrail already has the variable name
"""
variableBox = self.parent().parent().parent()
if variableBox.controller:
return variableBox.controller.check_vistrail_variable(name)
return False
class QVariableInputForm(QtGui.QGroupBox):
"""
QVariableInputForm is a widget with multiple input lines depends on
the method signature
"""
def __init__(self, descriptor, var_strValue="", parent=None):
""" QVariableInputForm(descriptor: ModuleDescriptor, var_strValue: str,
parent: QWidget) -> QVariableInputForm
Initialize with a vertical layout
"""
QtGui.QGroupBox.__init__(self, parent)
self.setLayout(QtGui.QGridLayout())
self.layout().setMargin(5)
self.layout().setSpacing(5)
self.setFocusPolicy(QtCore.Qt.ClickFocus)
self.setSizePolicy(QtGui.QSizePolicy.Preferred,
QtGui.QSizePolicy.Fixed)
self.palette().setColor(QtGui.QPalette.Window,
CurrentTheme.METHOD_SELECT_COLOR)
# Create widget for editing variable
p = ModuleParam(type=descriptor.name, identifier=descriptor.identifier,
namespace=descriptor.namespace)
p.strValue = var_strValue
widget_type = get_widget_class(descriptor)
self.widget = widget_type(p, self)
self.label = QDragVariableLabel(p.type)
self.layout().addWidget(self.label, 0, 0)
self.layout().addWidget(self.widget, 0, 1)
self.updateMethod()
def focusInEvent(self, event):
""" gotFocus() -> None
Make sure the form painted as selected
"""
self.setAutoFillBackground(True)
def focusOutEvent(self, event):
""" lostFocus() -> None
Make sure the form painted as non-selected and then
perform a parameter changes
"""
self.setAutoFillBackground(False)
def updateMethod(self):
""" updateMethod() -> None
Update the variable values in vistrail controller
"""
inputWidget = self.parent()
variableBox = inputWidget.parent().parent().parent()
if variableBox.controller:
variableBox.lockUpdate()
descriptor = inputWidget.descriptor
var = VistrailVariable(inputWidget.var_name, inputWidget.var_uuid,
descriptor.identifier, descriptor.name,
descriptor.namespace, str(self.widget.contents()))
variableBox.controller.set_vistrail_variable(inputWidget.var_name, var)
variableBox.unlockUpdate()
class QDragVariableLabel(QtGui.QLabel):
"""
QDragVariableLabel is a QLabel that can be dragged to connect
to an input port
"""
def __init__(self, var_type='', parent=None):
""" QDragVariableLabel(var_type:str,
parent: QWidget) -> QDragVariableLabel
Initialize the label with a variable type
"""
QtGui.QLabel.__init__(self, parent)
self.var_type = var_type
self.setText(var_type)
self.setAttribute(QtCore.Qt.WA_Hover)
self.setCursor(CurrentTheme.OPEN_HAND_CURSOR)
self.setToolTip('Drag to an input port')
self.palette().setColor(QtGui.QPalette.WindowText,
CurrentTheme.HOVER_DEFAULT_COLOR)
def event(self, event):
""" event(event: QEvent) -> Event Result
Override to handle hover enter and leave events for hot links
"""
if event.type()==QtCore.QEvent.HoverEnter:
self.palette().setColor(QtGui.QPalette.WindowText,
CurrentTheme.HOVER_SELECT_COLOR)
if event.type()==QtCore.QEvent.HoverLeave:
self.palette().setColor(QtGui.QPalette.WindowText,
CurrentTheme.HOVER_DEFAULT_COLOR)
return QtGui.QLabel.event(self, event)
def mousePressEvent(self, event):
""" mousePressEvent(event: QMouseEvent) -> None
If mouse click on the label, show up a dialog to change/add
the variable name
"""
if event.button()==QtCore.Qt.LeftButton:
inputWidget = self.parent().parent()
var_name = inputWidget.var_name
var_uuid = inputWidget.var_uuid
# Create pixmap from variable name and type
drag_str = var_name + ' : ' + self.var_type
drag_label = QDragVariableLabel(drag_str)
drag_label.adjustSize()
painter = QtGui.QPainter()
font = QtGui.QFont()
size = drag_label.size()
image = QtGui.QImage(size.width()+4, size.height()+4, QtGui.QImage.Format_ARGB32_Premultiplied)
image.fill(0)
painter.begin(image)
painter.setPen(QtCore.Qt.NoPen)
painter.setBrush(self.palette().highlight())
painter.drawRect(QtCore.QRectF(0, 0, image.width(), image.height()))
painter.setFont(font)
painter.setPen(QtCore.Qt.black)
painter.drawText(QtCore.QRect(QtCore.QPoint(2,2), size), QtCore.Qt.AlignLeft | QtCore.Qt.TextSingleLine, drag_str)
painter.end()
pixmap = QtGui.QPixmap.fromImage(image)
# Create drag action
mimeData = QtCore.QMimeData()
portspec = inputWidget.descriptor.get_port_spec('value', 'output')
mimeData.variableData = (portspec, var_uuid, var_name)
drag = QtGui.QDrag(self)
drag.setMimeData(mimeData)
drag.setHotSpot(pixmap.rect().bottomRight())
drag.setPixmap(pixmap)
drag.start(QtCore.Qt.MoveAction)
class QHoverVariableLabel(QtGui.QLabel):
"""
QHoverVariableLabel is a QLabel that supports hover actions similar
to a hot link
"""
def __init__(self, var_name='', parent=None):
""" QHoverVariableLabel(var_name:str,
parent: QWidget) -> QHoverVariableLabel
Initialize the label with a variable name
"""
QtGui.QLabel.__init__(self, parent)
self.var_name = var_name
self.setText(var_name)
self.setAttribute(QtCore.Qt.WA_Hover)
self.setCursor(QtCore.Qt.PointingHandCursor)
self.setToolTip('Click to rename')
self.palette().setColor(QtGui.QPalette.WindowText,
CurrentTheme.HOVER_DEFAULT_COLOR)
def event(self, event):
""" event(event: QEvent) -> Event Result
Override to handle hover enter and leave events for hot links
"""
if event.type()==QtCore.QEvent.HoverEnter:
self.palette().setColor(QtGui.QPalette.WindowText,
CurrentTheme.HOVER_SELECT_COLOR)
if event.type()==QtCore.QEvent.HoverLeave:
self.palette().setColor(QtGui.QPalette.WindowText,
CurrentTheme.HOVER_DEFAULT_COLOR)
return QtGui.QLabel.event(self, event)
def mousePressEvent(self, event):
""" mousePressEvent(event: QMouseEvent) -> None
If mouse click on the label, show up a dialog to change/add
the variable name
"""
if event.button()==QtCore.Qt.LeftButton:
inputWidget = self.parent().parent()
orig_var_name = inputWidget.var_name
(text, ok) = QtGui.QInputDialog.getText(self,
'Set New Variable Name',
'Enter the new variable name',
QtGui.QLineEdit.Normal,
orig_var_name)
var_name = str(text).strip()
while ok and self.parent().parent().check_variable(var_name):
msg =" This variable name is already being used.\
Please enter a different variable name "
(text, ok) = QtGui.QInputDialog.getText(self,
'Set New Variable Name',
msg,
QtGui.QLineEdit.Normal,
text)
var_name = str(text).strip()
if ok and var_name != orig_var_name:
self.setText(var_name)
inputWidget.renameVariable(var_name)
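# --- Hedged usage sketch (editorial addition, not part of the original VisTrails module). ---
# A minimal way to exercise the drop box on its own, assuming a working PyQt4/VisTrails
# environment; inside VisTrails the widget is normally created and fed a controller by
# the surrounding GUI rather than run standalone like this.
if __name__ == '__main__':
    import sys
    app = QtGui.QApplication(sys.argv)
    drop_box = QVariableDropBox()
    drop_box.updateController(None)   # no controller yet: just shows the empty panel
    drop_box.show()
    sys.exit(app.exec_())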
|
bsd-3-clause
| 32,938,548,882,874,140 | 42.189453 | 126 | 0.575996 | false |
Petraea/jsonbot
|
jsb/lib/less.py
|
1
|
1782
|
# jsb/less.py
#
#
""" maintain bot output cache. """
# jsb imports
from jsb.utils.exception import handle_exception
from jsb.utils.limlist import Limlist
from jsb.lib.cache import get, set, delete
## basic imports
import logging
## Less class
class Less(object):
""" output cache .. caches upto <nr> item of txt lines per channel. """
def clear(self, channel):
""" clear outcache of channel. """
channel = unicode(channel).lower()
try: delete(u"outcache-" + channel)
except KeyError: pass
def add(self, channel, listoftxt):
""" add listoftxt to channel's output. """
channel = unicode(channel).lower()
data = get("outcache-" + channel)
if not data: data = []
data.extend(listoftxt)
set(u"outcache-" + channel, data, 3600)
def set(self, channel, listoftxt):
""" set listoftxt to channel's output. """
channel = unicode(channel).lower()
set(u"outcache-" + channel, listoftxt, 3600)
def get(self, channel):
""" return 1 item popped from outcache. """
channel = unicode(channel).lower()
global get
data = get(u"outcache-" + channel)
if not data: txt = None
else:
try: txt = data.pop(0) ; set(u"outcache-" + channel, data, 3600)
except (KeyError, IndexError): txt = None
if data: size = len(data)
else: size = 0
return (txt, size)
def copy(self, channel):
""" return 1 item popped from outcache. """
channel = unicode(channel).lower()
global get
return get(u"outcache-" + channel)
def more(self, channel):
""" return more entry and remaining size. """
return self.get(channel)
outcache = Less()
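# --- Hedged usage sketch (editorial addition, not part of the original jsb module). ---
# Shows the intended flow: buffer a list of lines for a channel, then pop them one at a
# time with more(). The channel name and lines are invented, and a configured jsb cache
# backend is assumed.
if __name__ == '__main__':
    outcache.set(u'#example-channel', [u'first line', u'second line', u'third line'])
    txt, remaining = outcache.more(u'#example-channel')
    print u'%s (%s lines left)' % (txt, remaining)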
|
mit
| 4,724,756,526,734,193,000 | 26.430769 | 76 | 0.589226 | false |
waterdotorg/power.Water
|
project/custom/migrations/0011_auto__add_friendjoinedemaillog__add_unique_friendjoinedemaillog_user_u.py
|
1
|
13099
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'FriendJoinedEmailLog'
db.create_table('custom_friendjoinedemaillog', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='user_fje', to=orm['auth.User'])),
('user_referred', self.gf('django.db.models.fields.related.ForeignKey')(related_name='user_referred_fje', to=orm['auth.User'])),
('created_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('updated_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('custom', ['FriendJoinedEmailLog'])
# Adding unique constraint on 'FriendJoinedEmailLog', fields ['user', 'user_referred']
db.create_unique('custom_friendjoinedemaillog', ['user_id', 'user_referred_id'])
# Adding unique constraint on 'FacebookOGReferredLog', fields ['user', 'user_referred']
db.create_unique('custom_facebookogreferredlog', ['user_id', 'user_referred_id'])
def backwards(self, orm):
# Removing unique constraint on 'FacebookOGReferredLog', fields ['user', 'user_referred']
db.delete_unique('custom_facebookogreferredlog', ['user_id', 'user_referred_id'])
# Removing unique constraint on 'FriendJoinedEmailLog', fields ['user', 'user_referred']
db.delete_unique('custom_friendjoinedemaillog', ['user_id', 'user_referred_id'])
# Deleting model 'FriendJoinedEmailLog'
db.delete_table('custom_friendjoinedemaillog')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'custom.facebookogreferredlog': {
'Meta': {'unique_together': "(('user', 'user_referred'),)", 'object_name': 'FacebookOGReferredLog'},
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'success': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user'", 'to': "orm['auth.User']"}),
'user_referred': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_referred'", 'to': "orm['auth.User']"})
},
'custom.facebookstatusupdate': {
'Meta': {'object_name': 'FacebookStatusUpdate'},
'caption': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'custom.facebookstatusupdatelog': {
'Meta': {'unique_together': "(('facebook_status_update', 'user'),)", 'object_name': 'FacebookStatusUpdateLog'},
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'facebook_status_update': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['custom.FacebookStatusUpdate']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'custom.friendjoinedemaillog': {
'Meta': {'unique_together': "(('user', 'user_referred'),)", 'object_name': 'FriendJoinedEmailLog'},
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_fje'", 'to': "orm['auth.User']"}),
'user_referred': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_referred_fje'", 'to': "orm['auth.User']"})
},
'custom.post': {
'Meta': {'object_name': 'Post'},
'content': ('django.db.models.fields.TextField', [], {}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'homepage': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256'}),
'published_date': ('django.db.models.fields.DateTimeField', [], {}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'custom.profile': {
'Meta': {'object_name': 'Profile'},
'enable_email_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'enable_facebook_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'enable_twitter_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'followers': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'blank': 'True'}),
'semaphore_facebook': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'semaphore_twitter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'social_data_completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source_referrer': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'user_referrer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user_referrer'", 'null': 'True', 'to': "orm['auth.User']"})
},
'custom.twitterautofriendshiplog': {
'Meta': {'object_name': 'TwitterAutoFriendshipLog'},
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'success': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'custom.twitterstatusupdate': {
'Meta': {'object_name': 'TwitterStatusUpdate'},
'content': ('django.db.models.fields.TextField', [], {}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'custom.twitterstatusupdatelog': {
'Meta': {'unique_together': "(('twitter_status_update', 'user'),)", 'object_name': 'TwitterStatusUpdateLog'},
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'twitter_status_update': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['custom.TwitterStatusUpdate']"}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['custom']
|
gpl-3.0
| 2,593,868,828,637,019,000 | 75.608187 | 182 | 0.569738 | false |
HugoDelval/ip-reputation-monitoring
|
reputation/db/postgres.py
|
1
|
4686
|
#
# Copyright (C) 2016, OVH SAS
#
# This file is part of ip-reputation-monitoring.
#
# ip-reputation-monitoring is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Everything you need to deal with the databases is here. """
from datetime import datetime
import psycopg2
from psycopg2 import extras
from config import secrets
class Postgres(object):
"""
This class is designed to provide everything needed to deal with postgres
In other words, this class is a typical data access object.
"""
def __enter__(self):
self._open()
return self
def __exit__(self, type_exc, value, traceback):
self._close()
return False
def _open(self):
"""
Open connection to PostgreSQL
"""
ssl = 'require' if secrets.SPAMHAUS_DB['secured'] else None
self._connection = psycopg2.connect(database=secrets.SPAMHAUS_DB['db'],
user=secrets.SPAMHAUS_DB['user'],
password=secrets.SPAMHAUS_DB['password'],
host=secrets.SPAMHAUS_DB['host'],
port=secrets.SPAMHAUS_DB['port'],
sslmode=ssl)
self._cursor = self._connection.cursor(cursor_factory=extras.DictCursor)
def _close(self):
""" Close db's connection. """
self._connection.close()
def update_spamhaus_entries(self, documents):
"""
Update or insert a spamhaus entry into the spamhaus table. For each entry that
is no longer active, set its `active` attribute to false.
:param list documents: List of dictionaries representing documents to upsert having at least those mandatory keys: [sbl_number, cidr, first_seen, cause]
"""
now = datetime.now()
# First upsert still active entries
for document in documents:
self._cursor.execute("INSERT INTO spamhaus (sbl_number, cidr, first_seen, cause) "
"VALUES (%(sbl_number)s, %(cidr)s, %(first_seen)s, %(cause)s) "
"ON CONFLICT (sbl_number) DO UPDATE SET "
" last_seen = %(now)s,"
" active = TRUE",
{
"sbl_number": document['sbl_number'],
"cidr": document['cidr'],
"now": now,
"first_seen": document["first_seen"],
"cause": document["cause"]
})
# Now, set inactive all active documents that are not in documents
active_ids = [doc['sbl_number'] for doc in documents]
self._cursor.execute("UPDATE spamhaus "
"SET active = FALSE "
"WHERE active = TRUE AND sbl_number NOT IN %(actives)s",
{
"actives": tuple(active_ids)
})
self._connection.commit()
def find_spamhaus_entries(self, is_active=None):
"""
Retrieve all registered spamhaus tickets.
:param bool is_active: (Optional) Filter tickets depending if they're still active or not.
:rtype: cursor
:return: All desired spamhaus tickets sorted by first_seen date (asc)
"""
if is_active is None:
self._cursor.execute("SELECT * FROM spamhaus "
"ORDER BY first_seen ASC")
return self._cursor.fetchall()
self._cursor.execute("SELECT * FROM spamhaus "
"WHERE active = %(active)s "
"ORDER BY first_seen ASC",
{
"active": is_active
})
return self._cursor.fetchall()
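# --- Hedged usage sketch (editorial addition, not part of the original module). ---
# Minimal example of the intended call pattern; the document values are invented and a
# reachable PostgreSQL server configured via config.secrets is assumed.
if __name__ == '__main__':
    sample_documents = [{
        'sbl_number': 'SBL123456',              # hypothetical SBL ticket number
        'cidr': '192.0.2.0/24',                 # documentation prefix, illustrative only
        'first_seen': datetime.now(),
        'cause': 'Example listing for illustration',
    }]
    with Postgres() as db:
        db.update_spamhaus_entries(sample_documents)
        for row in db.find_spamhaus_entries(is_active=True):
            print("%s %s" % (row['sbl_number'], row['cidr']))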
|
gpl-3.0
| 365,307,016,488,537,500 | 40.839286 | 164 | 0.528596 | false |
dmnielsen/zSALT
|
zsalt/mosred.py
|
1
|
3102
|
"""
mosred
Process MOS spectral reductions of the data and produce
the output spectra for each object
"""
import os, sys, glob, shutil
import numpy as np
import pyfits
from scipy.ndimage.filters import median_filter
from pyraf import iraf
from iraf import pysalt
from saltobslog import obslog
from specselfid import specselfid
from specslit import specslit
from specidentify import specidentify
from specrectify import specrectify
def mosred(infile_list, slitmask,propcode=None, dy=0, inter=True, guesstype='rss', guessfile='', rstep=100, automethod='Matchlines'):
#set up the files
infiles=','.join(['%s' % x for x in infile_list])
obsdate=os.path.basename(infile_list[0])[7:15]
#set up some files that will be needed
logfile='spec'+obsdate+'.log'
dbfile='spec%s.db' % obsdate
#create the observation log
obs_dict=obslog(infile_list)
#apply the mask to the data sets
for i in range(len(infile_list)):
specslit(image=infile_list[i], outimage='', outpref='s', exttype='rsmt', slitfile=slitmask,
outputslitfile='', regprefix='ds_', sections=3, width=25.0, sigma=2.2, thres=6.0, order=1, padding=5, yoffset=dy,
inter=False, clobber=True, logfile=logfile, verbose=True)
for i in range(len(infile_list)):
if obs_dict['OBJECT'][i].upper().strip()=='ARC' and obs_dict['PROPID'][i].upper().strip()==propcode:
lamp=obs_dict['LAMPID'][i].strip().replace(' ', '')
arcimage='s'+os.path.basename(infile_list[i])
if lamp == 'NONE': lamp='CuAr'
lampfile=iraf.osfn("../../%s.salt" % lamp)
specselfid(arcimage, '', 'a', arcimage, 'middlerow', 3, clobber=True, logfile=logfile, verbose=True)
specidentify('a'+arcimage, lampfile, dbfile, guesstype=guesstype,
guessfile=guessfile, automethod=automethod, function='legendre', order=3,
rstep=rstep, rstart='middlerow', mdiff=20, thresh=3, niter=5, smooth=3,
inter=False, clobber=True, logfile=logfile, verbose=True)
#specrectify(arcimage, outimages='', outpref='x', solfile=dbfile, caltype='line',
# function='legendre', order=3, inttype='interp', w1=None, w2=None, dw=None, nw=None,
# blank=0.0, clobber=True, logfile=logfile, verbose=True)
objimages=''
spec_list=[]
for i in range(len(infile_list)):
if obs_dict['CCDTYPE'][i].count('OBJECT') and obs_dict['INSTRUME'][i].count('RSS') and \
obs_dict['PROPID'][i].upper().strip()==propcode and \
obs_dict['OBSMODE'][i].count('SPECTROSCOPY'):
img = infile_list[i]
##rectify it
specselfid('s'+img, '', 'a', arcimage, 'middlerow', 3, clobber=True, logfile=logfile, verbose=True)
specrectify('as'+img, outimages='', outpref='x', solfile=dbfile, caltype='line',
function='legendre', order=3, inttype='interp', w1=None, w2=None, dw=None, nw=None,
blank=0.0, clobber=True, logfile=logfile, verbose=True)
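# --- Hedged usage sketch (editorial addition, not part of the original pipeline). ---
# Illustrates how mosred() is meant to be driven; the frame names, slit-mask file and
# proposal code below are placeholders rather than real SALT data products.
if __name__ == '__main__':
    example_images = sorted(glob.glob('mbxgpP*.fits'))   # prepared, mosaicked RSS frames (assumed naming)
    example_slitmask = 'P001234N01.xml'                   # hypothetical slit-mask definition file
    if example_images:
        mosred(example_images, example_slitmask, propcode='2015-1-SCI-042',
               dy=0, inter=False, guesstype='rss', guessfile='', rstep=100)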
|
bsd-3-clause
| -4,401,168,587,431,947,000 | 39.285714 | 133 | 0.628949 | false |
tensorflow/probability
|
tensorflow_probability/python/distributions/poisson_test.py
|
1
|
22478
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from scipy import stats
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.distributions import poisson as poisson_lib
from tensorflow_probability.python.distributions.internal import statistical_testing as st
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import implementation_selection
from tensorflow_probability.python.internal import test_util
tfd = tfp.distributions
@test_util.test_all_tf_execution_regimes
class PoissonTest(test_util.TestCase):
def _make_poisson(self,
rate,
validate_args=True,
force_probs_to_zero_outside_support=False):
return tfd.Poisson(
rate=rate,
validate_args=validate_args,
force_probs_to_zero_outside_support=force_probs_to_zero_outside_support)
def testPoissonShape(self):
lam = tf.constant([3.0] * 5)
poisson = self._make_poisson(rate=lam)
self.assertEqual(self.evaluate(poisson.batch_shape_tensor()), (5,))
self.assertEqual(poisson.batch_shape, tf.TensorShape([5]))
self.assertAllEqual(self.evaluate(poisson.event_shape_tensor()), [])
self.assertEqual(poisson.event_shape, tf.TensorShape([]))
def testInvalidLam(self):
invalid_lams = [-.01, -1., -2.]
for lam in invalid_lams:
with self.assertRaisesOpError('Argument `rate` must be non-negative.'):
poisson = self._make_poisson(rate=lam)
self.evaluate(poisson.rate_parameter())
def testZeroLam(self):
lam = 0.
poisson = tfd.Poisson(rate=lam, validate_args=True)
self.assertAllClose(lam, self.evaluate(poisson.rate))
self.assertAllClose(0., poisson.prob(3))
self.assertAllClose(1., poisson.prob(0))
self.assertAllClose(0., poisson.log_prob(0))
def testPoissonLogPmfDiscreteMatchesScipy(self):
batch_size = 12
lam = tf.constant([3.0] * batch_size)
lam_v = 3.0
x = np.array([-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.],
dtype=np.float32)
poisson = self._make_poisson(
rate=lam, force_probs_to_zero_outside_support=True, validate_args=False)
log_pmf = poisson.log_prob(x)
self.assertEqual(log_pmf.shape, (batch_size,))
self.assertAllClose(self.evaluate(log_pmf), stats.poisson.logpmf(x, lam_v))
pmf = poisson.prob(x)
self.assertEqual(pmf.shape, (batch_size,))
self.assertAllClose(self.evaluate(pmf), stats.poisson.pmf(x, lam_v))
def testPoissonLogPmfContinuousRelaxation(self):
batch_size = 12
lam = tf.constant([3.0] * batch_size)
x = tf.constant([-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.])
poisson = self._make_poisson(
rate=lam,
force_probs_to_zero_outside_support=False,
validate_args=False)
expected_continuous_log_pmf = (
x * poisson.log_rate_parameter()
- tf.math.lgamma(1. + x) - poisson.rate_parameter())
expected_continuous_log_pmf = tf.where(
x >= 0., expected_continuous_log_pmf,
dtype_util.as_numpy_dtype(
expected_continuous_log_pmf.dtype)(-np.inf))
expected_continuous_pmf = tf.exp(expected_continuous_log_pmf)
log_pmf = poisson.log_prob(x)
self.assertEqual((batch_size,), log_pmf.shape)
self.assertAllClose(self.evaluate(log_pmf),
self.evaluate(expected_continuous_log_pmf))
pmf = poisson.prob(x)
self.assertEqual((batch_size,), pmf.shape)
self.assertAllClose(self.evaluate(pmf),
self.evaluate(expected_continuous_pmf))
@test_util.numpy_disable_gradient_test
def testPoissonLogPmfGradient(self):
batch_size = 6
lam = tf.constant([3.0] * batch_size)
lam_v = 3.0
# Only non-negative values, as negative ones cause nans in the expected
# value.
x = np.array([0., 2., 3., 4., 5., 6.], dtype=np.float32)
_, dlog_pmf_dlam = self.evaluate(tfp.math.value_and_gradient(
lambda lam: self._make_poisson(rate=lam).log_prob(x), lam))
# A finite difference approximation of the derivative.
eps = 1e-6
expected = (stats.poisson.logpmf(x, lam_v + eps)
- stats.poisson.logpmf(x, lam_v - eps)) / (2 * eps)
self.assertEqual(dlog_pmf_dlam.shape, (batch_size,))
self.assertAllClose(dlog_pmf_dlam, expected)
@test_util.numpy_disable_gradient_test
def testPoissonLogPmfGradientAtZeroPmf(self):
# Check that the derivative wrt parameter at the zero-prob points is zero.
batch_size = 6
lam = tf.constant([3.0] * batch_size)
x = tf.constant([-2., -1., -0.5, 0.2, 1.5, 10.5])
def poisson_log_prob(lam):
return self._make_poisson(
rate=lam,
force_probs_to_zero_outside_support=True,
validate_args=False).log_prob(x)
_, dlog_pmf_dlam = self.evaluate(tfp.math.value_and_gradient(
poisson_log_prob, lam))
self.assertEqual(dlog_pmf_dlam.shape, (batch_size,))
self.assertAllClose(dlog_pmf_dlam, np.zeros([batch_size]))
def testPoissonLogPmfMultidimensional(self):
batch_size = 6
lam = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
lam_v = np.array([2.0, 4.0, 5.0], dtype=np.float32)
x = np.array([[2., 3., 4., 5., 6., 7.]], dtype=np.float32).T
poisson = self._make_poisson(rate=lam)
log_pmf = poisson.log_prob(x)
self.assertEqual(log_pmf.shape, (6, 3))
self.assertAllClose(self.evaluate(log_pmf), stats.poisson.logpmf(x, lam_v))
pmf = poisson.prob(x)
self.assertEqual(pmf.shape, (6, 3))
self.assertAllClose(self.evaluate(pmf), stats.poisson.pmf(x, lam_v))
@test_util.jax_disable_test_missing_functionality(
'`tf.math.igammac` is unimplemented in JAX backend.')
def testPoissonCdf(self):
batch_size = 12
lam = tf.constant([3.0] * batch_size)
lam_v = 3.0
x = np.array([-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.],
dtype=np.float32)
poisson = self._make_poisson(
rate=lam, force_probs_to_zero_outside_support=True, validate_args=False)
log_cdf = poisson.log_cdf(x)
self.assertEqual(log_cdf.shape, (batch_size,))
self.assertAllClose(self.evaluate(log_cdf), stats.poisson.logcdf(x, lam_v))
cdf = poisson.cdf(x)
self.assertEqual(cdf.shape, (batch_size,))
self.assertAllClose(self.evaluate(cdf), stats.poisson.cdf(x, lam_v))
def testPoissonSurvivalFunction(self):
batch_size = 12
lam = tf.constant([3.0] * batch_size)
lam_v = 3.0
x = np.array([-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.],
dtype=np.float32)
poisson = self._make_poisson(
rate=lam, force_probs_to_zero_outside_support=True, validate_args=False)
log_survival = poisson.log_survival_function(x)
self.assertEqual(log_survival.shape, (batch_size,))
self.assertAllClose(
self.evaluate(log_survival), stats.poisson.logsf(x, lam_v))
survival = poisson.survival_function(x)
self.assertEqual(survival.shape, (batch_size,))
self.assertAllClose(self.evaluate(survival), stats.poisson.sf(x, lam_v))
small_probs = tfd.Poisson(rate=0.123).log_survival_function(
np.linspace(10, 19, 10))
self.assertAllFinite(self.evaluate(small_probs))
@test_util.jax_disable_test_missing_functionality(
'`tf.math.igammac` is unimplemented in JAX backend.')
def testPoissonCdfContinuousRelaxation(self):
batch_size = 12
lam = tf.constant([3.0] * batch_size)
x = np.array([-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.],
dtype=np.float32)
expected_continuous_cdf = tf.math.igammac(1. + x, lam)
expected_continuous_cdf = tf.where(x >= 0., expected_continuous_cdf,
tf.zeros_like(expected_continuous_cdf))
expected_continuous_log_cdf = tf.math.log(expected_continuous_cdf)
poisson = self._make_poisson(
rate=lam,
force_probs_to_zero_outside_support=False, validate_args=False)
log_cdf = poisson.log_cdf(x)
self.assertEqual(log_cdf.shape, (batch_size,))
self.assertAllClose(self.evaluate(log_cdf),
self.evaluate(expected_continuous_log_cdf))
cdf = poisson.cdf(x)
self.assertEqual(cdf.shape, (batch_size,))
self.assertAllClose(self.evaluate(cdf),
self.evaluate(expected_continuous_cdf))
@test_util.jax_disable_test_missing_functionality(
'`tf.math.igammac` is unimplemented in JAX backend.')
@test_util.numpy_disable_gradient_test
def testPoissonCdfGradient(self):
batch_size = 12
lam = tf.constant([3.0] * batch_size)
lam_v = 3.0
x = np.array([-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.],
dtype=np.float32)
def cdf(lam):
return self._make_poisson(
rate=lam,
force_probs_to_zero_outside_support=True,
validate_args=False).cdf(x)
_, dcdf_dlam = self.evaluate(tfp.math.value_and_gradient(cdf, lam))
# A finite difference approximation of the derivative.
eps = 1e-6
expected = (stats.poisson.cdf(x, lam_v + eps)
- stats.poisson.cdf(x, lam_v - eps)) / (2 * eps)
self.assertEqual(dcdf_dlam.shape, (batch_size,))
self.assertAllClose(dcdf_dlam, expected)
@test_util.jax_disable_test_missing_functionality(
'`tf.math.igammac` is unimplemented in JAX backend.')
def testPoissonCdfMultidimensional(self):
batch_size = 6
lam = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
lam_v = np.array([2.0, 4.0, 5.0], dtype=np.float32)
x = np.array([[2., 3., 4., 5., 6., 7.]], dtype=np.float32).T
poisson = self._make_poisson(
rate=lam, force_probs_to_zero_outside_support=True)
log_cdf = poisson.log_cdf(x)
self.assertEqual(log_cdf.shape, (6, 3))
self.assertAllClose(self.evaluate(log_cdf), stats.poisson.logcdf(x, lam_v))
cdf = poisson.cdf(x)
self.assertEqual(cdf.shape, (6, 3))
self.assertAllClose(self.evaluate(cdf), stats.poisson.cdf(x, lam_v))
def testPoissonMean(self):
lam_v = np.array([1.0, 3.0, 2.5], dtype=np.float32)
poisson = self._make_poisson(rate=lam_v)
self.assertEqual(poisson.mean().shape, (3,))
self.assertAllClose(
self.evaluate(poisson.mean()), stats.poisson.mean(lam_v))
self.assertAllClose(self.evaluate(poisson.mean()), lam_v)
def testPoissonVariance(self):
lam_v = np.array([1.0, 3.0, 2.5], dtype=np.float32)
poisson = self._make_poisson(rate=lam_v)
self.assertEqual(poisson.variance().shape, (3,))
self.assertAllClose(
self.evaluate(poisson.variance()), stats.poisson.var(lam_v))
self.assertAllClose(self.evaluate(poisson.variance()), lam_v)
def testPoissonStd(self):
lam_v = np.array([1.0, 3.0, 2.5], dtype=np.float32)
poisson = self._make_poisson(rate=lam_v)
self.assertEqual(poisson.stddev().shape, (3,))
self.assertAllClose(
self.evaluate(poisson.stddev()), stats.poisson.std(lam_v))
self.assertAllClose(self.evaluate(poisson.stddev()), np.sqrt(lam_v))
def testPoissonMode(self):
lam_v = np.array([1.0, 3.0, 2.5, 3.2, 1.1, 0.05], dtype=np.float32)
poisson = self._make_poisson(rate=lam_v)
self.assertEqual(poisson.mode().shape, (6,))
self.assertAllClose(self.evaluate(poisson.mode()), np.floor(lam_v))
def testPoissonMultipleMode(self):
lam_v = np.array([1.0, 3.0, 2.0, 4.0, 5.0, 10.0], dtype=np.float32)
poisson = self._make_poisson(rate=lam_v)
# For the case where lam is an integer, the modes are: lam and lam - 1.
# In this case, we get back the larger of the two modes.
self.assertEqual((6,), poisson.mode().shape)
self.assertAllClose(lam_v, self.evaluate(poisson.mode()))
def testPoissonSample(self):
lam_v = 4.0
lam = tf.constant(lam_v)
# Choosing `n >= (k/rtol)**2, roughly ensures our sample mean should be
# within `k` std. deviations of actual up to rtol precision.
n = int(100e3)
poisson = self._make_poisson(rate=lam)
samples = poisson.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual(samples.shape, (n,))
self.assertEqual(sample_values.shape, (n,))
self.assertAllClose(
sample_values.mean(), stats.poisson.mean(lam_v), rtol=.01)
self.assertAllClose(sample_values.var(), stats.poisson.var(lam_v),
rtol=.013)
def testAssertValidSample(self):
lam_v = np.array([1.0, 3.0, 2.5], dtype=np.float32)
poisson = self._make_poisson(rate=lam_v)
with self.assertRaisesOpError('Condition x >= 0'):
self.evaluate(poisson.cdf([-1.2, 3., 4.2]))
def testPoissonSampleMultidimensionalMean(self):
lam_v = np.array([np.arange(1, 51, dtype=np.float32)]) # 1 x 50
poisson = self._make_poisson(rate=lam_v)
# Choosing `n >= (k/rtol)**2, roughly ensures our sample mean should be
# within `k` std. deviations of actual up to rtol precision.
n = int(100e3)
samples = poisson.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual(samples.shape, (n, 1, 50))
self.assertEqual(sample_values.shape, (n, 1, 50))
self.assertAllClose(
sample_values.mean(axis=0), stats.poisson.mean(lam_v), rtol=.01, atol=0)
def testPoissonSampleMultidimensionalVariance(self):
lam_v = np.array([np.arange(5, 15, dtype=np.float32)]) # 1 x 10
poisson = self._make_poisson(rate=lam_v)
# Choosing `n >= 2 * lam * (k/rtol)**2, roughly ensures our sample
# variance should be within `k` std. deviations of actual up to rtol
# precision.
n = int(300e3)
samples = poisson.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual(samples.shape, (n, 1, 10))
self.assertEqual(sample_values.shape, (n, 1, 10))
self.assertAllClose(
sample_values.var(axis=0), stats.poisson.var(lam_v), rtol=.03, atol=0)
@test_util.tf_tape_safety_test
def testGradientThroughRate(self):
rate = tf.Variable(3.)
dist = self._make_poisson(rate=rate)
with tf.GradientTape() as tape:
loss = -dist.log_prob([1., 2., 4.])
grad = tape.gradient(loss, dist.trainable_variables)
self.assertLen(grad, 1)
self.assertAllNotNone(grad)
def testAssertsNonNegativeRate(self):
rate = tf.Variable([1., 2., -3.])
self.evaluate(rate.initializer)
with self.assertRaisesOpError('Argument `rate` must be non-negative.'):
dist = self._make_poisson(rate=rate, validate_args=True)
self.evaluate(dist.sample(seed=test_util.test_seed()))
def testAssertsNonNegativeRateAfterMutation(self):
rate = tf.Variable([1., 2., 3.])
self.evaluate(rate.initializer)
dist = self._make_poisson(rate=rate, validate_args=True)
self.evaluate(dist.mean())
with self.assertRaisesOpError('Argument `rate` must be non-negative.'):
with tf.control_dependencies([rate.assign([1., 2., -3.])]):
self.evaluate(dist.sample(seed=test_util.test_seed()))
@test_util.test_all_tf_execution_regimes
class PoissonLogRateTest(PoissonTest):
def _make_poisson(self,
rate,
validate_args=True,
force_probs_to_zero_outside_support=False):
return tfd.Poisson(
log_rate=tf.math.log(rate),
validate_args=validate_args,
force_probs_to_zero_outside_support=force_probs_to_zero_outside_support)
# No need to worry about the non-negativity of `rate` when using the
# `log_rate` parameterization.
def testInvalidLam(self):
pass
def testAssertsNonNegativeRate(self):
pass
def testAssertsNonNegativeRateAfterMutation(self):
pass
# The gradient is not tracked through tf.math.log(rate) in _make_poisson(),
# so log_rate needs to be defined as a Variable and passed directly.
@test_util.tf_tape_safety_test
def testGradientThroughRate(self):
log_rate = tf.Variable(3.)
dist = tfd.Poisson(log_rate=log_rate, validate_args=True)
with tf.GradientTape() as tape:
loss = -dist.log_prob([1., 2., 4.])
grad = tape.gradient(loss, dist.trainable_variables)
self.assertLen(grad, 1)
self.assertAllNotNone(grad)
@test_util.test_graph_and_eager_modes
class PoissonSamplingTest(test_util.TestCase):
@test_util.jax_disable_test_missing_functionality('tf stateless_poisson')
def testSampleCPU(self):
with tf.device('CPU'):
_, runtime = self.evaluate(
poisson_lib.random_poisson(
shape=tf.constant([], dtype=tf.int32),
rates=tf.constant(10.),
seed=test_util.test_seed()))
self.assertEqual(implementation_selection._RUNTIME_CPU, runtime)
def testSampleGPU(self):
if not tf.test.is_gpu_available():
self.skipTest('no GPU')
with tf.device('GPU'):
_, runtime = self.evaluate(poisson_lib.random_poisson(
shape=tf.constant([], dtype=tf.int32),
rates=tf.constant(10.),
seed=test_util.test_seed()))
self.assertEqual(implementation_selection._RUNTIME_DEFAULT, runtime)
def testSampleXLA(self):
self.skip_if_no_xla()
if not tf.executing_eagerly(): return # jit_compile is eager-only.
log_rates = np.random.rand(4, 3).astype(np.float32)
dist = tfd.Poisson(log_rate=log_rates, validate_args=True)
# Verify the compile succeeds going all the way through the distribution.
self.evaluate(
tf.function(lambda: dist.sample(5, seed=test_util.test_seed()),
jit_compile=True)())
# Also test the low-level sampler and verify the XLA-friendly variant.
_, runtime = self.evaluate(
tf.function(poisson_lib.random_poisson, jit_compile=True)(
shape=tf.constant([], dtype=tf.int32),
rates=tf.constant(10.),
seed=test_util.test_seed()))
self.assertEqual(implementation_selection._RUNTIME_DEFAULT, runtime)
def testSamplePoissonLowRates(self):
# Low log rate (< log(10.)) samples would use Knuth's algorithm.
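    # Illustrative sketch only (an assumption about the general technique, not
    # necessarily this library's exact implementation): Knuth's method keeps
    # multiplying uniform draws until the running product falls below
    # exp(-rate); the number of draws made before that point is the sample.
    #
    #   def knuth_poisson(rate, uniform):  # hypothetical helper
    #     limit, k, p = np.exp(-rate), 0, 1.0
    #     while True:
    #       p *= uniform()
    #       if p <= limit:
    #         return k
    #       k += 1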
rate = [1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5]
log_rate = np.log(rate)
num_samples = int(1e5)
self.assertLess(
self.evaluate(
st.min_num_samples_for_dkwm_cdf_test(
discrepancy=0.04, false_fail_rate=1e-9, false_pass_rate=1e-9)),
num_samples)
samples = poisson_lib._random_poisson_noncpu(
shape=[num_samples],
log_rates=log_rate,
output_dtype=tf.float64,
seed=test_util.test_seed())
poisson = tfd.Poisson(log_rate=log_rate, validate_args=True)
self.evaluate(
st.assert_true_cdf_equal_by_dkwm(
samples,
poisson.cdf,
st.left_continuous_cdf_discrete_distribution(poisson),
false_fail_rate=1e-9))
self.assertAllClose(
self.evaluate(tf.math.reduce_mean(samples, axis=0)),
stats.poisson.mean(rate),
rtol=0.01)
self.assertAllClose(
self.evaluate(tf.math.reduce_variance(samples, axis=0)),
stats.poisson.var(rate),
rtol=0.05)
def testSamplePoissonHighRates(self):
# High rate (>= log(10.)) samples would use rejection sampling.
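    # Illustrative note (general technique, not necessarily this sampler's
    # exact algorithm): rejection sampling draws a proposal x from an envelope
    # distribution g with p(x) <= M * g(x) and accepts it with probability
    # p(x) / (M * g(x)); for large rates this keeps the expected number of
    # uniforms per sample roughly constant, unlike Knuth's method whose cost
    # grows linearly with the rate.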
rate = [10., 10.5, 11., 11.5, 12.0, 12.5, 13.0, 13.5, 14.0, 14.5]
log_rate = np.log(rate)
num_samples = int(1e5)
self.assertLess(
self.evaluate(
st.min_num_samples_for_dkwm_cdf_test(
discrepancy=0.04, false_fail_rate=1e-9, false_pass_rate=1e-9)),
num_samples)
samples = poisson_lib._random_poisson_noncpu(
shape=[num_samples],
log_rates=log_rate,
output_dtype=tf.float64,
seed=test_util.test_seed())
poisson = tfd.Poisson(log_rate=log_rate, validate_args=True)
self.evaluate(
st.assert_true_cdf_equal_by_dkwm(
samples,
poisson.cdf,
st.left_continuous_cdf_discrete_distribution(poisson),
false_fail_rate=1e-9))
self.assertAllClose(
self.evaluate(tf.math.reduce_mean(samples, axis=0)),
stats.poisson.mean(rate),
rtol=0.01)
self.assertAllClose(
self.evaluate(tf.math.reduce_variance(samples, axis=0)),
stats.poisson.var(rate),
rtol=0.05)
def testSamplePoissonLowAndHighRates(self):
rate = [1., 3., 5., 6., 7., 10., 13.0, 14., 15., 18.]
log_rate = np.log(rate)
num_samples = int(1e5)
poisson = tfd.Poisson(log_rate=log_rate, validate_args=True)
self.assertLess(
self.evaluate(
st.min_num_samples_for_dkwm_cdf_test(
discrepancy=0.04, false_fail_rate=1e-9, false_pass_rate=1e-9)),
num_samples)
samples = poisson_lib._random_poisson_noncpu(
shape=[num_samples],
log_rates=log_rate,
output_dtype=tf.float64,
seed=test_util.test_seed())
self.evaluate(
st.assert_true_cdf_equal_by_dkwm(
samples,
poisson.cdf,
st.left_continuous_cdf_discrete_distribution(poisson),
false_fail_rate=1e-9))
def testSamplePoissonInvalidRates(self):
rate = [np.nan, -1., 0., 5., 7., 10., 13.0, 14., 15., 18.]
log_rate = np.log(rate)
samples = self.evaluate(
poisson_lib._random_poisson_noncpu(
shape=[int(1e5)],
log_rates=log_rate,
output_dtype=tf.float64,
seed=test_util.test_seed()))
self.assertAllClose(
self.evaluate(tf.math.reduce_mean(samples, axis=0)),
stats.poisson.mean(rate),
rtol=0.01)
self.assertAllClose(
self.evaluate(tf.math.reduce_variance(samples, axis=0)),
stats.poisson.var(rate),
rtol=0.05)
if __name__ == '__main__':
tf.test.main()
|
apache-2.0
| -6,222,672,986,908,313,000 | 37.822107 | 90 | 0.643518 | false |
petrkotas/twitter-feed
|
twitter_feed/mock/tweets.py
|
1
|
19865
|
HOME_TIMELINE = [
{
'coordinates': None,
'truncated': False,
'created_at': 'Tue Aug 28 21:16:23 +0000 2012',
'favorited': False,
'id_str': '240558470661799936',
'in_reply_to_user_id_str': None,
'entities': {
'urls': [
],
'hashtags': [
],
'user_mentions': [
]
},
'text': 'just another test',
'contributors': None,
'id': 240558470661799936,
'retweet_count': 0,
'in_reply_to_status_id_str': None,
'geo': None,
'retweeted': False,
'in_reply_to_user_id': None,
'place': None,
'source': 'OAuth Dancer Reborn',
'user': {
'name': 'OAuth Dancer',
'profile_sidebar_fill_color': 'DDEEF6',
'profile_background_tile': True,
'profile_sidebar_border_color': 'C0DEED',
'profile_image_url': 'http://a0.twimg.com/profile_images/730275945/oauth-dancer_normal.jpg',
'created_at': 'Wed Mar 03 19:37:35 +0000 2010',
'location': 'San Francisco, CA',
'follow_request_sent': False,
'id_str': '119476949',
'is_translator': False,
'profile_link_color': '0084B4',
'entities': {
'url': {
'urls': [
{
'expanded_url': None,
'url': 'http://bit.ly/oauth-dancer',
'indices': [
0,
26
],
'display_url': None
}
]
},
'description': None
},
'default_profile': False,
'url': 'http://bit.ly/oauth-dancer',
'contributors_enabled': False,
'favourites_count': 7,
'utc_offset': None,
'profile_image_url_https': 'https://si0.twimg.com/profile_images/730275945/oauth-dancer_normal.jpg',
'id': 119476949,
'listed_count': 1,
'profile_use_background_image': True,
'profile_text_color': '333333',
'followers_count': 28,
'lang': 'en',
'protected': False,
'geo_enabled': True,
'notifications': False,
'description': '',
'profile_background_color': 'C0DEED',
'verified': False,
'time_zone': None,
'profile_background_image_url_https': 'https://si0.twimg.com/profile_background_images/80151733/oauth-dance.png',
'statuses_count': 166,
'profile_background_image_url': 'http://a0.twimg.com/profile_background_images/80151733/oauth-dance.png',
'default_profile_image': False,
'friends_count': 14,
'following': False,
'show_all_inline_media': False,
'screen_name': 'oauth_dancer'
},
'in_reply_to_screen_name': None,
'in_reply_to_status_id': None
},
{
'coordinates': {
'coordinates': [
-122.25831,
37.871609
],
'type': 'Point'
},
'truncated': False,
'created_at': 'Tue Aug 28 21:08:15 +0000 2012',
'favorited': False,
'id_str': '240556426106372096',
'in_reply_to_user_id_str': None,
'entities': {
'urls': [
{
'expanded_url': 'http://blogs.ischool.berkeley.edu/i290-abdt-s12/',
'url': 'http://t.co/bfj7zkDJ',
'indices': [
79,
99
],
'display_url': 'blogs.ischool.berkeley.edu/i290-abdt-s12/'
}
],
'hashtags': [
],
'user_mentions': [
{
'name': 'Cal',
'id_str': '17445752',
'id': 17445752,
'indices': [
60,
64
],
'screen_name': 'Cal'
},
{
'name': 'Othman Laraki',
'id_str': '20495814',
'id': 20495814,
'indices': [
70,
77
],
'screen_name': 'othman'
}
]
},
'text': 'lecturing at the \'analyzing big data with twitter\' class at @cal with @othman http://t.co/bfj7zkDJ',
'contributors': None,
'id': 240556426106372096,
'retweet_count': 3,
'in_reply_to_status_id_str': None,
'geo': {
'coordinates': [
37.871609,
-122.25831
],
'type': 'Point'
},
'retweeted': False,
'possibly_sensitive': False,
'in_reply_to_user_id': None,
'place': {
'name': 'Berkeley',
'country_code': 'US',
'country': 'United States',
'attributes': {
},
'url': 'http://api.twitter.com/1/geo/id/5ef5b7f391e30aff.json',
'id': '5ef5b7f391e30aff',
'bounding_box': {
'coordinates': [
[
[
-122.367781,
37.835727
],
[
-122.234185,
37.835727
],
[
-122.234185,
37.905824
],
[
-122.367781,
37.905824
]
]
],
'type': 'Polygon'
},
'full_name': 'Berkeley, CA',
'place_type': 'city'
},
'source': 'Safari on iOS',
'user': {
'name': 'Raffi Krikorian',
'profile_sidebar_fill_color': 'DDEEF6',
'profile_background_tile': False,
'profile_sidebar_border_color': 'C0DEED',
'profile_image_url': 'http://a0.twimg.com/profile_images/1270234259/raffi-headshot-casual_normal.png',
'created_at': 'Sun Aug 19 14:24:06 +0000 2007',
'location': 'San Francisco, California',
'follow_request_sent': False,
'id_str': '8285392',
'is_translator': False,
'profile_link_color': '0084B4',
'entities': {
'url': {
'urls': [
{
'expanded_url': 'http://about.me/raffi.krikorian',
'url': 'http://t.co/eNmnM6q',
'indices': [
0,
19
],
'display_url': 'about.me/raffi.krikorian'
}
]
},
'description': {
'urls': [
]
}
},
'default_profile': True,
'url': 'http://t.co/eNmnM6q',
'contributors_enabled': False,
'favourites_count': 724,
'utc_offset': -28800,
'profile_image_url_https': 'https://si0.twimg.com/profile_images/1270234259/raffi-headshot-casual_normal.png',
'id': 8285392,
'listed_count': 619,
'profile_use_background_image': True,
'profile_text_color': '333333',
'followers_count': 18752,
'lang': 'en',
'protected': False,
'geo_enabled': True,
'notifications': False,
'description': 'Director of @twittereng\'s Platform Services. I break things.',
'profile_background_color': 'C0DEED',
'verified': False,
'time_zone': 'Pacific Time (US & Canada)',
'profile_background_image_url_https': 'https://si0.twimg.com/images/themes/theme1/bg.png',
'statuses_count': 5007,
'profile_background_image_url': 'http://a0.twimg.com/images/themes/theme1/bg.png',
'default_profile_image': False,
'friends_count': 701,
'following': True,
'show_all_inline_media': True,
'screen_name': 'raffi'
},
'in_reply_to_screen_name': None,
'in_reply_to_status_id': None
},
{
'coordinates': None,
'truncated': False,
'created_at': 'Tue Aug 28 19:59:34 +0000 2012',
'favorited': False,
'id_str': '240539141056638977',
'in_reply_to_user_id_str': None,
'entities': {
'urls': [
],
'hashtags': [
],
'user_mentions': [
]
},
'text': 'You\'d be right more often if you thought you were wrong.',
'contributors': None,
'id': 240539141056638977,
'retweet_count': 1,
'in_reply_to_status_id_str': None,
'geo': None,
'retweeted': False,
'in_reply_to_user_id': None,
'place': None,
'source': 'web',
'user': {
'name': 'Taylor Singletary',
'profile_sidebar_fill_color': 'FBFBFB',
'profile_background_tile': True,
'profile_sidebar_border_color': '000000',
'profile_image_url': 'http://a0.twimg.com/profile_images/2546730059/f6a8zq58mg1hn0ha8vie_normal.jpeg',
'created_at': 'Wed Mar 07 22:23:19 +0000 2007',
'location': 'San Francisco, CA',
'follow_request_sent': False,
'id_str': '819797',
'is_translator': False,
'profile_link_color': 'c71818',
'entities': {
'url': {
'urls': [
{
'expanded_url': 'http://www.rebelmouse.com/episod/',
'url': 'http://t.co/Lxw7upbN',
'indices': [
0,
20
],
'display_url': 'rebelmouse.com/episod/'
}
]
},
'description': {
'urls': [
]
}
},
'default_profile': False,
'url': 'http://t.co/Lxw7upbN',
'contributors_enabled': False,
'favourites_count': 15990,
'utc_offset': -28800,
'profile_image_url_https': 'https://si0.twimg.com/profile_images/2546730059/f6a8zq58mg1hn0ha8vie_normal.jpeg',
'id': 819797,
'listed_count': 340,
'profile_use_background_image': True,
'profile_text_color': 'D20909',
'followers_count': 7126,
'lang': 'en',
'protected': False,
'geo_enabled': True,
'notifications': False,
'description': 'Reality Technician, Twitter API team, synthesizer enthusiast; a most excellent adventure in timelines. I know it\'s hard to believe in something you can\'t see.',
'profile_background_color': '000000',
'verified': False,
'time_zone': 'Pacific Time (US & Canada)',
'profile_background_image_url_https': 'https://si0.twimg.com/profile_background_images/643655842/hzfv12wini4q60zzrthg.png',
'statuses_count': 18076,
'profile_background_image_url': 'http://a0.twimg.com/profile_background_images/643655842/hzfv12wini4q60zzrthg.png',
'default_profile_image': False,
'friends_count': 5444,
'following': True,
'show_all_inline_media': True,
'screen_name': 'episod'
},
'in_reply_to_screen_name': None,
'in_reply_to_status_id': None
}
]
USER_TIMELINE = [
{
'coordinates': None,
'favorited': False,
'truncated': False,
'created_at': 'Wed Aug 29 17:12:58 +0000 2012',
'id_str': '240859602684612608',
'entities': {
'urls': [
{
'expanded_url': '/blog/twitter-certified-products',
'url': 'https://t.co/MjJ8xAnT',
'indices': [
52,
73
],
'display_url': 'dev.twitter.com/blog/twitter-c\u2026'
}
],
'hashtags': [
],
'user_mentions': [
]
},
'in_reply_to_user_id_str': None,
'contributors': None,
'text': 'Introducing the Twitter Certified Products Program: https://t.co/MjJ8xAnT',
'retweet_count': 121,
'in_reply_to_status_id_str': None,
'id': 240859602684612608,
'geo': None,
'retweeted': False,
'possibly_sensitive': False,
'in_reply_to_user_id': None,
'place': None,
'user': {
'profile_sidebar_fill_color': 'DDEEF6',
'profile_sidebar_border_color': 'C0DEED',
'profile_background_tile': False,
'name': 'Twitter API',
'profile_image_url': 'http://a0.twimg.com/profile_images/2284174872/7df3h38zabcvjylnyfe3_normal.png',
'created_at': 'Wed May 23 06:01:13 +0000 2007',
'location': 'San Francisco, CA',
'follow_request_sent': False,
'profile_link_color': '0084B4',
'is_translator': False,
'id_str': '6253282',
'entities': {
'url': {
'urls': [
{
'expanded_url': None,
'url': '',
'indices': [
0,
22
]
}
]
},
'description': {
'urls': [
]
}
},
'default_profile': True,
'contributors_enabled': True,
'favourites_count': 24,
'url': '',
'profile_image_url_https': 'https://si0.twimg.com/profile_images/2284174872/7df3h38zabcvjylnyfe3_normal.png',
'utc_offset': -28800,
'id': 6253282,
'profile_use_background_image': True,
'listed_count': 10775,
'profile_text_color': '333333',
'lang': 'en',
'followers_count': 1212864,
'protected': False,
'notifications': None,
'profile_background_image_url_https': 'https://si0.twimg.com/images/themes/theme1/bg.png',
'profile_background_color': 'C0DEED',
'verified': True,
'geo_enabled': True,
'time_zone': 'Pacific Time (US & Canada)',
'description': 'The Real Twitter API. I tweet about API changes, service issues and happily answer questions about Twitter and our API. Don\'t get an answer? It\'s on my website.',
'default_profile_image': False,
'profile_background_image_url': 'http://a0.twimg.com/images/themes/theme1/bg.png',
'statuses_count': 3333,
'friends_count': 31,
'following': None,
'show_all_inline_media': False,
'screen_name': 'twitterapi'
},
'in_reply_to_screen_name': None,
'source': 'YoruFukurou',
'in_reply_to_status_id': None
},
{
'coordinates': None,
'favorited': False,
'truncated': False,
'created_at': 'Sat Aug 25 17:26:51 +0000 2012',
'id_str': '239413543487819778',
'entities': {
'urls': [
{
'expanded_url': '/issues/485',
'url': 'https://t.co/p5bOzH0k',
'indices': [
97,
118
],
'display_url': 'dev.twitter.com/issues/485'
}
],
'hashtags': [
],
'user_mentions': [
]
},
'in_reply_to_user_id_str': None,
'contributors': None,
'text': 'We are working to resolve issues with application management & logging in to the dev portal: https://t.co/p5bOzH0k ^TS',
'retweet_count': 105,
'in_reply_to_status_id_str': None,
'id': 239413543487819778,
'geo': None,
'retweeted': False,
'possibly_sensitive': False,
'in_reply_to_user_id': None,
'place': None,
'user': {
'profile_sidebar_fill_color': 'DDEEF6',
'profile_sidebar_border_color': 'C0DEED',
'profile_background_tile': False,
'name': 'Twitter API',
'profile_image_url': 'http://a0.twimg.com/profile_images/2284174872/7df3h38zabcvjylnyfe3_normal.png',
'created_at': 'Wed May 23 06:01:13 +0000 2007',
'location': 'San Francisco, CA',
'follow_request_sent': False,
'profile_link_color': '0084B4',
'is_translator': False,
'id_str': '6253282',
'entities': {
'url': {
'urls': [
{
'expanded_url': None,
'url': '',
'indices': [
0,
22
]
}
]
},
'description': {
'urls': [
]
}
},
'default_profile': True,
'contributors_enabled': True,
'favourites_count': 24,
'url': '',
'profile_image_url_https': 'https://si0.twimg.com/profile_images/2284174872/7df3h38zabcvjylnyfe3_normal.png',
'utc_offset': -28800,
'id': 6253282,
'profile_use_background_image': True,
'listed_count': 10775,
'profile_text_color': '333333',
'lang': 'en',
'followers_count': 1212864,
'protected': False,
'notifications': None,
'profile_background_image_url_https': 'https://si0.twimg.com/images/themes/theme1/bg.png',
'profile_background_color': 'C0DEED',
'verified': True,
'geo_enabled': True,
'time_zone': 'Pacific Time (US & Canada)',
'description': 'The Real Twitter API. I tweet about API changes, service issues and happily answer questions about Twitter and our API. Don\'t get an answer? It\'s on my website.',
'default_profile_image': False,
'profile_background_image_url': 'http://a0.twimg.com/images/themes/theme1/bg.png',
'statuses_count': 3333,
'friends_count': 31,
'following': None,
'show_all_inline_media': False,
'screen_name': 'twitterapi'
},
'in_reply_to_screen_name': None,
'source': 'YoruFukurou',
'in_reply_to_status_id': None
}
]
|
mit
| -6,867,694,552,128,083,000 | 35.382784 | 192 | 0.429801 | false |
radicalbit/ambari
|
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
|
1
|
5636
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import ambari_simplejson as json
from ambari_jinja2 import Environment as JinjaEnvironment
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Directory, File
from resource_management.core.source import InlineTemplate, Template
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.fcntl_based_process_lock import FcntlBasedProcessLock
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.libraries.script import Script
def setup_stack_symlinks(struct_out_file):
"""
Invokes <stack-selector-tool> set all against a calculated fully-qualified, "normalized" version based on a
stack version, such as "2.3". This should always be called after a component has been
installed to ensure that all HDP pointers are correct. The stack upgrade logic does not
interact with this since it's done via a custom command and will not trigger this hook.
:return:
"""
import params
if params.upgrade_suspended:
Logger.warning("Skipping running stack-selector-tool because there is a suspended upgrade")
return
if params.host_sys_prepped:
    Logger.warning("Skipping running stack-selector-tool because this is a sys_prepped host. This may cause symlink pointers not to be created for HDP components installed later on top of an already sys_prepped host.")
return
# get the packages which the stack-select tool should be used on
stack_packages = stack_select.get_packages(stack_select.PACKAGE_SCOPE_INSTALL)
if stack_packages is None:
return
json_version = load_version(struct_out_file)
if not json_version:
Logger.info("There is no advertised version for this component stored in {0}".format(struct_out_file))
return
# On parallel command execution this should be executed by a single process at a time.
with FcntlBasedProcessLock(params.stack_select_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
for package in stack_packages:
stack_select.select(package, json_version)
def setup_config():
import params
stackversion = params.stack_version_unformatted
Logger.info("FS Type: {0}".format(params.dfs_type))
is_hadoop_conf_dir_present = False
if hasattr(params, "hadoop_conf_dir") and params.hadoop_conf_dir is not None and os.path.exists(params.hadoop_conf_dir):
is_hadoop_conf_dir_present = True
else:
Logger.warning("Parameter hadoop_conf_dir is missing or directory does not exist. This is expected if this host does not have any Hadoop components.")
if is_hadoop_conf_dir_present and (params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS'):
    # create core-site only if the hadoop config directory exists
XmlConfig("core-site.xml",
conf_dir=params.hadoop_conf_dir,
configurations=params.config['configurations']['core-site'],
configuration_attributes=params.config['configuration_attributes']['core-site'],
owner=params.hdfs_user,
group=params.user_group,
only_if=format("ls {hadoop_conf_dir}"))
Directory(params.logsearch_logfeeder_conf,
mode=0755,
cd_access='a',
create_parents=True
)
if params.logsearch_config_file_exists:
File(format("{logsearch_logfeeder_conf}/" + params.logsearch_config_file_name),
content=Template(params.logsearch_config_file_path,extra_imports=[default])
)
else:
Logger.warning('No logsearch configuration exists at ' + params.logsearch_config_file_path)
def load_version(struct_out_file):
"""
Load version from file. Made a separate method for testing
"""
json_version = None
try:
if os.path.exists(struct_out_file):
with open(struct_out_file, 'r') as fp:
json_info = json.load(fp)
json_version = json_info['version']
except:
pass
return json_version
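# For reference, a minimal struct_out_file that load_version() can read might
# contain (hypothetical example):
#   {"version": "2.3.4.0-3485"}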
def link_configs(struct_out_file):
"""
Links configs, only on a fresh install of HDP-2.3 and higher
"""
import params
json_version = load_version(struct_out_file)
if not json_version:
Logger.info("Could not load 'version' from {0}".format(struct_out_file))
return
# On parallel command execution this should be executed by a single process at a time.
with FcntlBasedProcessLock(params.link_configs_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
for k, v in conf_select.get_package_dirs().iteritems():
conf_select.convert_conf_directories_to_symlinks(k, json_version, v)
|
apache-2.0
| -2,267,958,351,017,531,100 | 40.755556 | 216 | 0.743967 | false |
dichenko/kpk2016
|
Diff/111879.py
|
1
|
1032
|
# Given the increasing sequence of integers 1, 2, 4, 5, 7, 9, 10, 12, 14, 16, 17, ...
# It is formed as follows: take one odd number, then two even numbers,
# then three odd numbers, and so on. Output the N-th element of this sequence.
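# Worked example: for N = 5 the answer is 7, since the blocks are
# (1), (2, 4), (5, 7, 9), (10, 12, 14, 16), ... and the fifth element is 7.
# Naming note: "chet"/"nechet" transliterate the Russian words for even/odd,
# so next_chet() returns the next even number and next_nechet() the next odd.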
def next_chet(x):
if x % 2 == 0:
return x + 2
else: return x + 1
def next_nechet(x):
return x + 1 + x % 2
def is_chet(x):
return x % 2 == 0
n = int(input())
D = [1]
data = D[0]
counter = 1
while len(D) < n:
counter += 1
if is_chet(counter):
for i in range(counter):
k = next_chet(data)
D.append(k)
data = k
if len(D) >= n: break
else:
for i in range(counter):
k = next_nechet(data)
D.append(k)
data = k
if len(D) >= n: break
print(D[-1])
|
gpl-3.0
| 8,481,426,430,898,534,000 | 18.906977 | 91 | 0.53271 | false |
achim1/HTCTools
|
htctools/batchfarming.py
|
1
|
33073
|
"""
Management of Zeuthen Batch farmjobs
"""
import sys
#
#import shlex
#import sys
#import tarfile
#import time
#import logging
#import zmq
#import os
#import sqlite3
#import re
#
#from .. import CONFIG, Logger
#from .metascript import MetaScript
#from .utils import ListSlicer, RotatingFileNamer, GetEnvshells, ExitFunc, CheckBzipIntegrity
#from .databases import OpenSQLiteDB, CloseSQLiteDB
#import databases as file_db
#
#
#from os.path import join,split
#from subprocess import Popen,PIPE
#from glob import glob
#from copy import copy
#from time import sleep
#
##########################################
#
#class SubmissionError(Exception):
# pass
#
##########################################
#
#def JobPropertiesFactory(jobtype):
# """
# Return predefined sets of JobProperties
# """
# props = JobProperties()
# if jobtype == "long_run_high_mem":
# props.fortyeighthour_queue()
# props.request_ram(10000)
# props.as_sl6()
# else:
# raise ValueError("Please give a keyword for a certain job settings preset")
# return props
#
#
####################################################
#
#class JobProperties(object):
# """
# A wrapper for numerous job properties
# """
# def __init__(self):
# self.mailoption = ''
# self.ready_for_submission = False
# self.ressources = {'hcpu':'h_cpu=47:59:59','hvmem':'h_rss=2000M'} # parameter for -l
# self.shelltype = '/bin/zsh' # parameter for -S
# self.finished = False
# self.project = "icecube"
# self.arrayjob = False
# self.usetmpdir = False
# self.envshell = self.choose_envshell("V04-04-01")
# self.logdir = "/dev/null"
#
# def specify_tmpdir_size(self,megabytes):
# """
# Request the size of $TMP
# """
# self.ressources['tmpdir'] = 'tmpdir_size=' + str(megabytes) + 'M '
# self.usetmpdir = True
#
# def send_mail(self,mailoption):
# """
# mailoption could be either 'b','e' or 'a'
# """
# self.mailoption = mailoption
#
# def as_nuastro(self):
# """
# Set the project to z_nuastr
# """
# self.project = "z_nuastr"
#
# def short_queue(self):
# """
# Submit for short queue
# """
# self.ressources['hcpu']='h_cpu=00:29:59'
#
# def specify_logdir(self,directory):
# """
#        Give a name for the logfile
# """
# self.logdir = directory
#
# def as_sl6(self):
# """
# Select nodes with Scientific Linux 6
# """
# self.ressources['os'] = 'os=sl6'
#
# def as_sl5(self):
# """
# Select nodes with Scientific Linux 5
# """
# self.ressources['os'] = 'os=sl5'
#
# def as_arrayjob(self,minarray,maxarray):
# """
# Run job as arrayjob in range min, max
# """
# self.arrayjob = (minarray,maxarray)
#
# def request_gpu(self):
# """
# Run job on gpu nodes
# """
# self.ressources['gpu'] = "gpu"
#
# def specify_hcpu(self,hours,minutes):
# """
#        Set hard limit for cpu time
# """
# self.ressources['hcpu']="h_cpu=" + str(hours) + ":" + str(minutes) + ":59"
#
# def request_ram(self,megabites):
# """
# Set hard limit for ram
# """
# self.ressources['h_rss'] ="h_rss="+str(megabites)+"M"
#
# def twelvehour_queue(self):
# """
# Run job in 12 h queue
# """
# self.ressources['hcpu']='h_cpu=11:59:59'
#
# def twentyfourhour_queue(self):
# """
# Run job in 24 h queue
# """
# self.ressources['hcpu']='h_cpu=23:59:59'
#
# def fortyeighthour_queue(self):
# """
# Run job in 48 h queue
# """
# self.ressources['hcpu']='h_cpu=47:59:59'
#
# def prettyprint(self):
# """
# Give a nice representation which can be written in a
# submit file
# """
#
# prettystring = '#$ -S %s \n' %self.shelltype
# for item in self.ressources.keys():
# prettystring += '#$ -l %s \n' %self.ressources[item]
#
# prettystring += '#$ -cwd\n'
# prettystring += '#$ -j y \n'
# prettystring += '#$ -o %s \n' %self.logdir
# prettystring += '#$ -P %s \n' %self.project
# if self.arrayjob:
# prettystring += '#$ -t %i-%i\n' %(self.arrayjob)
#
# if self.mailoption:
# prettystring += '#$ -m %s\n\n' %self.mailoption + '\n\n'
# else:
# prettystring += '\n'
#
# return prettystring
#
# def choose_envshell(self, qualifier, sim=False):
# """
# Pick an env-shell for this job
# """
# for shell in GetEnvshells(qualifier,sim=sim):
# if qualifier in shell.split("/"):
# self.envshell = shell
# return shell
#
# def __repr__(self):
# return self.prettyprint()
#
# def __str__(self):
# return self.prettyprint()
#
#
#######################################################
#
#class FarmJob(object):
# """
# A wrapper for desy batch farmjobs array-style
# """
#
# def __init__(self,script,jobprops,jobname="batch",script_args=[],delay=0):
#
# jobdir = CONFIG.get("farmjobs","farmscriptdir")
#
# self.script = script
# self.jobname = jobname
# self.jobprops = jobprops
# self.script_args = script_args
# self.ready = False
# self.jobfile = RotatingFileNamer(jobdir,self.jobname,".sh")
# self.delay = delay
# self.jobid = 0
#
# def create_submission_file(self,copy_source=None,copy_dest=None):
# """
# Write the submission file to the
# output directory
# """
#
# f = open(self.jobfile,'w')
# f.write('#!/bin/zsh \n\n')
# if self.delay:
# delay_cond = "if (($SGE_TASK_ID > 600)); then\n sleep 600\n else\n sleep $SGE_TASK_ID\n fi\n\n"
# f.write(delay_cond)
# specs = self.jobprops.prettyprint()
# f.write(specs)
#
# # implement Hape's trap
# f.write("trap 'echo Job successfully completed' 0\n")
# f.write("trap 'echo Job killed by hitting limit;exit 2' USR1\n\n")
# if self.jobprops.usetmpdir:
#
# assert(copy_source is not None)
# #assert(copy_dest is not None)
#
# f.write('cd $TMPDIR \n')
# if copy_source.startswith("/acs"):
# f.write("dccp " + copy_source + '. \n')
# else:
# f.write('cp ' + copy_source + ' . \n')
#
#
# f.write('source /afs/ifh.de/user/s/stoessl/.zshrc' + '\n')
# f.write('envshell='+self.jobprops.envshell+'\n')
# f.write('script='+self.script.filename+'\n\n')
#
# commandostring = '$envshell $script '
# for s_arg in self.script_args:
# commandostring += s_arg
#
#
# f.write(commandostring)
# if self.jobprops.usetmpdir:
# #rm_filename = split(copy_source)[1]
# #f.write('rm -f ' + rm_filename + ' \n')
# if copy_dest is not None:
# f.write('cp * ' + copy_dest + ' \n')
#
# f.close()
# self.ready = True
#
# def submit(self):
# """
# submit the job to the batch system
# """
# if not self.ready:
# raise SubmissionError('need to prepare submission file first, not submitted!')
#
# cline = 'qsub ' + self.jobfile
# jobid = Popen(shlex.split(cline),stdout=PIPE).communicate()[0]
# self.jobid = jobid.split()[2]
# Logger.info('Job submitted with id %s!' %self.jobid)
# return self.jobid
#
###############################################################
#
#def ArrayJobFiles(files,chunksize,subjobs=[]):
# """
# Slice infiles in chunks and return an dict with
# chunknumber, chunk
# """
#
# file_dict = {}
# slices = int(float(len(files))/chunksize)
# for index,file_slice in enumerate(ListSlicer(files, slices)):
# if subjobs:
# if (index + 1) in subjobs:
# file_dict[index + 1] = file_slice # 0 is not a valid index for arrayjobs
# else:
# file_dict[index + 1] = file_slice # 0 is not a valid index for arrayjobs
#
# # filter out lists of length 0
# filtered_file_dict = dict()
# for k in file_dict.keys():
# if file_dict[k]:
# filtered_file_dict[k] = file_dict[k]
# return filtered_file_dict
#
################################################################
#
#def SimpleFarmer(func,job_kwargs,jobname="batch.sh",func_args=[],func_kwargs={},decorator=None):
# """
# Calculate func on farm as simple job
# """
# raise NotImplementedError("SimpleFarmer is not implemented yet!")
#
#
##################################################################
#
#def CopyFileToTmp(file,dcache=False):
# #subprocess.Popen("")
#
# pass
#
##################################################################
#
##FIXME: make it a class for inheritance
##class ArrayJobFarmer:
#
## def __init__(self):
## logdir = CONFIG.get('logging','farmlogs')
## farmjobdir = CONFIG.get('farmjobs','farmscriptdir')
##
## def __call__(self,merge,files,jobprops,jobname="batch",func_kwargs={},delay=False,subjobs=[]):
## pass
#
#def ArrayJobFarmer(func,merge,files,jobprops,jobname="batch",func_kwargs={},delay=False,subjobs=[],presliced=False):
# """
# Calculate func on the farm with arrayjobs with properties defined in job_kwargs
# CAVE: func_args are managed automatically
# """
#
# #if jobprops.usetmpdir:
# # assert merge == 1, "Can not currently copy more than 1 file to tmpdir"
# # copy_files = files
# # files = map(lambda x: os.path.split(x)[1],files)
#
# logdir = CONFIG.get('logging','farmlogs')
# farmjobdir = CONFIG.get('farmjobs','farmscriptdir')
#
# if presliced:
# slicedfiles = files
# n_jobs = len(slicedfiles.keys())
# else:
#        # calculate n_jobs separately, to avoid errors
# n_jobs = len(ArrayJobFiles(files,merge).keys())
# slicedfiles = ArrayJobFiles(files,merge,subjobs=subjobs)
#
# #print slicedfiles
# #print False in map(bool,[j for j in slicedfiles.values()])
# # write a python script
# # magicindex trick:
# # ensure that the sys.argv[1] of the script is used to
# # get the correct slize of files
#
# func_args = ["files[magicindex]"]
# farmpyscript = MetaScript(join(farmjobdir,jobname + ".py"),"w")
# farmpyscript.add_import(sys)
# farmpyscript.add_variable("magicindex","sys.argv[1]")
# farmpyscript.add_json_dumpable("files",slicedfiles)
# #print slicedfiles
# #raise
# if subjobs:
# farmpyscript.add_function(ExitFunc,["files","magicindex"],{},None)
# farmpyscript.add_function(func, func_args, func_kwargs)
# farmpyscript.write_exectutable()
#
# jobprops.as_arrayjob(1,n_jobs) # index starts at 1
# jobprops.specify_logdir(logdir)
# jobprops.as_sl6()
# #if jobprops.usetmpdir:
# # job = FarmJob(farmpyscript,jobprops,jobname=jobname,script_args=["$SGE_TASK_ID"],delay=delay,copy_source)
# #else:
# job = FarmJob(farmpyscript,jobprops,jobname=jobname,script_args=["$SGE_TASK_ID"],delay=delay)
# job.create_submission_file()
# job.submit()
# Logger.info("Job %s submitted with a range of %i:%i!" %(job.jobid,job.jobprops.arrayjob[0],job.jobprops.arrayjob[1]))
# return int(job.jobid.split(".")[0])
#
#######################################################################
#
#def FitJobFarmer(func,jobprops,n_jobs,jobname="fit_",func_kwargs={},decorator=None,func_selectable_args=None):
# """
# Calculate func on the farm with arrayjobs with properties defined in job_kwargs
# CAVE: func_args are managed automatically
# """
#
# logdir = CONFIG.get('logging','farmlogs') + "/fit/"
# farmjobdir = CONFIG.get('farmjobs','farmscriptdir')
#
# # write a python script
#
# #func_args = []
# farmpyscript = MetaScript(join(farmjobdir,jobname + ".py"),"w")
# farmpyscript.add_import(sys)
# if func_selectable_args is not None:
# farmpyscript.add_variable("magicindex","int(sys.argv[1]) - 1")
# farmpyscript.add_json_dumpable("seeds",func_selectable_args)
# func_args = ["seeds[magicindex]"]
# else:
# func_args = ["sys.argv[1]"]
#
# farmpyscript.add_function(func, func_args, func_kwargs, decorator)
# farmpyscript.write_exectutable()
#
# jobprops.as_arrayjob(1,n_jobs) # index starts at 1
# #jobprops.specify_logdir(logdir) # NO LOG! who needs that?
# job = FarmJob(farmpyscript,jobprops,jobname=jobname,script_args=["$SGE_TASK_ID"])
# job.create_submission_file()
# job.submit()
# Logger.info("Job %s submitted with a range of %i:%i!" %(job.jobid,job.jobprops.arrayjob[0],job.jobprops.arrayjob[1]))
# return int(job.jobid.split(".")[0])
#
#######################################################
#
#def TarFiles(infiles,filename="logs.tar"):
# """
# tar files together
# """
# archive = tarfile.open(filename,"w")
# map(archive.add,infiles)
# archive.close()
# Logger.info("Tar file written with %i files and name %s" %(len(infiles),filename))
# return filename
#
#######################################################
#
#def Resubmit(jobnum,errorlist):
# """
# Resubmit the failed jobs of an job-array
# """
# raise NotImplementedError
#
#######################################################
#
#def ResubmitFromDB(jobnum):
# """
# Get jobs from the db which failed and resubmit them
# """
#
# raise NotImplementedError
#
###################################################
#
#def StandardErrorGrepper(logs,summaryfile=None):
# """
# Search the logfiles for typical error patterns
# :param logs:
# :return:
# """
# errors = []
# unfinished = []
# mem_errors = []
# non_processed_files = []
# example_message = ""
#
# def file_len(fname):
# i = 0
# with open(fname) as f:
# for i, l in enumerate(f):
# pass
# return i + 1
#
# infilesfromlog = []
# for log in logs:
# linecnt = file_len(log)
# if not linecnt:
#            Logger.warning("%s has zero length" %log.__repr__())
# f = open(log)
# txt = f.read()
# try:
# infilestring = txt.split("reader2: FilenameList =")[1].split("\n")[0].strip("[").strip("]").replace("[","").replace("'","").strip()
# infilesfromlog = infilestring.split(",")
# except:
# infilesfromlog = ""
# #exec("infilesfromlog = " + infilestring)
# if "rror" in txt:
# errors.append(log)
# example_message = txt
# non_processed_files += infilesfromlog
# if "MemoryError" in txt:
# mem_errors.append(log)
# non_processed_files += infilesfromlog
# if not "finish" in txt and (not linecnt ==2):
# unfinished.append(log)
# non_processed_files += infilesfromlog
#
# f.close()
#
# if errors or unfinished or mem_errors:
# Logger.info("Errors found, last log \n\n\n %s" %example_message)
#
# return errors,unfinished,mem_errors,example_message,non_processed_files
#
###################################################
#
#def FailedBZipTestSearcher(logs,summaryfile="dummy"):
#
# errors = []
# unfinished = []
# mem_errors = []
# example_message = ""
# summary = open(summaryfile,"w")
#
#
# def file_len(fname):
# i = 0
# with open(fname) as f:
# for i, l in enumerate(f):
# pass
# return i + 1
#
# for log in logs:
# linecnt = file_len(log)
# if not linecnt:
# Logger.warning("%s has zero length" %log.__repr__())
# f = open(log)
# txt = f.read()
# if "ERROR" in txt:
# errors.append(log)
# example_message = txt
# summary.write(txt)
# if "MemoryError" in txt:
# mem_errors.append(log)
# if not "finish" in txt and (not linecnt ==2):
# unfinished.append(log)
# f.close()
#
# if errors:
# Logger.info("Errors found, last log \n\n\n %s" %example_message)
#
# summary.close()
# return errors,unfinished,mem_errors,example_message,[]
#
#
#
###################################################
#
#def PostProcess(jobnum,tar=True,grepfunc=StandardErrorGrepper):
# """
#    Check the logs if everything went fine for this job
#    tar: tar the logfiles together and delete the untarred ones
# """
# # change to logdir to avoid nested tar-file
# current_dir = os.getcwd()
#
# logdir = CONFIG.get('logging','farmlogs')
# os.chdir(logdir)
# logs = glob("*.o%s.*" %str(jobnum))
#
# if not logs:
# tared_logs = glob("*.%i.tar" %jobnum)[0]
# Logger.info("Found a tared log for this job %s" %tared_logs)
# tared_logs = tarfile.TarFile(tared_logs)
# tared_logs.extractall()
# logs = glob("*.o%s.*" %str(jobnum))
#
# Logger.info("%i logs found for job %s" %(len(logs),str(jobnum)))
# # use unix grep, as it is faster
# #grepstring_finished = "fgrep finish %s/*.o%s.* | wc -l" %(logdir,str(jobnum))
# #grepstring_errors = "fgrep rror %s/*.o%s.* | wc -l" %(logdir,str(jobnum))
# #print shlex.split(grepstring_finished)
# #finished = Popen(shlex.split(grepstring_finished),stdout=PIPE).communicate()[0]
# #errors = Popen(shlex.split(grepstring_errors),stdout=PIPE).communicate()[0]
# #print finished,errors
# # look through the logs
# summary = join(logdir,"job%isummary.log" %jobnum)
# errors,unfinished,mem_errors,example_message,non_processed_files = grepfunc(logs,summaryfile=summary)
#
# Logger.info("Found %i jobs with errors" %len(errors))
# Logger.info("Found %i unfinished jobs" %len(unfinished))
# Logger.info("Found %i jobs with memory errors" %len(mem_errors))
#
# error_nums = [int(err.split(".")[-1]) for err in errors]
# unfinished_nums = [int(un .split(".")[-1]) for un in unfinished]
# mem_error_nums = [int(mem.split(".")[-1]) for mem in mem_errors]
# Logger.info("List of subjobs with errors %s" %error_nums)
# Logger.info("List of subjobs which are not finished %s" %unfinished_nums)
# Logger.info("List of subjobs with memory errors %s" %mem_error_nums)
# if tar:
# tarname = logs[0].split(".")[0] + ".%s" %jobnum.__repr__() + ".tar"
# TarFiles(logs,filename=tarname)
# map(os.remove,logs)
#
# # change back to previous dir
# os.chdir(current_dir)
#
# sgejobinfo = GetJobInfoSGEJobs(jobnum)
# mem_exceeded = []
# failed = []
# for thisjob in sgejobinfo:
# try:
# maxmem = int(thisjob["category"].split(",")[2].split("vmem")[1].rstrip("M"))
# usedmem = thisjob["maxvmem"]
# if usedmem > maxmem:
# mem_exceeded.append(int(thisjob["task_number"]))
# if int(thisjob["exit_status"] != 0):
# failed.append(int(thisjob["task_number"]))
# except Exception as e:
# Logger.debug("Exception %s arised during job check for database!" %e.__repr__())
#
# error_dict = dict()
# error_dict["mem"] = list(set(mem_error_nums + mem_exceeded))
# error_dict["unfin"] = list(set(unfinished_nums + failed))
# error_dict["error"] = error_nums
# error_dict["nonprfiles"] = non_processed_files
# return error_dict
#
#
#
###################################################
#
#def CheckDataSet(dataset,filetype="raw",usefarm=False):
# """
# Check if all files are fine with bzip -t
# """
#
# assert filetype in ["raw","l3a"], "Can only check for 'raw' or 'l3a' filetype"
#
# rmtmp = False
#
# if filetype == "raw":
# files = dataset.raw_files_on_disk
#
# if filetype == "l3a":
# files = dataset.l3afiles
#
# if usefarm:
# jobprops = JobProperties()
# jobprops.short_queue()
# thejob = ArrayJobFarmer(CheckBzipIntegrity,1,files,jobprops=jobprops,jobname="ch%i" %dataset.dataset)
# while CheckJobOnFarm(thejob):
# sleep(120)
# result = PostProcess(thejob,grepfunc=FailedBZipTestSearcher)
#
# else:
# corrupt = 0
# logfile = open("/afs/ifh.de/user/s/stoessl/scratch/" + "bztest%i" %dataset.dataset + ".log","w")
# for i in files:
# if (i.startswith("/acs") or i.startswith("dcap")):
# if i.startswith("dcap"):
# name = i.replace("dcap://","")
#
# command = "dccp %s /lustre/fs15/group/icecube/stoessl/tmp/" %name
# Popen(shlex.split(command),stdout=PIPE).communicate()[0]
# thefile = join("/lustre/fs15/group/icecube/stoessl/tmp/",split(i)[1])
# print command, thefile
# time.sleep(1)
# rmtmp = True
# else:
# thefile = i
# name = thefile
# print thefile
# #raise
# if not CheckBzipIntegrity(thefile):
# logfile.write("BZ2 Error " + i + "\n" )
# corrupt += 1
# if rmtmp:
# command = "rm /lustre/fs15/group/icecube/stoessl/tmp/%s" %split(name)[1]
# Popen(shlex.split(command),stdout=PIPE).communicate()[0]
#
# logfile.close()
# print len(files),"files"
# print corrupt,"corrupt"
# print "Done"
#
#
#
#####################################################
#
#def _parse_arcx_sgejobs(sgejobinfo):
#
# all_jobs = []
# cnt = 1
# jobinfo = dict()
# for line in sgejobinfo.split("\n"):
# data = line.split("=")
# data = (data[0].strip(),"".join(data[1:]).strip())
# jobinfo[data[0]] = data[1]
#        # each job has 29 fields of information
# cnt += 1
# if cnt == 30:
# all_jobs.append(copy(jobinfo))
# cnt = 1
# jobinfo = dict()
#
# return all_jobs
#
##########################################
#
#def _parse_jobinfo(qstatjobinfo):
# """
# parse the output of qstat -j
# """
#
# qstat_dict = dict()
# for line in qstatjobinfo.split("\n"):
# if ":" in line:
# data = line.split(":")
# if len(data) != 2:
# continue
# data = (data[0].strip(),data[1].strip())
# qstat_dict.update([data])
#
# return qstat_dict
#
################################################
#
#def CheckJobOnFarm(jobnum):
# x = GetJobinfo(jobnum)
# return len(x.keys()) > 0
#
#
################################################
#
#def GetJobinfo(jobnum):
# """
# call qstat -j jobnum and parse the output
# """
#
# command = "qstat -ext -j " + str(jobnum)
# qstatinfo = Popen(shlex.split(command),stdout=PIPE).communicate()[0]
# infodict = _parse_jobinfo(qstatinfo)
# return infodict
#
##################################
#
#def GetJobInfoSGEJobs(jobnum):
#
# command = "arcx sgejobs %s" %str(jobnum)
# sgejobinfo = Popen(shlex.split(command),stdout=PIPE).communicate()[0]
# infos = _parse_arcx_sgejobs(sgejobinfo)
# return infos
#
##################################
#
#def GetGCD():
# """
# Get some random gcd
# """
#
# raise NotImplementedError
#
#
#
##################################################
#
#class Shepherd(object):
# """
# Watch the sheep lingering on the beautiful farming grounds in Zeuthen,
# but don't fall asleep....
# """
#
# def __init__(self,fnct,fnct_kwargs,datasetlist,ana_level,maxjobs=20000,dbmanangertype=None,logfile=CONFIG.get("logging","shepherd_log"),port=59322,notify_on_port=None,delay=0):
# self.maxjobs = maxjobs
# self.datasets = datasetlist
# self.fnct = fnct
# self.delay = delay
# self.fnct_kwargs = fnct_kwargs
# self.ana_level = ana_level
# self.notify_on_port= notify_on_port
# self.running_jobs = []
# self.finished_jobs = []
# self.merge = 1
# fh = logging.FileHandler(logfile)
# Logger.addHandler(fh)
# Logger.info("Shepherd initialized!")
# Logger.info("Will process the following datasets %s" %self.datasets.__repr__())
# self.dbmanagertype = dbmanangertype
# context = zmq.Context()
# self.socket = context.socket(zmq.REP)
# self.socket.bind("tcp://127.0.0.1:%s" %str(port))
#
# #def alter_fnct_kwargs(self,fnct_kwargs):
# # self.fnct_kwargs = fnct_kwargs
#
# #def receive_dataset(self):
# # dataset = self.socket.recv()
# # dataset = int(dataset)
# # Logger.info("Got dataset %s" %dataset.__repr__())
# # return dataset
#
# def get_active_jobs(self):
# """
# Count the number of active jobs for this user
# """
# c_line = ['qstat', '-g','d','-u', 'stoessl']
# batch_status = Popen(c_line,stdout=PIPE)
# c_line_wc = ['wc', '-l']
# counter = Popen(c_line_wc,stdin=batch_status.stdout,stdout=PIPE)
# batch_status.stdout.close()
# jobsonfarm = int(counter.communicate()[0])
# batch_status.kill()
# Logger.debug("Got %i jobs currently on the batch system" %jobsonfarm)
# return jobsonfarm
#
#
# def submit_dataset(self,dataset,gcd = "/lustre/fs6/group/i3/stoessl/GeoCalibDetectorStatus_IC79.55380_L2a.i3",ram=4000,cpu="23",subjobs=[],jobname="L3afilter"):
# """
# Submit a dataset to the batch system
# """
#
#
#
# if int(dataset) in [6451,6939]:
# infiles = file_db.GetFilesFromDB( dataset=int(dataset),analysis_level=self.ana_level,coincident=1)
# else:
# infiles = file_db.GetFilesFromDB( dataset=int(dataset),analysis_level=self.ana_level)
# infiles = [ str(file.filename) for file in infiles]
# if len(infiles) > 10000:
# self.merge=10
# else:
# self.merge=1
#
# Logger.info("Will submit job for dataset %i with %i infiles, will merge %i files" %(int(dataset),len(infiles),int(self.merge)))
#
# self.fnct_kwargs.update([("merge",int(self.merge))])
# self.fnct_kwargs.update([("dataset",int(dataset))])
# #jobprops = JobPropertiesFactory("long_run_high_mem")
# jobprops = JobProperties()
# jobprops.request_ram(ram)
# jobprops.specify_hcpu(cpu,"59")
# jobid = 0
# if int(dataset) in [6451,6939]:
# # coincident + polygonato corsika needs double processing
# for __ in range(1):
# jobid = ArrayJobFarmer(self.fnct, int(self.merge), infiles, jobprops, jobname + str(dataset), func_kwargs= self.fnct_kwargs,subjobs=subjobs,delay=self.delay)
# self.fnct_kwargs.update([('coincident',True)])
# self.running_jobs.append(jobid)
# Logger.info("Submitted job %i" %(int(jobid)))
#
# else:
# jobid = ArrayJobFarmer(self.fnct, int(self.merge), infiles, jobprops, jobname + str(dataset), func_kwargs= self.fnct_kwargs,subjobs=subjobs,delay=self.delay)
# self.running_jobs.append(jobid)
# Logger.info("Submitted job %i" %(int(jobid)))
#
#
# return jobid
#
# def _getAFSToken(self):
# """
# Keep the job alive and get a new afs token,
# so that it can still write data to disk
# """
# Popen(['kinit', '-R'])
#
#
# #def add_dataset(self,dataset):
# # """
# # Add another dataset to the inline queue
# # """
# # self.datasets.append(dataset)
#
#
# def do_farming(self):
# """
# Submit and surveil all the datasets which are in
# self.datasets
# """
# #dbmanager = DBManager(managertype=self.dbmanagertype)
# Logger.info("Start to process jobs...")
# jobdict = dict()
#
# if not len(self.datasets):
# raise ValueError("No datasets available for farming!")
#
# while True:
#
# time.sleep(30)
# #maxjobsexceeded = False
# # receive new jobs
# #try:
# # ds = self.receive_dataset()
# # self.datasets.append(ds)
# # Logger.info("Received dataset %s to process"%ds.__repr__())
# #except Exception as e:
# # Logger.warning("Caught exception %s" %e.__repr__())
#
# self._getAFSToken()
#
# while len(self.datasets):
# if self.get_active_jobs() > self.maxjobs:
# #maxjobsexceeded = True
# break
#
# thisset = self.datasets.pop()
# jobid = self.submit_dataset(thisset)
# jobdict[jobid] = thisset
# self.running_jobs.append(jobid)
# time.sleep(5)
#
# for job in self.running_jobs:
# if not CheckJobOnFarm(job): # assume job is finished
# errors = dict()
# try:
# errors = PostProcess(job)
# except Exception as e:
# Logger.warn("Caught Exception %s" %e.__repr__())
# Logger.warn("Can not postprocess %s" %job.__repr__())
# newmem = 4000
# newcpu = "47"
# if errors:
# if errors["mem"]:
# newmem = 10000
#
# failed_jobs = errors["mem"] + errors["unfin"] + errors["error"]
# failed_jobs = list(set(failed_jobs))
# if failed_jobs:
# self.submit_dataset(thisset,ram=newmem, cpu=newcpu, subjobs=failed_jobs)
#
# else:
# # cc = False
# # thisset = jobdict[jobid]
# # if int(thisset) in [6451,6939]:
# # cc = True
# # dbmanager.append(thisset,cc)
# self.finished_jobs.append(job)
#
#
#
# #sendercontext = zmq.Context()
# #sendsocket = sendercontext.socket(zmq.REQ)
# #sendsocket.connect("tcp://127.0.0.1:%s" %str(self.notify_on_port))
# #sendsocket.send(str(thisset))
# # ping a another instance here and let it know that we are finished
#
#
# for job in self.finished_jobs:
# if job in self.running_jobs:
# self.running_jobs.remove(job)
#
#
# def shutdown(self):
# pass
#
###################################################
#
#class JobDBProxy(object):
# """
# Connect to a job-table and manage datastream
# """
#
# def __init__(self,dbfile=CONFIG.get("database","jobdb")):
# self.dbfile = dbfile
#
# def _re_initialize_tables(self,force=False):
# connection,cursor = OpenSQLiteDB(self.dbfile)
# if not force:
# raise ValueError("Need to force re-initialization!")
# #curs = SQLite()
# try:
# cursor.execute("DROP TABLE JOBS")
# except sqlite3.OperationalError as e:
# Logger.warning("Got sqlite3 error %s" %e.__repr__())
# cursor.execute("CREATE TABLE JOBS ( id INTEGER PRIMARY KEY AUTOINCREMENT,dataset int,jobid int)")
# CloseSQLiteDB(connection)
#
# def add_job(self,jobid,dataset):
# connection,cursor = OpenSQLiteDB(self.dbfile)
# cursor.execute("INSERT INTO JOBS (dataset,jobid) VALUES (?,?)" %(dataset,jobid))
# CloseSQLiteDB(connection)
#
# def get_job(self,jobid):
# connection,cursor = OpenSQLiteDB(self.dbfile)
# cursor.execute("SELECT * FROM JOBS WHERE jobid=%i" %(jobid))
# data = cursor.fetchall()
# CloseSQLiteDB(connection)
# return data
#
# def get_dataset(self,jobid):
# connection,cursor = OpenSQLiteDB(self.dbfile)
# cursor.execute("SELECT * FROM JOBS WHERE jobid=%i" %(jobid))
# data = cursor.fetchall()
# CloseSQLiteDB(connection)
# return data
#
#
#
#
#
#
#
#
|
gpl-3.0
| -3,701,935,994,679,156,000 | 32.644964 | 181 | 0.52626 | false |
loafbaker/django_ecommerce1
|
orders/migrations/0004_auto__chg_field_order_billing_address__chg_field_order_shipping_addres.py
|
1
|
8620
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Order.billing_address'
db.alter_column(u'orders_order', 'billing_address_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['accounts.UserAddress']))
# Changing field 'Order.shipping_address'
db.alter_column(u'orders_order', 'shipping_address_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['accounts.UserAddress']))
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'Order.billing_address'
raise RuntimeError("Cannot reverse this migration. 'Order.billing_address' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'Order.billing_address'
db.alter_column(u'orders_order', 'billing_address_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounts.UserAddress']))
# User chose to not deal with backwards NULL issues for 'Order.shipping_address'
raise RuntimeError("Cannot reverse this migration. 'Order.shipping_address' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'Order.shipping_address'
db.alter_column(u'orders_order', 'shipping_address_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounts.UserAddress']))
models = {
u'accounts.useraddress': {
'Meta': {'ordering': "['-updated', '-timestamp']", 'object_name': 'UserAddress'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'billing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'carts.cart': {
'Meta': {'object_name': 'Cart'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'total': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '100', 'decimal_places': '2'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'orders.order': {
'Meta': {'object_name': 'Order'},
'billing_address': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'billing_address'", 'null': 'True', 'to': u"orm['accounts.UserAddress']"}),
'cart': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['carts.Cart']"}),
'final_total': ('django.db.models.fields.DecimalField', [], {'default': "'10.99'", 'max_digits': '1000', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_id': ('django.db.models.fields.CharField', [], {'default': "'ABC'", 'unique': 'True', 'max_length': '120'}),
'shipping_address': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'shipping_address'", 'null': 'True', 'to': u"orm['accounts.UserAddress']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Started'", 'max_length': '120'}),
'sub_total': ('django.db.models.fields.DecimalField', [], {'default': "'10.99'", 'max_digits': '1000', 'decimal_places': '2'}),
'tax_total': ('django.db.models.fields.DecimalField', [], {'default': "'0.99'", 'max_digits': '1000', 'decimal_places': '2'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['orders']
|
mit
| 829,237,142,568,387,700 | 75.973214 | 195 | 0.577842 | false |
theadviceio/executer
|
tests/utils/test_utils_errors.py
|
1
|
2265
|
import sys
import os
import unittest
import tempfile
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__)) + "/../../"))
import utils.errors
# logname = "/tmp/test.log"
# print("-----------------------------")
# print((" current logs in %s" % logname))
# print("-----------------------------")
__version__ = 0.1
__author__ = 'weldpua2008@gmail.com'
def get_temp_filename(deleted=False):
file = tempfile.NamedTemporaryFile(delete=deleted, prefix='_rde_logtmp')
new_file_name = file.name
    # the handle is not closed explicitly; CPython closes it on garbage collection and delete=False keeps the file on disk
return new_file_name
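# (editor's note) illustrative use of the helper above, not exercised by the
# tests below: get_temp_filename() returns a path such as
# '/tmp/_rde_logtmpXXXXXX' (random suffix, location depends on the platform
# temp dir); because delete defaults to False the file stays on disk until
# removed explicitly.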
class TestUtilsErrorsClass(unittest.TestCase):
def test_get_runtime_error(self):
logged_string = "sssssssASdAadDASdasD"
with self.assertRaises(RuntimeError):
utils.errors.get_runtime_error(error=logged_string)
def test_get_io_error(self):
logged_string = "get_io_error_sssssssASdAadDASdasD"
with self.assertRaises(IOError):
utils.errors.get_io_error(error=logged_string)
def test_get_notimplemented_error(self):
logged_string = "get_notimplemented_error_sssssssASdAadDASdasD"
with self.assertRaises(NotImplementedError):
utils.errors.get_notimplemented_error(error=logged_string)
def test_get_transition_error(self):
logged_string = "get_transition_error_sssssssASdAadDASdasD"
with self.assertRaises(utils.errors.TransitionError):
utils.errors.get_transition_error(error=logged_string)
def test_get_key_error(self):
logged_string = "get_key_error_error_sssssssASdAadDASdasD"
with self.assertRaises(KeyError):
utils.errors.get_key_error(error=logged_string)
def test_get_type_error(self):
logged_string = "get_type_error_sssssssASdAadDASdasD"
with self.assertRaises(TypeError):
utils.errors.get_type_error(error=logged_string)
def test_get_value_error(self):
logged_string = "get_value_error_sssssssASdAadDASdasD"
with self.assertRaises(ValueError):
utils.errors.get_value_error(error=logged_string)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| 5,571,883,912,347,106,000 | 31.357143 | 89 | 0.628256 | false |
holgerBerger/go_ludalo
|
top/top.py
|
1
|
13095
|
#!/usr/bin/env python
#
# indices used:
#
# use goludalo
# db.<fs>.createIndex({"ts":1, "nid":1})
#
# use ludalo
# db.jobs.createIndex({"start":1})
# db.jobs.createIndex({"end":1})
# db.jobs.createIndex({"jobid":1})
import time,sys
# CONFIG
DBHOST="localhost"
SNAP = 5
PERFDB="goludalo"
JOBDB="ludalo"
JOBCOLLECTION="jobs"
# map filesystems to batchservers to skip some DB queries
batchservermap={
"nobnec":"intern2",
"alnec":"intern3"
}
batchskip=True # set to false if skipping map should not be used
# END CONFIG
import pymongo
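# --- editor's sketch, not part of the original tool --------------------------
# The mongo shell commands in the header comment can equivalently be issued
# from Python; this hypothetical helper (never called by the script) shows
# roughly how, using the DBHOST/PERFDB/JOBDB/JOBCOLLECTION settings above.
def ensure_indexes(fsnames):
    client = pymongo.MongoClient(DBHOST)
    for fs in fsnames:
        # per-filesystem performance collection: compound index on (ts, nid)
        client[PERFDB][fs].create_index([("ts", pymongo.ASCENDING),
                                         ("nid", pymongo.ASCENDING)])
    jobs = client[JOBDB][JOBCOLLECTION]
    for field in ("start", "end", "jobid"):
        jobs.create_index([(field, pymongo.ASCENDING)])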
# round current time to snap
def getCurrentSnapTime():
return (int(time.time())/SNAP)*SNAP
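# e.g. with SNAP = 5 a call at t = 1436425679 returns (1436425679 / 5) * 5
# = 1436425675, i.e. the timestamp rounded down to the previous 5 second
# boundary (integer division under Python 2)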
# nodestats object, represents a node and its IO statistics
class nodestats(object):
def __init__(self, nodename):
self.nodename = nodename
self.miops = 0
self.wiops = 0
self.wbw = 0
self.riops = 0
self.rbw = 0
self.dt = 0
def __repr__(self):
return "%s: [%d,%d,%d,%d,%d]" % (self.nodename, self.miops, self.wiops, self.wbw, self.riops, self.rbw)
# jobstats object, represents a job and its accumulated IO statistics
class jobstats(object):
def __init__(self, jobid):
self.jobid = jobid
self.miops = 0
self.wiops = 0
self.wbw = 0
self.riops = 0
self.rbw = 0
self.nodelist=[]
self.start = 0
self.end = 0
self.owner = ""
self.cmd = ""
self.dt = 1
def addnode(self, node):
self.miops += node.miops
self.wiops += node.wiops
self.wbw += node.wbw
self.riops += node.riops
self.rbw += node.rbw
self.dt = node.dt
def __repr__(self):
return "%s: %s [%d,%d,%d,%d,%d]" % (self.jobid, self.owner, self.miops, self.wiops, self.wbw, self.riops, self.rbw)
# filesystem object, containing mongo connections
class filesystem(object):
def __init__(self, server, fsname):
self.fsname = fsname
self.client = pymongo.MongoClient(server)
self.perfdb = self.client[PERFDB]
self.perfcoll = self.perfdb[fsname]
self.jobdb = self.client[JOBDB]
self.jobcoll = self.jobdb[JOBCOLLECTION]
# get latest timestamp, searching 5 minutes in the past
def getLatestTs(self):
latest=self.perfcoll.find({"ts": {"$gt":getCurrentSnapTime()-300}}).sort("ts",pymongo.DESCENDING)[0][u'ts']
return latest
# get entries for a certain timestamp
def getEntries(self, timestamp):
for p in self.perfcoll.find({"ts":timestamp}):
yield p
# get a tuple of current timestamp and a dict of nodestats of all nodes doing IO at the moment
def currentNodesstats(self):
nodes={}
timestamp=self.getLatestTs()
for e in self.getEntries(timestamp):
node = e["nid"]
if node == "aggr": continue
if node not in nodes:
nodes[node]=nodestats(node)
nodes[node].dt = e['dt']
if 'mdt' in e:
nodes[node].miops += e['v']
elif 'ost' in e:
nodes[node].wiops += e['v'][0]
nodes[node].wbw += e['v'][1]
nodes[node].riops += e['v'][2]
nodes[node].rbw += e['v'][3]
return (timestamp, nodes)
# map a dict of nodestats to running jobs at the time
def mapNodesToJobs(self, timestamp, nodes):
# { "_id" : ObjectId("559e8c86580eb358815b87a2"), "end" : 1436430906, "cmd" : "", "jobid" : "659096.intern2-2015", "nids" : "n151001", "start" : 1436425679, "owner" : "ppb742", "calc" : -1 }
# FIXME -1 = running
jobs={}
nidtojob={}
for j in self.jobcoll.find({ "$and" : [ {"end":-1}, {"start": {"$lt":timestamp}} ] }):
jobid=j["jobid"]
if jobid not in jobs:
jobs[jobid]=jobstats(jobid)
for nid in j["nids"].split(","):
nidtojob[nid]=jobid
jobs[jobid].nodelist.append(nid)
jobs[jobid].start = j["start"]
jobs[jobid].end = j["end"]
jobs[jobid].owner = j["owner"]
jobs[jobid].cmd = j["cmd"]
        # NOTE: jobs now contains the jobs of ALL clusters, so narrow it down to the ones seen on this filesystem
fsjobs=set()
for node in nodes:
try:
job = nidtojob[node]
jobs[job].addnode(nodes[node])
if job not in fsjobs:
fsjobs.add(job)
except KeyError:
jobs[node]=jobstats(node)
jobs[node].addnode(nodes[node])
jobs[node].nodelist.append(node)
if node not in fsjobs:
fsjobs.add(node)
localjobs={}
for j in fsjobs:
localjobs[j]=jobs[j]
return localjobs
# get a certain running job
def getOneRunningJob(self, jobid):
j = self.jobcoll.find_one({"jobid":jobid})
if j == None: return None
job=jobstats(jobid)
for nid in j["nids"].split(","):
job.nodelist.append(nid)
job.start = j["start"]
job.end = j["end"]
job.owner = j["owner"]
job.cmd = j["cmd"]
try:
job.cachets = j["cachets"]
job.miops = j["miops"]
job.wiops = j["wiops"]
job.wbw = j["wbw"]
job.riops = j["riops"]
job.rbw = j["rbw"]
except KeyError:
# no cached data for this job
job.cachets = job.start
job.miops = 0
job.wiops = 0
job.wbw = 0
job.riops = 0
job.rbw = 0
return job
    # get all running jobs (from all clusters, which cannot be avoided)
def getRunningJobs(self):
jobs={}
for j in self.jobcoll.find({"end":-1}):
jobid=j["jobid"]
if batchskip and jobid.find(batchservermap[self.fsname])<0:
continue
if jobid not in jobs:
jobs[jobid]=jobstats(jobid)
for nid in j["nids"].split(","):
jobs[jobid].nodelist.append(nid)
jobs[jobid].start = j["start"]
jobs[jobid].end = j["end"]
jobs[jobid].owner = j["owner"]
jobs[jobid].cmd = j["cmd"]
try:
jobs[jobid].cachets = j["cachets"]
jobs[jobid].miops = j["miops"]
jobs[jobid].wiops = j["wiops"]
jobs[jobid].wbw = j["wbw"]
jobs[jobid].riops = j["riops"]
jobs[jobid].rbw = j["rbw"]
except KeyError:
# no cached data for this job
jobs[jobid].cachets = jobs[jobid].start
jobs[jobid].miops = 0
jobs[jobid].wiops = 0
jobs[jobid].wbw = 0
jobs[jobid].riops = 0
jobs[jobid].rbw = 0
return jobs
    # go over all jobs in the list and accumulate each job's node stats from start
    # to end into it (end == -1 is handled, so this works for running jobs as well)
def accumulateJobStats(self, jobs):
fsjobs=set()
for j in jobs:
if batchskip and j.find(batchservermap[self.fsname])<0:
continue
if jobs[j].end == -1:
end = int(time.time())
else:
end = jobs[j].end
# we start from cached data, if nothing is cached, this is start
start = jobs[j].cachets
# print "scanning for",end-start, "sec for",j
for e in self.perfcoll.find({"$and": [ {"ts": {"$gt": start}}, {"ts": {"$lt": end}}, {"nid": {"$in": jobs[j].nodelist}} ] }):
node = e["nid"]
if node == "aggr": continue
if 'mdt' in e:
jobs[j].miops += e['v']
elif 'ost' in e:
jobs[j].wiops += e['v'][0]
jobs[j].wbw += e['v'][1]
jobs[j].riops += e['v'][2]
jobs[j].rbw += e['v'][3]
fsjobs.add(j)
            # update the cache: store cachets = end, since all data up to that point has now been summed into the cached totals
# print "update", j
self.jobcoll.update(
{"jobid":j},
{"$set": {
"cachets":end,
"miops":jobs[j].miops,
"wiops":jobs[j].wiops,
"wbw":jobs[j].wbw,
"riops":jobs[j].riops,
"rbw":jobs[j].rbw
} } )
localjobs={}
for j in fsjobs:
localjobs[j]=jobs[j]
return localjobs
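        # (editor's note) design of the cache above: cachets records how far a
        # job's samples have already been folded into the stored totals, so a
        # later "sum" invocation only has to scan the interval (cachets, now]
        # instead of the whole job lifetime.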
def readFSMaxima(self):
self.maxcoll = self.perfdb["fsmaxima"]
e = self.maxcoll.find_one({"fsname": self.fsname})
if e == None:
return [0, 0, 0, 0, 0, 0]
else:
return e["maxima"]
# 0: nodes
# 1: metadata
# 2: wrqs
# 3: rrqs
# 4: wbw
# 5: rbw
def writeFSMaxima(self, maxima):
r = self.maxcoll.update(
{"fsname": self.fsname},
{"$set": {
"maxima": maxima
}
} ,
upsert=True
)
#print r
# get AGGR values for fs from start to end
def getFSvalues(self, start, end):
timelist = {}
for e in self.perfcoll.find({"$and": [ {"ts": {"$gt": start}}, {"ts": {"$lt": end}}, {"nid": "aggr"} ] }):
ts = e["ts"]
if ts not in timelist:
timelist[ts]={}
timelist[ts]["miops"] = 0
timelist[ts]["wiops"] = 0
timelist[ts]["wbw"] = 0
timelist[ts]["riops"] = 0
timelist[ts]["rbw"] = 0
if 'mdt' in e:
timelist[ts]["miops"] += e['v']
elif 'ost' in e:
timelist[ts]["wiops"] += e['v'][0]
timelist[ts]["wbw"] += e['v'][1]
timelist[ts]["riops"] += e['v'][2]
timelist[ts]["rbw"] += e['v'][3]
return timelist
# print a TOP-like list of jobs, with current rates
def printTopjobs(fsname, key):
fs = filesystem(DBHOST, fsname)
(timestamp, nodes) = fs.currentNodesstats()
print time.ctime(timestamp),"\n"
jobs = fs.mapNodesToJobs(timestamp, nodes)
if key == "meta":
sortf=lambda x: x.miops
elif key == "iops":
sortf=lambda x: x.wiops+x.riops
elif key == "bw":
sortf=lambda x: x.rbw+x.wbw
else:
print "use meta, iops or bw as sorting key"
sys.exit()
print "JOBID OWNER NODES META WRITE WrBW READ ReBW"
print " IOPS IOPS MB/s IOPS MB/s"
print "=================================================================="
for j in sorted(jobs.values(), key=sortf, reverse=True):
dt = float(j.dt)
print "%-10s %-8s %-5s %6d %6d %9.2f %6d %9.2f" % (j.jobid.split(".")[0], j.owner, len(j.nodelist), j.miops/dt, j.wiops/dt, (j.wbw/dt)/1000000.0, j.riops/dt, (j.rbw/dt)/1000000.0)
# print a TOP-like list of jobs, with absolute values accumulated over the runtime (sum over time)
def printJobSummary(fsname, key):
fs = filesystem(DBHOST, fsname)
jobs = fs.getRunningJobs()
jobs = fs.accumulateJobStats(jobs)
if key == "meta":
sortf=lambda x: x.miops
elif key == "iops":
sortf=lambda x: x.wiops+x.riops
elif key == "bw":
sortf=lambda x: x.rbw+x.wbw
else:
print "use meta, iops or bw as sorting key"
sys.exit()
print "JOBID OWNER NODES TIME META WRITE WrBW READ ReBW"
print " [H] KIOPS KIOPS [GB] KIOPS [GB]"
print "======================================================================="
for j in sorted(jobs.values(), key=sortf, reverse=True):
print "%-10s %-8s %-5s %4.1f %6d %6d %9.2f %6d %9.2f" % (j.jobid.split(".")[0], j.owner, len(j.nodelist), (time.time()-j.start)/3600, j.miops/1000, j.wiops/1000, j.wbw/1000000000.0, j.riops/1000, j.rbw/1000000000.0)
if __name__ == '__main__':
if len(sys.argv)<4:
print "usage: top.py [sum|top] fsname [meta|iops|bw]"
print " sum: show aggregated values over runtime of active jobs"
print " top: show current values of active jobs"
print
print " meta: sort for metadata operation"
print " iops: sort for iops"
print " bw: sort for bandwidth"
sys.exit(0)
if sys.argv[1]=="top":
printTopjobs(sys.argv[2], sys.argv[3])
if sys.argv[1]=="sum":
printJobSummary(sys.argv[2], sys.argv[3])
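# example invocations (editor's note, hypothetical filesystem name "alnec"):
#   ./top.py top alnec bw    -> current per-job rates, sorted by bandwidth
#   ./top.py sum alnec meta  -> accumulated per-job totals, sorted by metadata ops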
|
gpl-2.0
| 666,660,058,061,147,300 | 33.370079 | 223 | 0.485223 | false |
garnachod/classification
|
src/Clasificadores/NodosPalabras/Nodo.py
|
1
|
1286
|
import random
class Nodo(object):
"""docstring for Nodo"""
def __init__(self, palabra):
super(Nodo, self).__init__()
self.nodosConectados = {}
self.palabra = palabra
def setConexo(self, nodo):
#peso = random.random() - 0.5
peso = 0.1
palabra = nodo.getPalabra()
self.nodosConectados[palabra] = peso
#peso = random.random() - 0.5
peso = 0.1
palabra = self.getPalabra()
nodo.nodosConectados[palabra] = peso
def isConexo(self, palabra):
if palabra in self.nodosConectados:
return True
else:
return False
def getPalabra(self):
return self.palabra
def getPeso(self, palabra):
peso = self.nodosConectados[palabra]
return peso
def sumaAlPeso(self, palabra, cantidad):
self.nodosConectados[palabra] += cantidad
def randomizaTodosPesos(self):
for palabra in self.nodosConectados:
self.nodosConectados[palabra] += (random.random() - 0.5) * 0.1
def randomizaProporPesos(self, probabilidad, alpha):
for palabra in self.nodosConectados:
if random.random() <= probabilidad:
self.nodosConectados[palabra] += (random.random() - 0.5) * alpha
def duplica(self):
duplicado = Nodo(self.palabra)
for palabra in self.nodosConectados:
duplicado.nodosConectados[palabra] = float(self.nodosConectados[palabra])
return duplicado
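# --- editor's usage sketch, not part of the original module ------------------
# Shows how the class above is meant to be driven; the words are arbitrary.
if __name__ == '__main__':
    hola = Nodo("hola")
    mundo = Nodo("mundo")
    hola.setConexo(mundo)           # links both nodes with an initial weight of 0.1
    hola.sumaAlPeso("mundo", 0.05)  # reinforce the hola -> mundo edge
    print(hola.getPeso("mundo"))    # ~0.15 (floating point)
    print(hola.isConexo("mundo"))   # True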
|
apache-2.0
| -4,430,257,125,241,442,300 | 24.72 | 76 | 0.710731 | false |
Minhua722/NMF
|
egs/ar/local/ar_extract_nmf_feats.py
|
1
|
3850
|
#!/usr/bin/env python
import cv2
import numpy as np
import argparse
import math
import pickle
from sklearn.decomposition import PCA
from nmf_support import *
import sys, os
if __name__ == '__main__':
#------------------------------------------------------
# Args parser
#------------------------------------------------------
parser = argparse.ArgumentParser(description='Extract PCA coefficients for each image')
parser.add_argument('--bases_dir', '-base',
action='store', type=str, required=True,
                        help='directory of NMF bases (basis vectors)')
parser.add_argument('--exp_id', '-id',
action='store', type=str, required=True,
help='experiment id (related to directory where bases and feats are stored)')
parser.add_argument('--input_dir', '-in',
action='store', type=str, required=True,
help='data dir with a list of image filenames and labels for training (extracted features will also be stored here)')
args = parser.parse_args()
data_dir = args.input_dir.strip('/')
train_list = "%s/train.list" % data_dir
if not os.path.isfile(train_list):
sys.exit(1)
test_sets = []
for set_i in range(2, 14):
test_sets.append("test%d" % set_i)
bases_dir = "%s/%s/bases" % (args.bases_dir.strip('/'), args.exp_id)
bases_pname = "%s/bases.pickle" % bases_dir
if not os.path.isfile(bases_pname):
sys.exit(1)
feats_dir = "%s/%s" % (args.input_dir, args.exp_id)
with open(bases_pname, "rb") as f:
W = pickle.load(f) # each col of W is a basis
D = W.shape[1] # num of bases (feature dimension)
print "%d NMF bases loaded from %s" % (D, bases_pname)
##########################################################################
# Extract training data features
# load img in each col of V
V_raw, img_height, img_width, train_labels = load_data(train_list)
V = normalize_data(V_raw)
train_label_pname = "%s/train_label.pickle" % data_dir
with open(train_label_pname, "wb") as f:
pickle.dump(train_labels, f)
N = V.shape[1]
#train_coefs_pname = "%s/coefs.pickle" % bases_dir
#with open(train_coefs_pname, "rb") as f:
# H = pickle.load(f)
#print H.shape
#assert(H.shape[0] == D and H.shape[1] == N)
    # mean and variance normalization for each column (one image per column)
train_feats = np.transpose(np.dot(V.T, W))
train_feats = train_feats - np.mean(train_feats, axis=0).reshape(1, N)
train_feats = train_feats / np.std(train_feats, axis=0).reshape(1, N)
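    # (editor's note) the projection above computes W^T V: with W of shape
    # (n_pixels, D) and V of shape (n_pixels, N), train_feats ends up (D, N),
    # one column of D NMF coefficients per training image; the two lines above
    # then standardise every column to zero mean and unit variance.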
train_feats_pname = "%s/train_feats.pickle" % feats_dir
with open(train_feats_pname, "wb") as f:
pickle.dump(train_feats, f)
#print np.mean(train_feats, axis=0)
#print np.std(train_feats, axis=0)
print "train set nmf feats stored in %s" % train_feats_pname
############################################################################
# Extract test data features
for set_name in test_sets:
test_list = "%s/%s.list" % (data_dir, set_name)
print "Process %s" % test_list
# load img in each col of V
V_raw, img_height, img_width, test_labels = load_data(test_list)
V = normalize_data(V_raw)
test_label_pname = "%s/%s_label.pickle" % (data_dir, set_name)
with open(test_label_pname, "wb") as f:
pickle.dump(test_labels, f)
N = V.shape[1]
print "%d test images of size %dx%d loaded" % (N, img_height, img_width)
test_feats = np.transpose(np.dot(V.T, W)) # each col is nmf feats for one image
assert(test_feats.shape[0] == D and test_feats.shape[1] == N)
        # mean and variance normalization for each column
test_feats = test_feats - np.mean(test_feats, axis=0).reshape(1, N)
test_feats = test_feats / np.std(test_feats, axis=0).reshape(1, N)
test_feats_pname = "%s/%s_feats.pickle" % (feats_dir, set_name)
with open(test_feats_pname, "wb") as f:
pickle.dump(test_feats, f)
#print np.mean(test_feats, axis=0)
#print np.std(test_feats, axis=0)
print "%s nmf feats stored in %s" % (set_name, test_feats_pname)
|
apache-2.0
| -5,565,832,709,138,475,000 | 33.070796 | 120 | 0.624675 | false |
Fastcode/NUClearExample
|
nuclear/b.py
|
1
|
3760
|
#!/usr/bin/env python3
import sys
import os
import argparse
import pkgutil
import re
# Don't make .pyc files
sys.dont_write_bytecode = True
# Go and get all the relevant directories and variables from cmake
nuclear_dir = os.path.dirname(os.path.realpath(__file__))
project_dir = os.path.dirname(nuclear_dir)
# Get the tools directories to find b modules
nuclear_tools_path = os.path.join(nuclear_dir, 'tools')
user_tools_path = os.path.join(project_dir, 'tools')
# Build our cmake cache
cmake_cache = {}
# Try to find our cmake cache file in the pwd
if os.path.isfile('CMakeCache.txt'):
with open('CMakeCache.txt', 'r') as f:
cmake_cache_text = f.readlines()
# Look for a build directory
else:
dirs = ['build']
try:
dirs.extend([os.path.join('build', f) for f in os.listdir('build')])
except FileNotFoundError:
pass
for d in dirs:
if os.path.isfile(os.path.join(project_dir, d, 'CMakeCache.txt')):
with open(os.path.join(project_dir, d, 'CMakeCache.txt'), 'r') as f:
cmake_cache_text = f.readlines()
break;
# If we still didn't find anything
try:
cmake_cache_text
except NameError:
cmake_cache_text = []
# Go and process our lines in our cmake file
for l in cmake_cache_text:
# Remove whitespace at the ends and start
l = l.strip()
# Remove lines that are comments
if len(l) > 0 and not l.startswith('//') and not l.startswith('#'):
# Extract our variable name from our values
g = re.match(r'([a-zA-Z_$][a-zA-Z_.$0-9-]*):(\w+)=(.*)', l).groups()
# Store our value and split it into a list if it is a list
cmake_cache[g[0]] = g[2] if ';' not in g[2].strip(';') else g[2].strip(';').split(';');
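        # e.g. a (hypothetical) cache entry such as
        #     CMAKE_PROJECT_NAME:STATIC=NUClearExample
        # yields g = ('CMAKE_PROJECT_NAME', 'STATIC', 'NUClearExample') and is
        # stored as cmake_cache['CMAKE_PROJECT_NAME']; values containing ';'
        # are split into Python lists instead.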
# Try to find our source and binary directories
try:
binary_dir = cmake_cache[cmake_cache["CMAKE_PROJECT_NAME"] + '_BINARY_DIR']
except KeyError:
binary_dir = None
try:
source_dir = cmake_cache[cmake_cache["CMAKE_PROJECT_NAME"] + '_SOURCE_DIR']
except:
source_dir = project_dir
if __name__ == "__main__":
if (binary_dir is not None):
# Print some information for the user
print("b script for", cmake_cache["CMAKE_PROJECT_NAME"])
print("\tSource:", cmake_cache[cmake_cache["CMAKE_PROJECT_NAME"] + '_SOURCE_DIR'])
print("\tBinary:", cmake_cache[cmake_cache["CMAKE_PROJECT_NAME"] + '_BINARY_DIR'])
print()
# Add our builtin tools to the path and user tools
sys.path.append(nuclear_tools_path)
sys.path.append(user_tools_path)
# Root parser information
command = argparse.ArgumentParser(description='This script is an optional helper script for performing common tasks for working with the NUClear roles system.')
subcommands = command.add_subparsers(dest='command')
subcommands.help = "The command to run from the script. See each help for more information."
# Get all of the packages that are in the build tools
modules = pkgutil.iter_modules(path=[nuclear_tools_path, user_tools_path])
# Our tools dictionary
tools = {}
# Loop through all the modules we have to set them up in the parser
for loader, module_name, ispkg in modules:
# Get our module, class name and registration function
module = loader.find_module(module_name).load_module(module_name)
tool = getattr(module, 'run')
register = getattr(module, 'register')
        # Let the tool register its arguments
register(subcommands.add_parser(module_name))
# Associate our module_name with this tool
tools[module_name] = tool
# Parse our arguments
args = command.parse_args()
# Pass to our tool
tools[args.command](**vars(args))
|
mit
| 3,648,145,695,033,993,000 | 31.695652 | 164 | 0.651064 | false |
drimer/NetControl
|
webconf/settings.py
|
1
|
2783
|
"""
Django settings for NetControl project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'sfc3xmpqskk2&^)kiybw41es_w+2m_z8suv57&6j6+fzl1o@r^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'webapp',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'webconf.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'webapp.template_context.javascript.templates',
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'webconf.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'client'),
)
|
gpl-2.0
| -5,218,627,443,829,308,000 | 25.009346 | 71 | 0.688825 | false |
yrobla/nova
|
nova/tests/test_libvirt.py
|
1
|
220373
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack Foundation
# Copyright 2012 University Of Minho
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import errno
import eventlet
import fixtures
import json
import mox
import os
import re
import shutil
import tempfile
from lxml import etree
from oslo.config import cfg
from xml.dom import minidom
from nova.api.ec2 import cloud
from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova.openstack.common import fileutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
from nova import test
from nova.tests import fake_libvirt_utils
from nova.tests import fake_network
import nova.tests.image.fake
from nova.tests import matchers
from nova import utils
from nova import version
from nova.virt.disk import api as disk
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import fake
from nova.virt import firewall as base_firewall
from nova.virt import images
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import driver as libvirt_driver
from nova.virt.libvirt import firewall
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt import netutils
try:
import libvirt
except ImportError:
import nova.tests.fakelibvirt as libvirt
libvirt_driver.libvirt = libvirt
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('base_dir_name', 'nova.virt.libvirt.imagecache')
LOG = logging.getLogger(__name__)
_fake_network_info = fake_network.fake_get_instance_nw_info
_fake_stub_out_get_nw_info = fake_network.stub_out_nw_api_get_instance_nw_info
_ipv4_like = fake_network.ipv4_like
def _concurrency(signal, wait, done, target):
signal.send()
wait.wait()
done.send()
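# (editor's note) helper driven by CacheConcurrencyTestCase below: the spawned
# greenthread signals that it has started, blocks until the test releases the
# wait event, then reports completion, which lets the tests pin down the
# interleaving of concurrent image cache calls.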
class FakeVirDomainSnapshot(object):
def __init__(self, dom=None):
self.dom = dom
def delete(self, flags):
pass
class FakeVirtDomain(object):
def __init__(self, fake_xml=None, uuidstr=None):
self.uuidstr = uuidstr
if fake_xml:
self._fake_dom_xml = fake_xml
else:
self._fake_dom_xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
</devices>
</domain>
"""
def name(self):
return "fake-domain %s" % self
def info(self):
return [power_state.RUNNING, None, None, None, None]
def create(self):
pass
def managedSave(self, *args):
pass
def createWithFlags(self, launch_flags):
pass
def XMLDesc(self, *args):
return self._fake_dom_xml
def UUIDString(self):
return self.uuidstr
class CacheConcurrencyTestCase(test.TestCase):
def setUp(self):
super(CacheConcurrencyTestCase, self).setUp()
self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
# utils.synchronized() will create the lock_path for us if it
# doesn't already exist. It will also delete it when it's done,
# which can cause race conditions with the multiple threads we
# use for tests. So, create the path here so utils.synchronized()
# won't delete it out from under one of the threads.
self.lock_path = os.path.join(CONF.instances_path, 'locks')
fileutils.ensure_tree(self.lock_path)
def fake_exists(fname):
basedir = os.path.join(CONF.instances_path, CONF.base_dir_name)
if fname == basedir or fname == self.lock_path:
return True
return False
def fake_execute(*args, **kwargs):
pass
def fake_extend(image, size):
pass
self.stubs.Set(os.path, 'exists', fake_exists)
self.stubs.Set(utils, 'execute', fake_execute)
self.stubs.Set(imagebackend.disk, 'extend', fake_extend)
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.imagebackend.libvirt_utils',
fake_libvirt_utils))
def test_same_fname_concurrency(self):
# Ensures that the same fname cache runs at a sequentially.
uuid = uuidutils.generate_uuid()
backend = imagebackend.Backend(False)
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
sig1 = eventlet.event.Event()
thr1 = eventlet.spawn(backend.image({'name': 'instance',
'uuid': uuid},
'name').cache,
_concurrency, 'fname', None,
signal=sig1, wait=wait1, done=done1)
eventlet.sleep(0)
# Thread 1 should run before thread 2.
sig1.wait()
wait2 = eventlet.event.Event()
done2 = eventlet.event.Event()
sig2 = eventlet.event.Event()
thr2 = eventlet.spawn(backend.image({'name': 'instance',
'uuid': uuid},
'name').cache,
_concurrency, 'fname', None,
signal=sig2, wait=wait2, done=done2)
wait2.send()
eventlet.sleep(0)
try:
self.assertFalse(done2.ready())
finally:
wait1.send()
done1.wait()
eventlet.sleep(0)
self.assertTrue(done2.ready())
# Wait on greenthreads to assert they didn't raise exceptions
# during execution
thr1.wait()
thr2.wait()
def test_different_fname_concurrency(self):
# Ensures that two different fname caches are concurrent.
uuid = uuidutils.generate_uuid()
backend = imagebackend.Backend(False)
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
sig1 = eventlet.event.Event()
thr1 = eventlet.spawn(backend.image({'name': 'instance',
'uuid': uuid},
'name').cache,
_concurrency, 'fname2', None,
signal=sig1, wait=wait1, done=done1)
eventlet.sleep(0)
# Thread 1 should run before thread 2.
sig1.wait()
wait2 = eventlet.event.Event()
done2 = eventlet.event.Event()
sig2 = eventlet.event.Event()
thr2 = eventlet.spawn(backend.image({'name': 'instance',
'uuid': uuid},
'name').cache,
_concurrency, 'fname1', None,
signal=sig2, wait=wait2, done=done2)
eventlet.sleep(0)
# Wait for thread 2 to start.
sig2.wait()
wait2.send()
eventlet.sleep(0)
try:
self.assertTrue(done2.ready())
finally:
wait1.send()
eventlet.sleep(0)
# Wait on greenthreads to assert they didn't raise exceptions
# during execution
thr1.wait()
thr2.wait()
class FakeVolumeDriver(object):
def __init__(self, *args, **kwargs):
pass
def attach_volume(self, *args):
pass
def detach_volume(self, *args):
pass
def get_xml(self, *args):
return ""
class FakeConfigGuestDisk(object):
def __init__(self, *args, **kwargs):
self.source_type = None
self.driver_cache = None
class FakeConfigGuest(object):
def __init__(self, *args, **kwargs):
self.driver_cache = None
class LibvirtConnTestCase(test.TestCase):
def setUp(self):
super(LibvirtConnTestCase, self).setUp()
self.flags(fake_call=True)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.get_admin_context()
self.flags(instances_path='')
self.flags(libvirt_snapshots_directory='')
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.libvirt_utils',
fake_libvirt_utils))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.imagebackend.libvirt_utils',
fake_libvirt_utils))
def fake_extend(image, size):
pass
self.stubs.Set(libvirt_driver.disk, 'extend', fake_extend)
instance_type = db.instance_type_get(self.context, 5)
sys_meta = instance_types.save_instance_type_info({}, instance_type)
nova.tests.image.fake.stub_out_image_service(self.stubs)
self.test_instance = {
'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
'memory_kb': '1024000',
'basepath': '/some/path',
'bridge_name': 'br100',
'vcpus': 2,
'project_id': 'fake',
'bridge': 'br101',
'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'root_gb': 10,
'ephemeral_gb': 20,
'instance_type_id': '5', # m1.small
'extra_specs': {},
'system_metadata': sys_meta}
def tearDown(self):
nova.tests.image.fake.FakeImageService_reset()
super(LibvirtConnTestCase, self).tearDown()
def create_fake_libvirt_mock(self, **kwargs):
"""Defining mocks for LibvirtDriver(libvirt is not used)."""
# A fake libvirt.virConnect
class FakeLibvirtDriver(object):
def defineXML(self, xml):
return FakeVirtDomain()
# Creating mocks
volume_driver = 'iscsi=nova.tests.test_libvirt.FakeVolumeDriver'
self.flags(libvirt_volume_drivers=[volume_driver])
fake = FakeLibvirtDriver()
# Customizing above fake if necessary
for key, val in kwargs.items():
fake.__setattr__(key, val)
self.flags(libvirt_vif_driver="nova.tests.fake_network.FakeVIFDriver")
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn = fake
def fake_lookup(self, instance_name):
return FakeVirtDomain()
def fake_execute(self, *args, **kwargs):
open(args[-1], "a").close()
def create_service(self, **kwargs):
service_ref = {'host': kwargs.get('host', 'dummy'),
'binary': 'nova-compute',
'topic': 'compute',
'report_count': 0}
return db.service_create(context.get_admin_context(), service_ref)
def test_get_connector(self):
initiator = 'fake.initiator.iqn'
ip = 'fakeip'
host = 'fakehost'
wwpns = ['100010604b019419']
wwnns = ['200010604b019419']
self.flags(my_ip=ip)
self.flags(host=host)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
expected = {
'ip': ip,
'initiator': initiator,
'host': host,
'wwpns': wwpns,
'wwnns': wwnns
}
volume = {
'id': 'fake'
}
result = conn.get_volume_connector(volume)
self.assertThat(expected, matchers.DictMatches(result))
def test_get_guest_config(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(cfg.acpi, True)
self.assertEquals(cfg.apic, True)
self.assertEquals(cfg.memory, 1024 * 1024 * 2)
self.assertEquals(cfg.vcpus, 1)
self.assertEquals(cfg.os_type, vm_mode.HVM)
self.assertEquals(cfg.os_boot_dev, "hd")
self.assertEquals(cfg.os_root, None)
self.assertEquals(len(cfg.devices), 7)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestInterface)
self.assertEquals(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[5]),
vconfig.LibvirtConfigGuestInput)
self.assertEquals(type(cfg.devices[6]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEquals(type(cfg.clock),
vconfig.LibvirtConfigGuestClock)
self.assertEquals(cfg.clock.offset, "utc")
self.assertEquals(len(cfg.clock.timers), 2)
self.assertEquals(type(cfg.clock.timers[0]),
vconfig.LibvirtConfigGuestTimer)
self.assertEquals(type(cfg.clock.timers[1]),
vconfig.LibvirtConfigGuestTimer)
self.assertEquals(cfg.clock.timers[0].name, "pit")
self.assertEquals(cfg.clock.timers[0].tickpolicy,
"delay")
self.assertEquals(cfg.clock.timers[1].name, "rtc")
self.assertEquals(cfg.clock.timers[1].tickpolicy,
"catchup")
def test_get_guest_config_with_two_nics(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 2),
None, disk_info)
self.assertEquals(cfg.acpi, True)
self.assertEquals(cfg.memory, 1024 * 1024 * 2)
self.assertEquals(cfg.vcpus, 1)
self.assertEquals(cfg.os_type, vm_mode.HVM)
self.assertEquals(cfg.os_boot_dev, "hd")
self.assertEquals(cfg.os_root, None)
self.assertEquals(len(cfg.devices), 8)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestInterface)
self.assertEquals(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestInterface)
self.assertEquals(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[5]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[6]),
vconfig.LibvirtConfigGuestInput)
self.assertEquals(type(cfg.devices[7]),
vconfig.LibvirtConfigGuestGraphics)
def test_get_guest_config_bug_1118829(self):
self.flags(libvirt_type='uml')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = {'disk_bus': 'virtio',
'cdrom_bus': 'ide',
'mapping': {u'vda': {'bus': 'virtio',
'type': 'disk',
'dev': u'vda'},
'root': {'bus': 'virtio',
'type': 'disk',
'dev': 'vda'}}}
# NOTE(jdg): For this specific test leave this blank
# This will exercise the failed code path still,
# and won't require fakes and stubs of the iscsi discovery
block_device_info = {}
cfg = conn.get_guest_config(instance_ref, [], None, disk_info,
None, block_device_info)
instance_ref = db.instance_get(self.context, instance_ref['id'])
self.assertEquals(instance_ref['root_device_name'], '/dev/vda')
def test_get_guest_config_with_root_device_name(self):
self.flags(libvirt_type='uml')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
block_device_info = {'root_device_name': '/dev/vdb'}
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref,
block_device_info)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info,
None, block_device_info)
self.assertEquals(cfg.acpi, False)
self.assertEquals(cfg.memory, 1024 * 1024 * 2)
self.assertEquals(cfg.vcpus, 1)
self.assertEquals(cfg.os_type, "uml")
self.assertEquals(cfg.os_boot_dev, None)
self.assertEquals(cfg.os_root, '/dev/vdb')
self.assertEquals(len(cfg.devices), 3)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestConsole)
def test_get_guest_config_with_block_device(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
conn_info = {'driver_volume_type': 'fake'}
info = {'block_device_mapping': [
{'connection_info': conn_info, 'mount_device': '/dev/vdc'},
{'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref, info)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info,
None, info)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(cfg.devices[2].target_dev, 'vdc')
self.assertEquals(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(cfg.devices[3].target_dev, 'vdd')
def test_get_guest_config_with_configdrive(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
# make configdrive.enabled_for() return True
instance_ref['config_drive'] = 'ANY_ID'
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(cfg.devices[2].target_dev, 'hdd')
def test_get_guest_config_with_vnc(self):
self.flags(libvirt_type='kvm',
vnc_enabled=True,
use_usb_tablet=False)
self.flags(enabled=False, group='spice')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(len(cfg.devices), 5)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEquals(cfg.devices[4].type, "vnc")
def test_get_guest_config_with_vnc_and_tablet(self):
self.flags(libvirt_type='kvm',
vnc_enabled=True,
use_usb_tablet=True)
self.flags(enabled=False, group='spice')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(len(cfg.devices), 6)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestInput)
self.assertEquals(type(cfg.devices[5]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEquals(cfg.devices[4].type, "tablet")
self.assertEquals(cfg.devices[5].type, "vnc")
def test_get_guest_config_with_spice_and_tablet(self):
self.flags(libvirt_type='kvm',
vnc_enabled=False,
use_usb_tablet=True)
self.flags(enabled=True,
agent_enabled=False,
group='spice')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(len(cfg.devices), 6)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestInput)
self.assertEquals(type(cfg.devices[5]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEquals(cfg.devices[4].type, "tablet")
self.assertEquals(cfg.devices[5].type, "spice")
def test_get_guest_config_with_spice_and_agent(self):
self.flags(libvirt_type='kvm',
vnc_enabled=False,
use_usb_tablet=True)
self.flags(enabled=True,
agent_enabled=True,
group='spice')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(len(cfg.devices), 6)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestChannel)
self.assertEquals(type(cfg.devices[5]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEquals(cfg.devices[4].target_name, "com.redhat.spice.0")
self.assertEquals(cfg.devices[5].type, "spice")
def test_get_guest_config_with_vnc_and_spice(self):
self.flags(libvirt_type='kvm',
vnc_enabled=True,
use_usb_tablet=True)
self.flags(enabled=True,
agent_enabled=True,
group='spice')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(len(cfg.devices), 8)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[3]),
vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[4]),
vconfig.LibvirtConfigGuestInput)
self.assertEquals(type(cfg.devices[5]),
vconfig.LibvirtConfigGuestChannel)
self.assertEquals(type(cfg.devices[6]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEquals(type(cfg.devices[7]),
vconfig.LibvirtConfigGuestGraphics)
self.assertEquals(cfg.devices[4].type, "tablet")
self.assertEquals(cfg.devices[5].target_name, "com.redhat.spice.0")
self.assertEquals(cfg.devices[6].type, "vnc")
self.assertEquals(cfg.devices[7].type, "spice")
def test_get_guest_cpu_config_none(self):
self.flags(libvirt_cpu_mode="none")
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(conf.cpu, None)
def test_get_guest_cpu_config_default_kvm(self):
self.flags(libvirt_type="kvm",
libvirt_cpu_mode=None)
def get_lib_version_stub(self):
return (0 * 1000 * 1000) + (9 * 1000) + 11
self.stubs.Set(libvirt.virConnect,
"getLibVersion",
get_lib_version_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "host-model")
self.assertEquals(conf.cpu.model, None)
def test_get_guest_cpu_config_default_uml(self):
self.flags(libvirt_type="uml",
libvirt_cpu_mode=None)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(conf.cpu, None)
def test_get_guest_cpu_config_default_lxc(self):
self.flags(libvirt_type="lxc",
libvirt_cpu_mode=None)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(conf.cpu, None)
def test_get_guest_cpu_config_host_passthrough_new(self):
def get_lib_version_stub(self):
return (0 * 1000 * 1000) + (9 * 1000) + 11
self.stubs.Set(libvirt.virConnect,
"getLibVersion",
get_lib_version_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-passthrough")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "host-passthrough")
self.assertEquals(conf.cpu.model, None)
def test_get_guest_cpu_config_host_model_new(self):
def get_lib_version_stub(self):
return (0 * 1000 * 1000) + (9 * 1000) + 11
self.stubs.Set(libvirt.virConnect,
"getLibVersion",
get_lib_version_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-model")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "host-model")
self.assertEquals(conf.cpu.model, None)
def test_get_guest_cpu_config_custom_new(self):
def get_lib_version_stub(self):
return (0 * 1000 * 1000) + (9 * 1000) + 11
self.stubs.Set(libvirt.virConnect,
"getLibVersion",
get_lib_version_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="custom")
self.flags(libvirt_cpu_model="Penryn")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "custom")
self.assertEquals(conf.cpu.model, "Penryn")
def test_get_guest_cpu_config_host_passthrough_old(self):
def get_lib_version_stub(self):
return (0 * 1000 * 1000) + (9 * 1000) + 7
self.stubs.Set(libvirt.virConnect, "getLibVersion",
get_lib_version_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-passthrough")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
self.assertRaises(exception.NovaException,
conn.get_guest_config,
instance_ref,
_fake_network_info(self.stubs, 1),
None,
disk_info)
def test_get_guest_cpu_config_host_model_old(self):
def get_lib_version_stub(self):
return (0 * 1000 * 1000) + (9 * 1000) + 7
# Ensure we have a predictable host CPU
def get_host_capabilities_stub(self):
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.model = "Opteron_G4"
cpu.vendor = "AMD"
cpu.features.append(vconfig.LibvirtConfigGuestCPUFeature("tm2"))
cpu.features.append(vconfig.LibvirtConfigGuestCPUFeature("ht"))
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
return caps
self.stubs.Set(libvirt.virConnect,
"getLibVersion",
get_lib_version_stub)
self.stubs.Set(libvirt_driver.LibvirtDriver,
"get_host_capabilities",
get_host_capabilities_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-model")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, None)
self.assertEquals(conf.cpu.model, "Opteron_G4")
self.assertEquals(conf.cpu.vendor, "AMD")
self.assertEquals(len(conf.cpu.features), 2)
self.assertEquals(conf.cpu.features[0].name, "tm2")
self.assertEquals(conf.cpu.features[1].name, "ht")
def test_get_guest_cpu_config_custom_old(self):
def get_lib_version_stub(self):
return (0 * 1000 * 1000) + (9 * 1000) + 7
self.stubs.Set(libvirt.virConnect,
"getLibVersion",
get_lib_version_stub)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="custom")
self.flags(libvirt_cpu_model="Penryn")
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, None)
self.assertEquals(conf.cpu.model, "Penryn")
def test_xml_and_uri_no_ramdisk_no_kernel(self):
instance_data = dict(self.test_instance)
self._check_xml_and_uri(instance_data,
expect_kernel=False, expect_ramdisk=False)
def test_xml_and_uri_no_ramdisk_no_kernel_xen_hvm(self):
instance_data = dict(self.test_instance)
instance_data.update({'vm_mode': vm_mode.HVM})
self._check_xml_and_uri(instance_data, expect_kernel=False,
expect_ramdisk=False, expect_xen_hvm=True)
def test_xml_and_uri_no_ramdisk_no_kernel_xen_pv(self):
instance_data = dict(self.test_instance)
instance_data.update({'vm_mode': vm_mode.XEN})
self._check_xml_and_uri(instance_data, expect_kernel=False,
expect_ramdisk=False, expect_xen_hvm=False,
xen_only=True)
def test_xml_and_uri_no_ramdisk(self):
instance_data = dict(self.test_instance)
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data,
expect_kernel=True, expect_ramdisk=False)
def test_xml_and_uri_no_kernel(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
self._check_xml_and_uri(instance_data,
expect_kernel=False, expect_ramdisk=False)
def test_xml_and_uri(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data,
expect_kernel=True, expect_ramdisk=True)
def test_xml_and_uri_rescue(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data, expect_kernel=True,
expect_ramdisk=True, rescue=instance_data)
def test_xml_and_uri_rescue_no_kernel_no_ramdisk(self):
instance_data = dict(self.test_instance)
self._check_xml_and_uri(instance_data, expect_kernel=False,
expect_ramdisk=False, rescue=instance_data)
def test_xml_and_uri_rescue_no_kernel(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data, expect_kernel=False,
expect_ramdisk=True, rescue=instance_data)
def test_xml_and_uri_rescue_no_ramdisk(self):
instance_data = dict(self.test_instance)
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data, expect_kernel=True,
expect_ramdisk=False, rescue=instance_data)
def test_xml_uuid(self):
self._check_xml_and_uuid({"disk_format": "raw"})
def test_lxc_container_and_uri(self):
instance_data = dict(self.test_instance)
self._check_xml_and_container(instance_data)
def test_xml_disk_prefix(self):
instance_data = dict(self.test_instance)
self._check_xml_and_disk_prefix(instance_data)
def test_xml_user_specified_disk_prefix(self):
instance_data = dict(self.test_instance)
self._check_xml_and_disk_prefix(instance_data, 'sd')
def test_xml_disk_driver(self):
instance_data = dict(self.test_instance)
self._check_xml_and_disk_driver(instance_data)
def test_xml_disk_bus_virtio(self):
self._check_xml_and_disk_bus({"disk_format": "raw"},
None,
(("disk", "virtio", "vda"),))
def test_xml_disk_bus_ide(self):
self._check_xml_and_disk_bus({"disk_format": "iso"},
None,
(("cdrom", "ide", "hda"),))
def test_xml_disk_bus_ide_and_virtio(self):
swap = {'device_name': '/dev/vdc',
'swap_size': 1}
ephemerals = [{'num': 0,
'virtual_name': 'ephemeral0',
'device_name': '/dev/vdb',
'size': 1}]
block_device_info = {
'swap': swap,
'ephemerals': ephemerals}
self._check_xml_and_disk_bus({"disk_format": "iso"},
block_device_info,
(("cdrom", "ide", "hda"),
("disk", "virtio", "vdb"),
("disk", "virtio", "vdc")))
def test_list_instances(self):
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByID = self.fake_lookup
libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 2
libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: [0, 1]
libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: []
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instances = conn.list_instances()
        # Only one should be listed, since the domain with ID 0 must be skipped
self.assertEquals(len(instances), 1)
def test_list_defined_instances(self):
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByID = self.fake_lookup
libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 1
libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: [0]
libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: [1]
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instances = conn.list_instances()
# Only one defined domain should be listed
self.assertEquals(len(instances), 1)
def test_list_instances_when_instance_deleted(self):
def fake_lookup(instance_name):
raise libvirt.libvirtError("we deleted an instance!")
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup
libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 1
libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: [0, 1]
libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: []
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instances = conn.list_instances()
        # None should be listed, since we faked deleting the last one
self.assertEquals(len(instances), 0)
def test_get_all_block_devices(self):
xml = [
# NOTE(vish): id 0 is skipped
None,
"""
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
</disk>
</devices>
</domain>
""",
"""
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
</devices>
</domain>
""",
"""
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/3'/>
</disk>
</devices>
</domain>
""",
]
def fake_lookup(id):
return FakeVirtDomain(xml[id])
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 4
libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: range(4)
libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
devices = conn.get_all_block_devices()
self.assertEqual(devices, ['/path/to/dev/1', '/path/to/dev/3'])
def test_get_disks(self):
xml = [
# NOTE(vish): id 0 is skipped
None,
"""
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
</devices>
</domain>
""",
"""
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
</devices>
</domain>
""",
"""
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/3'/>
<target dev='vdb' bus='virtio'/>
</disk>
</devices>
</domain>
""",
]
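# lookupByID returns the fake domain for the given ID; lookupByName always returns xml[1]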
def fake_lookup(id):
return FakeVirtDomain(xml[id])
def fake_lookup_name(name):
return FakeVirtDomain(xml[1])
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 4
libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: range(4)
libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: []
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
devices = conn.get_disks(conn.list_instances()[0])
self.assertEqual(devices, ['vda', 'vdb'])
def test_snapshot_in_ami_format(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(libvirt_snapshots_directory='./')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
# Assign different image_ref from nova/images/fakes for testing ami
test_instance = copy.deepcopy(self.test_instance)
test_instance["image_ref"] = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
# Create new image. It will be updated in snapshot method
# To work with it from the snapshot code, the same image_service instance is needed
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
libvirt_driver.libvirt_utils.disk_type = "qcow2"
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['disk_format'], 'ami')
self.assertEquals(snapshot['name'], snapshot_name)
def test_lxc_snapshot_in_ami_format(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(libvirt_snapshots_directory='./',
libvirt_type='lxc')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
# Assign different image_ref from nova/images/fakes for testing ami
test_instance = copy.deepcopy(self.test_instance)
test_instance["image_ref"] = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
# Create new image. It will be updated in snapshot method
# To work with it from the snapshot code, the same image_service instance is needed
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
libvirt_driver.libvirt_utils.disk_type = "qcow2"
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['disk_format'], 'ami')
self.assertEquals(snapshot['name'], snapshot_name)
def test_snapshot_in_raw_format(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(libvirt_snapshots_directory='./')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, self.test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
# Create new image. It will be updated in snapshot method
# To work with it from the snapshot code, the same image_service instance is needed
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
self.stubs.Set(libvirt_driver.libvirt_utils, 'disk_type', 'raw')
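# Stub out convert_image so the raw conversion only registers the destination
# file in the fake libvirt_utils filesystem instead of running qemu-img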
def convert_image(source, dest, out_format):
libvirt_driver.libvirt_utils.files[dest] = ''
self.stubs.Set(images, 'convert_image', convert_image)
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['disk_format'], 'raw')
self.assertEquals(snapshot['name'], snapshot_name)
def test_lxc_snapshot_in_raw_format(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(libvirt_snapshots_directory='./',
libvirt_type='lxc')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, self.test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
# Create new image. It will be updated in snapshot method
# To work with it from the snapshot code, the same image_service instance is needed
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
self.stubs.Set(libvirt_driver.libvirt_utils, 'disk_type', 'raw')
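# Stub out convert_image so the raw conversion only registers the destination
# file in the fake libvirt_utils filesystem instead of running qemu-img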
def convert_image(source, dest, out_format):
libvirt_driver.libvirt_utils.files[dest] = ''
self.stubs.Set(images, 'convert_image', convert_image)
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['disk_format'], 'raw')
self.assertEquals(snapshot['name'], snapshot_name)
def test_snapshot_in_qcow2_format(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(snapshot_image_format='qcow2',
libvirt_snapshots_directory='./')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, self.test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
# Create new image. It will be updated in snapshot method
# To work with it from the snapshot code, the same image_service instance is needed
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
libvirt_driver.libvirt_utils.disk_type = "qcow2"
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['disk_format'], 'qcow2')
self.assertEquals(snapshot['name'], snapshot_name)
def test_lxc_snapshot_in_qcow2_format(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(snapshot_image_format='qcow2',
libvirt_snapshots_directory='./',
libvirt_type='lxc')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, self.test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
# Create new image. It will be updated in snapshot method
# To work with it from the snapshot code, the same image_service instance is needed
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
libvirt_driver.libvirt_utils.disk_type = "qcow2"
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['disk_format'], 'qcow2')
self.assertEquals(snapshot['name'], snapshot_name)
def test_snapshot_no_image_architecture(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(libvirt_snapshots_directory='./')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
# Assign different image_ref from nova/images/fakes for
# testing different base image
test_instance = copy.deepcopy(self.test_instance)
test_instance["image_ref"] = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
# Create new image. It will be updated in snapshot method
# To work with it from the snapshot code, the same image_service instance is needed
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['name'], snapshot_name)
def test_lxc_snapshot_no_image_architecture(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(libvirt_snapshots_directory='./',
libvirt_type='lxc')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
# Assign different image_ref from nova/images/fakes for
# testing different base image
test_instance = copy.deepcopy(self.test_instance)
test_instance["image_ref"] = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
# Assuming that base image already exists in image_service
instance_ref = db.instance_create(self.context, test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
# Create new image. It will be updated in snapshot method
# To work with it from snapshot, the single image_service is needed
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['name'], snapshot_name)
def test_snapshot_no_original_image(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(libvirt_snapshots_directory='./')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
# Assign a non-existent image
test_instance = copy.deepcopy(self.test_instance)
test_instance["image_ref"] = '661122aa-1234-dede-fefe-babababababa'
instance_ref = db.instance_create(self.context, test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['name'], snapshot_name)
def test_lxc_snapshot_no_original_image(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
self.flags(libvirt_snapshots_directory='./',
libvirt_type='lxc')
# Start test
image_service = nova.tests.image.fake.FakeImageService()
# Assign a non-existent image
test_instance = copy.deepcopy(self.test_instance)
test_instance["image_ref"] = '661122aa-1234-dede-fefe-babababababa'
instance_ref = db.instance_create(self.context, test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
libvirt_driver.utils.execute = self.fake_execute
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(context, recv_meta['id'])
self.assertIsNone(func_call_matcher.match())
self.assertEquals(snapshot['properties']['image_state'], 'available')
self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['name'], snapshot_name)
def test_attach_invalid_volume_type(self):
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.VolumeDriverNotFound,
conn.attach_volume,
{"driver_volume_type": "badtype"},
{"name": "fake-instance"},
"/dev/sda")
def test_multi_nic(self):
instance_data = dict(self.test_instance)
network_info = _fake_network_info(self.stubs, 2)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, instance_data)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
xml = conn.to_xml(instance_ref, network_info, disk_info)
tree = etree.fromstring(xml)
interfaces = tree.findall("./devices/interface")
self.assertEquals(len(interfaces), 2)
self.assertEquals(interfaces[0].get('type'), 'bridge')
def _check_xml_and_container(self, instance):
user_context = context.RequestContext(self.user_id,
self.project_id)
instance_ref = db.instance_create(user_context, instance)
self.flags(libvirt_type='lxc')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertEquals(conn.uri(), 'lxc:///')
network_info = _fake_network_info(self.stubs, 1)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
xml = conn.to_xml(instance_ref, network_info, disk_info)
tree = etree.fromstring(xml)
checks = [
(lambda t: t.find('.').get('type'), 'lxc'),
(lambda t: t.find('./os/type').text, 'exe'),
(lambda t: t.find('./devices/filesystem/target').get('dir'), '/')]
for i, (check, expected_result) in enumerate(checks):
self.assertEqual(check(tree),
expected_result,
'%s failed common check %d' % (xml, i))
target = tree.find('./devices/filesystem/source').get('dir')
self.assertTrue(len(target) > 0)
def _check_xml_and_disk_prefix(self, instance, prefix=None):
user_context = context.RequestContext(self.user_id,
self.project_id)
instance_ref = db.instance_create(user_context, instance)
def _get_prefix(p, default):
if p:
return p + 'a'
return default
type_disk_map = {
'qemu': [
(lambda t: t.find('.').get('type'), 'qemu'),
(lambda t: t.find('./devices/disk/target').get('dev'),
_get_prefix(prefix, 'vda'))],
'xen': [
(lambda t: t.find('.').get('type'), 'xen'),
(lambda t: t.find('./devices/disk/target').get('dev'),
_get_prefix(prefix, 'sda'))],
'kvm': [
(lambda t: t.find('.').get('type'), 'kvm'),
(lambda t: t.find('./devices/disk/target').get('dev'),
_get_prefix(prefix, 'vda'))],
'uml': [
(lambda t: t.find('.').get('type'), 'uml'),
(lambda t: t.find('./devices/disk/target').get('dev'),
_get_prefix(prefix, 'ubda'))]
}
for (libvirt_type, checks) in type_disk_map.iteritems():
self.flags(libvirt_type=libvirt_type)
if prefix:
self.flags(libvirt_disk_prefix=prefix)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
network_info = _fake_network_info(self.stubs, 1)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
xml = conn.to_xml(instance_ref, network_info, disk_info)
tree = etree.fromstring(xml)
for i, (check, expected_result) in enumerate(checks):
self.assertEqual(check(tree),
expected_result,
'%s != %s failed check %d' %
(check(tree), expected_result, i))
def _check_xml_and_disk_driver(self, image_meta):
os_open = os.open
directio_supported = True
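# os_open_stub emulates a filesystem without O_DIRECT support whenever
# directio_supported is set to False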
def os_open_stub(path, flags, *args, **kwargs):
if flags & os.O_DIRECT:
if not directio_supported:
raise OSError(errno.EINVAL,
'%s: %s' % (os.strerror(errno.EINVAL), path))
flags &= ~os.O_DIRECT
return os_open(path, flags, *args, **kwargs)
self.stubs.Set(os, 'open', os_open_stub)
def connection_supports_direct_io_stub(*args, **kwargs):
return directio_supported
self.stubs.Set(libvirt_driver.LibvirtDriver,
'_supports_direct_io', connection_supports_direct_io_stub)
user_context = context.RequestContext(self.user_id, self.project_id)
instance_ref = db.instance_create(user_context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
xml = drv.to_xml(instance_ref, network_info, disk_info, image_meta)
tree = etree.fromstring(xml)
disks = tree.findall('./devices/disk/driver')
for disk in disks:
self.assertEqual(disk.get("cache"), "none")
directio_supported = False
# The O_DIRECT availability is cached on first use in
# LibvirtDriver, hence we re-create it here
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
xml = drv.to_xml(instance_ref, network_info, disk_info, image_meta)
tree = etree.fromstring(xml)
disks = tree.findall('./devices/disk/driver')
for disk in disks:
self.assertEqual(disk.get("cache"), "writethrough")
def _check_xml_and_disk_bus(self, image_meta,
block_device_info, wantConfig):
user_context = context.RequestContext(self.user_id, self.project_id)
instance_ref = db.instance_create(user_context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref,
block_device_info,
image_meta)
xml = drv.to_xml(instance_ref, network_info, disk_info, image_meta,
block_device_info=block_device_info)
tree = etree.fromstring(xml)
got_disks = tree.findall('./devices/disk')
got_disk_targets = tree.findall('./devices/disk/target')
for i in range(len(wantConfig)):
want_device_type = wantConfig[i][0]
want_device_bus = wantConfig[i][1]
want_device_dev = wantConfig[i][2]
got_device_type = got_disks[i].get('device')
got_device_bus = got_disk_targets[i].get('bus')
got_device_dev = got_disk_targets[i].get('dev')
self.assertEqual(got_device_type, want_device_type)
self.assertEqual(got_device_bus, want_device_bus)
self.assertEqual(got_device_dev, want_device_dev)
def _check_xml_and_uuid(self, image_meta):
user_context = context.RequestContext(self.user_id, self.project_id)
instance_ref = db.instance_create(user_context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref)
xml = drv.to_xml(instance_ref, network_info, disk_info, image_meta)
tree = etree.fromstring(xml)
self.assertEqual(tree.find('./uuid').text,
instance_ref['uuid'])
def _check_xml_and_uri(self, instance, expect_ramdisk, expect_kernel,
rescue=None, expect_xen_hvm=False, xen_only=False):
user_context = context.RequestContext(self.user_id, self.project_id)
instance_ref = db.instance_create(user_context, instance)
network_ref = db.project_get_networks(context.get_admin_context(),
self.project_id)[0]
type_uri_map = {'qemu': ('qemu:///system',
[(lambda t: t.find('.').get('type'), 'qemu'),
(lambda t: t.find('./os/type').text,
vm_mode.HVM),
(lambda t: t.find('./devices/emulator'), None)]),
'kvm': ('qemu:///system',
[(lambda t: t.find('.').get('type'), 'kvm'),
(lambda t: t.find('./os/type').text,
vm_mode.HVM),
(lambda t: t.find('./devices/emulator'), None)]),
'uml': ('uml:///system',
[(lambda t: t.find('.').get('type'), 'uml'),
(lambda t: t.find('./os/type').text,
vm_mode.UML)]),
'xen': ('xen:///',
[(lambda t: t.find('.').get('type'), 'xen'),
(lambda t: t.find('./os/type').text,
vm_mode.XEN)])}
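# Each libvirt_type maps to its expected connection URI plus a list of XML checks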
if expect_xen_hvm or xen_only:
hypervisors_to_check = ['xen']
else:
hypervisors_to_check = ['qemu', 'kvm', 'xen']
if expect_xen_hvm:
type_uri_map = {}
type_uri_map['xen'] = ('xen:///',
[(lambda t: t.find('.').get('type'),
'xen'),
(lambda t: t.find('./os/type').text,
vm_mode.HVM)])
for hypervisor_type in hypervisors_to_check:
check_list = type_uri_map[hypervisor_type][1]
if rescue:
suffix = '.rescue'
else:
suffix = ''
if expect_kernel:
check = (lambda t: t.find('./os/kernel').text.split(
'/')[1], 'kernel' + suffix)
else:
check = (lambda t: t.find('./os/kernel'), None)
check_list.append(check)
# Hypervisors that only support vm_mode.HVM should
# not produce configuration that results in kernel
# arguments
if not expect_kernel and hypervisor_type in ['qemu', 'kvm']:
check = (lambda t: t.find('./os/root'), None)
check_list.append(check)
check = (lambda t: t.find('./os/cmdline'), None)
check_list.append(check)
if expect_ramdisk:
check = (lambda t: t.find('./os/initrd').text.split(
'/')[1], 'ramdisk' + suffix)
else:
check = (lambda t: t.find('./os/initrd'), None)
check_list.append(check)
if hypervisor_type in ['qemu', 'kvm']:
xpath = "./sysinfo/system/entry"
check = (lambda t: t.findall(xpath)[0].get("name"),
"manufacturer")
check_list.append(check)
check = (lambda t: t.findall(xpath)[0].text,
version.vendor_string())
check_list.append(check)
check = (lambda t: t.findall(xpath)[1].get("name"),
"product")
check_list.append(check)
check = (lambda t: t.findall(xpath)[1].text,
version.product_string())
check_list.append(check)
check = (lambda t: t.findall(xpath)[2].get("name"),
"version")
check_list.append(check)
# NOTE(sirp): empty strings don't roundtrip in lxml (they are
# converted to None), so we need an `or ''` to correct for that
check = (lambda t: t.findall(xpath)[2].text or '',
version.version_string_with_package())
check_list.append(check)
check = (lambda t: t.findall(xpath)[3].get("name"),
"serial")
check_list.append(check)
check = (lambda t: t.findall(xpath)[3].text,
"cef19ce0-0ca2-11df-855d-b19fbce37686")
check_list.append(check)
check = (lambda t: t.findall(xpath)[4].get("name"),
"uuid")
check_list.append(check)
check = (lambda t: t.findall(xpath)[4].text,
instance['uuid'])
check_list.append(check)
if hypervisor_type in ['qemu', 'kvm']:
check = (lambda t: t.findall('./devices/serial')[0].get(
'type'), 'file')
check_list.append(check)
check = (lambda t: t.findall('./devices/serial')[1].get(
'type'), 'pty')
check_list.append(check)
check = (lambda t: t.findall('./devices/serial/source')[0].get(
'path').split('/')[1], 'console.log')
check_list.append(check)
else:
check = (lambda t: t.find('./devices/console').get(
'type'), 'pty')
check_list.append(check)
common_checks = [
(lambda t: t.find('.').tag, 'domain'),
(lambda t: t.find('./memory').text, '2097152')]
if rescue:
common_checks += [
(lambda t: t.findall('./devices/disk/source')[0].get(
'file').split('/')[1], 'disk.rescue'),
(lambda t: t.findall('./devices/disk/source')[1].get(
'file').split('/')[1], 'disk')]
else:
common_checks += [(lambda t: t.findall(
'./devices/disk/source')[0].get('file').split('/')[1],
'disk')]
common_checks += [(lambda t: t.findall(
'./devices/disk/source')[1].get('file').split('/')[1],
'disk.local')]
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
self.flags(libvirt_type=libvirt_type)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertEquals(conn.uri(), expected_uri)
network_info = _fake_network_info(self.stubs, 1)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref,
rescue=rescue)
xml = conn.to_xml(instance_ref, network_info, disk_info,
rescue=rescue)
tree = etree.fromstring(xml)
for i, (check, expected_result) in enumerate(checks):
self.assertEqual(check(tree),
expected_result,
'%s != %s failed check %d' %
(check(tree), expected_result, i))
for i, (check, expected_result) in enumerate(common_checks):
self.assertEqual(check(tree),
expected_result,
'%s != %s failed common check %d' %
(check(tree), expected_result, i))
filterref = './devices/interface/filterref'
(network, mapping) = network_info[0]
nic_id = mapping['mac'].replace(':', '')
fw = firewall.NWFilterFirewall(fake.FakeVirtAPI(), conn)
instance_filter_name = fw._instance_filter_name(instance_ref,
nic_id)
self.assertEqual(tree.find(filterref).get('filter'),
instance_filter_name)
# This test is supposed to make sure we don't
# override a specifically set uri
#
# Deliberately not just assigning this string to CONF.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the CONF.
testuri = 'something completely different'
self.flags(libvirt_uri=testuri)
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
self.flags(libvirt_type=libvirt_type)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertEquals(conn.uri(), testuri)
db.instance_destroy(user_context, instance_ref['uuid'])
def test_ensure_filtering_rules_for_instance_timeout(self):
# ensure_filtering_rules_for_instance() finishes with a timeout.
# Preparing mocks
def fake_none(self, *args):
return
def fake_raise(self):
raise libvirt.libvirtError('ERR')
class FakeTime(object):
def __init__(self):
self.counter = 0
def sleep(self, t):
self.counter += t
fake_timer = FakeTime()
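# FakeTime accumulates the requested sleep time so the retry duration can be asserted below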
# _fake_network_info must be called before create_fake_libvirt_mock(),
# as _fake_network_info calls importutils.import_class() and
# create_fake_libvirt_mock() mocks importutils.import_class().
network_info = _fake_network_info(self.stubs, 1)
self.create_fake_libvirt_mock()
instance_ref = db.instance_create(self.context, self.test_instance)
# Start test
self.mox.ReplayAll()
try:
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn.firewall_driver,
'setup_basic_filtering',
fake_none)
self.stubs.Set(conn.firewall_driver,
'prepare_instance_filter',
fake_none)
self.stubs.Set(conn.firewall_driver,
'instance_filter_exists',
fake_none)
conn.ensure_filtering_rules_for_instance(instance_ref,
network_info,
time_module=fake_timer)
except exception.NovaException, e:
msg = ('The firewall filter for %s does not exist' %
instance_ref['name'])
c1 = (0 <= str(e).find(msg))
self.assertTrue(c1)
self.assertEqual(29, fake_timer.counter, "Didn't wait the expected "
"amount of time")
db.instance_destroy(self.context, instance_ref['uuid'])
def test_check_can_live_migrate_dest_all_pass_with_block_migration(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest = "fake_host_2"
src = instance_ref['host']
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'disk_available_least': 400,
'cpu_info': 'asdf',
}
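# disk_available_least is reported in GB; the driver is expected to return it in MB (400 GB -> 409600 MB)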
filename = "file"
self.mox.StubOutWithMock(conn, '_create_shared_storage_test_file')
self.mox.StubOutWithMock(conn, '_compare_cpu')
# _check_cpu_match
conn._compare_cpu("asdf")
# mounted_on_same_shared_storage
conn._create_shared_storage_test_file().AndReturn(filename)
self.mox.ReplayAll()
return_value = conn.check_can_live_migrate_destination(self.context,
instance_ref, compute_info, compute_info, True)
self.assertThat({"filename": "file",
'disk_available_mb': 409600,
"disk_over_commit": False,
"block_migration": True},
matchers.DictMatches(return_value))
def test_check_can_live_migrate_dest_all_pass_no_block_migration(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest = "fake_host_2"
src = instance_ref['host']
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'cpu_info': 'asdf'}
filename = "file"
self.mox.StubOutWithMock(conn, '_create_shared_storage_test_file')
self.mox.StubOutWithMock(conn, '_compare_cpu')
# _check_cpu_match
conn._compare_cpu("asdf")
# mounted_on_same_shared_storage
conn._create_shared_storage_test_file().AndReturn(filename)
self.mox.ReplayAll()
return_value = conn.check_can_live_migrate_destination(self.context,
instance_ref, compute_info, compute_info, False)
self.assertThat({"filename": "file",
"block_migration": False,
"disk_over_commit": False,
"disk_available_mb": None},
matchers.DictMatches(return_value))
def test_check_can_live_migrate_dest_incompatible_cpu_raises(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest = "fake_host_2"
src = instance_ref['host']
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'cpu_info': 'asdf'}
self.mox.StubOutWithMock(conn, '_compare_cpu')
conn._compare_cpu("asdf").AndRaise(exception.InvalidCPUInfo(
reason='foo')
)
self.mox.ReplayAll()
self.assertRaises(exception.InvalidCPUInfo,
conn.check_can_live_migrate_destination,
self.context, instance_ref,
compute_info, compute_info, False)
def test_check_can_live_migrate_dest_cleanup_works_correctly(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest_check_data = {"filename": "file",
"block_migration": True,
"disk_over_commit": False,
"disk_available_mb": 1024}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, '_cleanup_shared_storage_test_file')
conn._cleanup_shared_storage_test_file("file")
self.mox.ReplayAll()
conn.check_can_live_migrate_destination_cleanup(self.context,
dest_check_data)
def test_check_can_live_migrate_source_works_correctly(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest_check_data = {"filename": "file",
"block_migration": True,
"disk_over_commit": False,
"disk_available_mb": 1024}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(False)
self.mox.StubOutWithMock(conn, "_assert_dest_node_has_enough_disk")
conn._assert_dest_node_has_enough_disk(
self.context, instance_ref, dest_check_data['disk_available_mb'],
False)
self.mox.ReplayAll()
conn.check_can_live_migrate_source(self.context, instance_ref,
dest_check_data)
def test_check_can_live_migrate_source_vol_backed_works_correctly(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest_check_data = {"filename": "file",
"block_migration": False,
"disk_over_commit": False,
"disk_available_mb": 1024,
"is_volume_backed": True}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(False)
self.mox.ReplayAll()
ret = conn.check_can_live_migrate_source(self.context, instance_ref,
dest_check_data)
self.assertIsInstance(ret, dict)
self.assertIn('is_shared_storage', ret)
def test_check_can_live_migrate_source_vol_backed_fails(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest_check_data = {"filename": "file",
"block_migration": False,
"disk_over_commit": False,
"disk_available_mb": 1024,
"is_volume_backed": False}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(exception.InvalidSharedStorage,
conn.check_can_live_migrate_source, self.context,
instance_ref, dest_check_data)
def test_check_can_live_migrate_dest_fail_shared_storage_with_blockm(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest_check_data = {"filename": "file",
"block_migration": True,
"disk_over_commit": False,
'disk_available_mb': 1024}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(True)
self.mox.ReplayAll()
self.assertRaises(exception.InvalidLocalStorage,
conn.check_can_live_migrate_source,
self.context, instance_ref, dest_check_data)
def test_check_can_live_migrate_no_shared_storage_no_blck_mig_raises(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest_check_data = {"filename": "file",
"block_migration": False,
"disk_over_commit": False,
'disk_available_mb': 1024}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(exception.InvalidSharedStorage,
conn.check_can_live_migrate_source,
self.context, instance_ref, dest_check_data)
def test_check_can_live_migrate_source_with_dest_not_enough_disk(self):
instance_ref = db.instance_create(self.context, self.test_instance)
dest = "fake_host_2"
src = instance_ref['host']
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
conn._check_shared_storage_test_file("file").AndReturn(False)
self.mox.StubOutWithMock(conn, "get_instance_disk_info")
conn.get_instance_disk_info(instance_ref["name"]).AndReturn(
'[{"virt_disk_size":2}]')
dest_check_data = {"filename": "file",
"disk_available_mb": 0,
"block_migration": True,
"disk_over_commit": False}
self.mox.ReplayAll()
self.assertRaises(exception.MigrationError,
conn.check_can_live_migrate_source,
self.context, instance_ref, dest_check_data)
def test_live_migration_raises_exception(self):
# Confirms the recover method is called when an exception is raised.
# Preparing data
self.compute = importutils.import_object(CONF.compute_manager)
instance_dict = {'host': 'fake',
'power_state': power_state.RUNNING,
'vm_state': vm_states.ACTIVE}
instance_ref = db.instance_create(self.context, self.test_instance)
instance_ref = db.instance_update(self.context, instance_ref['uuid'],
instance_dict)
# Preparing mocks
vdmock = self.mox.CreateMock(libvirt.virDomain)
self.mox.StubOutWithMock(vdmock, "migrateToURI")
_bandwidth = CONF.live_migration_bandwidth
vdmock.migrateToURI(CONF.live_migration_uri % 'dest',
mox.IgnoreArg(),
None,
_bandwidth).AndRaise(libvirt.libvirtError('ERR'))
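# migrateToURI is mocked to raise, so the rollback path is exercised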
def fake_lookup(instance_name):
if instance_name == instance_ref['name']:
return vdmock
self.create_fake_libvirt_mock(lookupByName=fake_lookup)
self.mox.StubOutWithMock(self.compute, "_rollback_live_migration")
self.compute._rollback_live_migration(self.context, instance_ref,
'dest', False)
# Start test
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(libvirt.libvirtError,
conn._live_migration,
self.context, instance_ref, 'dest', False,
self.compute._rollback_live_migration)
instance_ref = db.instance_get(self.context, instance_ref['id'])
self.assertEqual(instance_ref['vm_state'], vm_states.ACTIVE)
self.assertEqual(instance_ref['power_state'], power_state.RUNNING)
db.instance_destroy(self.context, instance_ref['uuid'])
def test_pre_live_migration_works_correctly_mocked(self):
# Creating testdata
vol = {'block_device_mapping': [
{'connection_info': 'dummy', 'mount_device': '/dev/sda'},
{'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
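# Each of the two fake volumes should trigger a connect_volume call during pre_live_migration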
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
class FakeNetworkInfo():
def fixed_ips(self):
return ["test_ip_addr"]
inst_ref = {'id': 'foo'}
c = context.get_admin_context()
nw_info = FakeNetworkInfo()
# Creating mocks
self.mox.StubOutWithMock(driver, "block_device_info_get_mapping")
driver.block_device_info_get_mapping(vol
).AndReturn(vol['block_device_mapping'])
self.mox.StubOutWithMock(conn, "volume_driver_method")
for v in vol['block_device_mapping']:
disk_info = {
'bus': "scsi",
'dev': v['mount_device'].rpartition("/")[2],
'type': "disk"
}
conn.volume_driver_method('connect_volume',
v['connection_info'],
disk_info)
self.mox.StubOutWithMock(conn, 'plug_vifs')
conn.plug_vifs(mox.IsA(inst_ref), nw_info)
self.mox.ReplayAll()
result = conn.pre_live_migration(c, inst_ref, vol, nw_info)
self.assertEqual(result, None)
def test_pre_live_migration_vol_backed_works_correctly_mocked(self):
# Creating testdata, using temp dir.
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
vol = {'block_device_mapping': [
{'connection_info': 'dummy', 'mount_device': '/dev/sda'},
{'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
class FakeNetworkInfo():
def fixed_ips(self):
return ["test_ip_addr"]
inst_ref = db.instance_create(self.context, self.test_instance)
c = context.get_admin_context()
nw_info = FakeNetworkInfo()
# Creating mocks
self.mox.StubOutWithMock(conn, "volume_driver_method")
for v in vol['block_device_mapping']:
disk_info = {
'bus': "scsi",
'dev': v['mount_device'].rpartition("/")[2],
'type': "disk"
}
conn.volume_driver_method('connect_volume',
v['connection_info'],
disk_info)
self.mox.StubOutWithMock(conn, 'plug_vifs')
conn.plug_vifs(mox.IsA(inst_ref), nw_info)
self.mox.ReplayAll()
migrate_data = {'is_shared_storage': False,
'is_volume_backed': True,
'block_migration': False,
'instance_relative_path': inst_ref['name']
}
ret = conn.pre_live_migration(c, inst_ref, vol, nw_info,
migrate_data)
self.assertEqual(ret, None)
self.assertTrue(os.path.exists('%s/%s/' % (tmpdir,
inst_ref['name'])))
db.instance_destroy(self.context, inst_ref['uuid'])
def test_pre_block_migration_works_correctly(self):
# Replace instances_path since this testcase creates tmpfile
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
# Test data
instance_ref = db.instance_create(self.context, self.test_instance)
dummy_info = [{'path': '%s/disk' % tmpdir,
'disk_size': 10737418240,
'type': 'raw',
'backing_file': ''},
{'backing_file': 'otherdisk_1234567',
'path': '%s/otherdisk' % tmpdir,
'virt_disk_size': 10737418240}]
dummyjson = json.dumps(dummy_info)
# qemu-img should be mocked since the test environment might not have
# large disk space.
self.mox.StubOutWithMock(imagebackend.Image, 'cache')
imagebackend.Image.cache(context=mox.IgnoreArg(),
fetch_func=mox.IgnoreArg(),
filename='otherdisk',
image_id=self.test_instance['image_ref'],
project_id='fake',
size=10737418240L,
user_id=None).AndReturn(None)
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
conn.pre_block_migration(self.context, instance_ref,
dummyjson)
self.assertTrue(os.path.exists('%s/%s/' %
(tmpdir, instance_ref['uuid'])))
db.instance_destroy(self.context, instance_ref['uuid'])
def test_get_instance_disk_info_works_correctly(self):
# Test data
instance_ref = db.instance_create(self.context, self.test_instance)
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices>"
"<disk type='file'><driver name='qemu' type='raw'/>"
"<source file='/test/disk'/>"
"<target dev='vda' bus='virtio'/></disk>"
"<disk type='file'><driver name='qemu' type='qcow2'/>"
"<source file='/test/disk.local'/>"
"<target dev='vdb' bus='virtio'/></disk>"
"</devices></domain>")
# Preparing mocks
vdmock = self.mox.CreateMock(libvirt.virDomain)
self.mox.StubOutWithMock(vdmock, "XMLDesc")
vdmock.XMLDesc(0).AndReturn(dummyxml)
def fake_lookup(instance_name):
if instance_name == instance_ref['name']:
return vdmock
self.create_fake_libvirt_mock(lookupByName=fake_lookup)
GB = 1024 * 1024 * 1024
fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * GB
fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * GB
fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'
self.mox.StubOutWithMock(os.path, "getsize")
os.path.getsize('/test/disk').AndReturn((10737418240))
os.path.getsize('/test/disk.local').AndReturn((3328599655))
ret = ("image: /test/disk\n"
"file format: raw\n"
"virtual size: 20G (21474836480 bytes)\n"
"disk size: 3.1G\n"
"cluster_size: 2097152\n"
"backing file: /test/dummy (actual path: /backing/file)\n")
self.mox.StubOutWithMock(os.path, "exists")
os.path.exists('/test/disk.local').AndReturn(True)
self.mox.StubOutWithMock(utils, "execute")
utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
'/test/disk.local').AndReturn((ret, ''))
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
info = conn.get_instance_disk_info(instance_ref['name'])
info = jsonutils.loads(info)
self.assertEquals(info[0]['type'], 'raw')
self.assertEquals(info[0]['path'], '/test/disk')
self.assertEquals(info[0]['disk_size'], 10737418240)
self.assertEquals(info[0]['backing_file'], "")
self.assertEquals(info[0]['over_committed_disk_size'], 0)
self.assertEquals(info[1]['type'], 'qcow2')
self.assertEquals(info[1]['path'], '/test/disk.local')
self.assertEquals(info[1]['virt_disk_size'], 21474836480)
self.assertEquals(info[1]['backing_file'], "file")
self.assertEquals(info[1]['over_committed_disk_size'], 18146236825)
db.instance_destroy(self.context, instance_ref['uuid'])
def test_get_instance_disk_info_excludes_volumes(self):
# Test data
instance_ref = db.instance_create(self.context, self.test_instance)
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices>"
"<disk type='file'><driver name='qemu' type='raw'/>"
"<source file='/test/disk'/>"
"<target dev='vda' bus='virtio'/></disk>"
"<disk type='file'><driver name='qemu' type='qcow2'/>"
"<source file='/test/disk.local'/>"
"<target dev='vdb' bus='virtio'/></disk>"
"<disk type='file'><driver name='qemu' type='qcow2'/>"
"<source file='/fake/path/to/volume1'/>"
"<target dev='vdc' bus='virtio'/></disk>"
"<disk type='file'><driver name='qemu' type='qcow2'/>"
"<source file='/fake/path/to/volume2'/>"
"<target dev='vdd' bus='virtio'/></disk>"
"</devices></domain>")
# Preparing mocks
vdmock = self.mox.CreateMock(libvirt.virDomain)
self.mox.StubOutWithMock(vdmock, "XMLDesc")
vdmock.XMLDesc(0).AndReturn(dummyxml)
def fake_lookup(instance_name):
if instance_name == instance_ref['name']:
return vdmock
self.create_fake_libvirt_mock(lookupByName=fake_lookup)
GB = 1024 * 1024 * 1024
fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * GB
fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * GB
fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'
self.mox.StubOutWithMock(os.path, "getsize")
os.path.getsize('/test/disk').AndReturn((10737418240))
os.path.getsize('/test/disk.local').AndReturn((3328599655))
ret = ("image: /test/disk\n"
"file format: raw\n"
"virtual size: 20G (21474836480 bytes)\n"
"disk size: 3.1G\n"
"cluster_size: 2097152\n"
"backing file: /test/dummy (actual path: /backing/file)\n")
self.mox.StubOutWithMock(os.path, "exists")
os.path.exists('/test/disk.local').AndReturn(True)
self.mox.StubOutWithMock(utils, "execute")
utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
'/test/disk.local').AndReturn((ret, ''))
self.mox.ReplayAll()
conn_info = {'driver_volume_type': 'fake'}
info = {'block_device_mapping': [
{'connection_info': conn_info, 'mount_device': '/dev/vdc'},
{'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
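# vdc and vdd are attached volumes, so they must be excluded from the returned disk info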
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
info = conn.get_instance_disk_info(instance_ref['name'],
block_device_info=info)
info = jsonutils.loads(info)
self.assertEquals(info[0]['type'], 'raw')
self.assertEquals(info[0]['path'], '/test/disk')
self.assertEquals(info[0]['disk_size'], 10737418240)
self.assertEquals(info[0]['backing_file'], "")
self.assertEquals(info[0]['over_committed_disk_size'], 0)
self.assertEquals(info[1]['type'], 'qcow2')
self.assertEquals(info[1]['path'], '/test/disk.local')
self.assertEquals(info[1]['virt_disk_size'], 21474836480)
self.assertEquals(info[1]['backing_file'], "file")
self.assertEquals(info[1]['over_committed_disk_size'], 18146236825)
db.instance_destroy(self.context, instance_ref['uuid'])
def test_spawn_with_network_info(self):
# Preparing mocks
def fake_none(*args, **kwargs):
return
def fake_getLibVersion():
return 9007
def fake_getCapabilities():
return """
<capabilities>
<host>
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
<cpu>
<arch>x86_64</arch>
<model>Penryn</model>
<vendor>Intel</vendor>
<topology sockets='1' cores='2' threads='1'/>
<feature name='xtpr'/>
</cpu>
</host>
</capabilities>
"""
# _fake_network_info must be called before create_fake_libvirt_mock(),
# as _fake_network_info calls importutils.import_class() and
# create_fake_libvirt_mock() mocks importutils.import_class().
network_info = _fake_network_info(self.stubs, 1)
self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
getCapabilities=fake_getCapabilities)
instance_ref = self.test_instance
instance_ref['image_ref'] = 123456 # we send an int to test sha1 call
instance_type = db.instance_type_get(self.context,
instance_ref['instance_type_id'])
sys_meta = instance_types.save_instance_type_info({}, instance_type)
instance_ref['system_metadata'] = sys_meta
instance = db.instance_create(self.context, instance_ref)
# Mock out the get_info method of the LibvirtDriver so that the polling
# in the spawn method of the LibvirtDriver returns immediately
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, 'get_info')
libvirt_driver.LibvirtDriver.get_info(instance
).AndReturn({'state': power_state.RUNNING})
# Start test
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn.firewall_driver,
'setup_basic_filtering',
fake_none)
self.stubs.Set(conn.firewall_driver,
'prepare_instance_filter',
fake_none)
self.stubs.Set(imagebackend.Image,
'cache',
fake_none)
conn.spawn(self.context, instance, None, [], 'herp',
network_info=network_info)
path = os.path.join(CONF.instances_path, instance['name'])
if os.path.isdir(path):
shutil.rmtree(path)
path = os.path.join(CONF.instances_path, CONF.base_dir_name)
if os.path.isdir(path):
shutil.rmtree(os.path.join(CONF.instances_path,
CONF.base_dir_name))
def test_spawn_without_image_meta(self):
self.create_image_called = False
def fake_none(*args, **kwargs):
return
def fake_create_image(*args, **kwargs):
self.create_image_called = True
def fake_get_info(instance):
return {'state': power_state.RUNNING}
instance_ref = self.test_instance
instance_ref['image_ref'] = 1
instance = db.instance_create(self.context, instance_ref)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, 'to_xml', fake_none)
self.stubs.Set(conn, '_create_image', fake_create_image)
self.stubs.Set(conn, '_create_domain_and_network', fake_none)
self.stubs.Set(conn, 'get_info', fake_get_info)
conn.spawn(self.context, instance, None, [], None)
self.assertTrue(self.create_image_called)
conn.spawn(self.context,
instance,
{'id': instance['image_ref']},
[],
None)
self.assertTrue(self.create_image_called)
def test_spawn_from_volume_calls_cache(self):
self.cache_called_for_disk = False
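# fake_cache (below) sets this flag only when cache() is called with image_id 'my_fake_image'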
def fake_none(*args, **kwargs):
return
def fake_cache(*args, **kwargs):
if kwargs.get('image_id') == 'my_fake_image':
self.cache_called_for_disk = True
def fake_get_info(instance):
return {'state': power_state.RUNNING}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, 'to_xml', fake_none)
self.stubs.Set(imagebackend.Image, 'cache', fake_cache)
self.stubs.Set(conn, '_create_domain_and_network', fake_none)
self.stubs.Set(conn, 'get_info', fake_get_info)
block_device_info = {'root_device_name': '/dev/vda',
'block_device_mapping': [
{'mount_device': 'vda'}]}
# Volume-backed instance created without image
instance_ref = self.test_instance
instance_ref['image_ref'] = ''
instance_ref['root_device_name'] = '/dev/vda'
instance = db.instance_create(self.context, instance_ref)
conn.spawn(self.context, instance, None, [], None,
block_device_info=block_device_info)
self.assertFalse(self.cache_called_for_disk)
db.instance_destroy(self.context, instance['uuid'])
# Booted from volume but with placeholder image
instance_ref = self.test_instance
instance_ref['image_ref'] = 'my_fake_image'
instance_ref['root_device_name'] = '/dev/vda'
instance = db.instance_create(self.context, instance_ref)
conn.spawn(self.context, instance, None, [], None,
block_device_info=block_device_info)
self.assertFalse(self.cache_called_for_disk)
db.instance_destroy(self.context, instance['uuid'])
# Booted from an image
instance_ref['image_ref'] = 'my_fake_image'
instance = db.instance_create(self.context, instance_ref)
conn.spawn(self.context, instance, None, [], None)
self.assertTrue(self.cache_called_for_disk)
db.instance_destroy(self.context, instance['uuid'])
def test_create_image_plain(self):
gotFiles = []
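# FakeImage (below) records each cache() request's filename and size into gotFiles
# instead of touching the filesystem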
def fake_image(self, instance, name, image_type=''):
class FakeImage(imagebackend.Image):
def __init__(self, instance, name):
self.path = os.path.join(instance['name'], name)
def create_image(self, prepare_template, base,
size, *args, **kwargs):
pass
def cache(self, fetch_func, filename, size=None,
*args, **kwargs):
gotFiles.append({'filename': filename,
'size': size})
def snapshot(self, name):
pass
return FakeImage(instance, name)
def fake_none(*args, **kwargs):
return
def fake_get_info(instance):
return {'state': power_state.RUNNING}
# Stop 'libvirt_driver._create_image' from touching the filesystem
self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
fake_image)
instance_ref = self.test_instance
instance_ref['image_ref'] = 1
instance = db.instance_create(self.context, instance_ref)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, 'to_xml', fake_none)
self.stubs.Set(conn, '_create_domain_and_network', fake_none)
self.stubs.Set(conn, 'get_info', fake_get_info)
image_meta = {'id': instance['image_ref']}
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
None,
image_meta)
conn._create_image(context, instance,
disk_info['mapping'])
xml = conn.to_xml(instance, None,
disk_info, image_meta)
wantFiles = [
{'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
'size': 10 * 1024 * 1024 * 1024},
{'filename': 'ephemeral_20_default',
'size': 20 * 1024 * 1024 * 1024},
]
self.assertEquals(gotFiles, wantFiles)
def test_create_image_with_swap(self):
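        # As test_create_image_plain, but the flavor swap should also be
        # cached as a 500 MB 'swap_500' disk.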
gotFiles = []
def fake_image(self, instance, name, image_type=''):
class FakeImage(imagebackend.Image):
def __init__(self, instance, name):
self.path = os.path.join(instance['name'], name)
def create_image(self, prepare_template, base,
size, *args, **kwargs):
pass
def cache(self, fetch_func, filename, size=None,
*args, **kwargs):
gotFiles.append({'filename': filename,
'size': size})
def snapshot(self, name):
pass
return FakeImage(instance, name)
def fake_none(*args, **kwargs):
return
def fake_get_info(instance):
return {'state': power_state.RUNNING}
        # Stop 'libvirt_driver._create_image' from touching the filesystem
self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
fake_image)
instance_ref = self.test_instance
instance_ref['image_ref'] = 1
# Turn on some swap to exercise that codepath in _create_image
instance_ref['system_metadata']['instance_type_swap'] = 500
instance = db.instance_create(self.context, instance_ref)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, 'to_xml', fake_none)
self.stubs.Set(conn, '_create_domain_and_network', fake_none)
self.stubs.Set(conn, 'get_info', fake_get_info)
image_meta = {'id': instance['image_ref']}
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
None,
image_meta)
conn._create_image(context, instance,
disk_info['mapping'])
xml = conn.to_xml(instance, None,
disk_info, image_meta)
wantFiles = [
{'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
'size': 10 * 1024 * 1024 * 1024},
{'filename': 'ephemeral_20_default',
'size': 20 * 1024 * 1024 * 1024},
{'filename': 'swap_500',
'size': 500 * 1024 * 1024},
]
self.assertEquals(gotFiles, wantFiles)
def test_get_console_output_file(self):
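        # Only the last MAX_CONSOLE_BYTES of a file-backed console log
        # should be returned.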
fake_libvirt_utils.files['console.log'] = '01234567890'
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
instance_ref = self.test_instance
instance_ref['image_ref'] = 123456
instance = db.instance_create(self.context, instance_ref)
console_dir = (os.path.join(tmpdir, instance['name']))
console_log = '%s/console.log' % (console_dir)
fake_dom_xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
<console type='file'>
<source path='%s'/>
<target port='0'/>
</console>
</devices>
</domain>
""" % console_log
def fake_lookup(id):
return FakeVirtDomain(fake_dom_xml)
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
try:
prev_max = libvirt_driver.MAX_CONSOLE_BYTES
libvirt_driver.MAX_CONSOLE_BYTES = 5
output = conn.get_console_output(instance)
finally:
libvirt_driver.MAX_CONSOLE_BYTES = prev_max
self.assertEquals('67890', output)
def test_get_console_output_pty(self):
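        # Only the last MAX_CONSOLE_BYTES of a pty-backed console should
        # be returned.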
fake_libvirt_utils.files['pty'] = '01234567890'
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
instance_ref = self.test_instance
instance_ref['image_ref'] = 123456
instance = db.instance_create(self.context, instance_ref)
console_dir = (os.path.join(tmpdir, instance['name']))
pty_file = '%s/fake_pty' % (console_dir)
fake_dom_xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
<console type='pty'>
<source path='%s'/>
<target port='0'/>
</console>
</devices>
</domain>
""" % pty_file
def fake_lookup(id):
return FakeVirtDomain(fake_dom_xml)
def _fake_flush(self, fake_pty):
return 'foo'
def _fake_append_to_file(self, data, fpath):
return 'pty'
self.create_fake_libvirt_mock()
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
libvirt_driver.LibvirtDriver._flush_libvirt_console = _fake_flush
libvirt_driver.LibvirtDriver._append_to_file = _fake_append_to_file
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
try:
prev_max = libvirt_driver.MAX_CONSOLE_BYTES
libvirt_driver.MAX_CONSOLE_BYTES = 5
output = conn.get_console_output(instance)
finally:
libvirt_driver.MAX_CONSOLE_BYTES = prev_max
self.assertEquals('67890', output)
def test_get_host_ip_addr(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
ip = conn.get_host_ip_addr()
self.assertEquals(ip, CONF.my_ip)
def test_broken_connection(self):
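        # Remote/RPC system errors from libvirt should mark the connection
        # as broken.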
for (error, domain) in (
(libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_REMOTE),
(libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_RPC),
(libvirt.VIR_ERR_INTERNAL_ERROR, libvirt.VIR_FROM_RPC)):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_wrapped_conn")
self.mox.StubOutWithMock(conn._wrapped_conn, "getLibVersion")
self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_code")
self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_domain")
conn._wrapped_conn.getLibVersion().AndRaise(
libvirt.libvirtError("fake failure"))
libvirt.libvirtError.get_error_code().AndReturn(error)
libvirt.libvirtError.get_error_domain().AndReturn(domain)
self.mox.ReplayAll()
self.assertFalse(conn._test_connection())
self.mox.UnsetStubs()
def test_immediate_delete(self):
def fake_lookup_by_name(instance_name):
raise exception.InstanceNotFound(instance_id=instance_name)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
instance = db.instance_create(self.context, self.test_instance)
conn.destroy(instance, {})
def test_destroy_removes_disk(self):
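        # By default destroy() should remove the instance directory and
        # clean up LVM volumes (contrast with test_destroy_not_removes_disk).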
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
'_undefine_domain')
libvirt_driver.LibvirtDriver._undefine_domain(instance)
self.mox.StubOutWithMock(shutil, "rmtree")
shutil.rmtree(os.path.join(CONF.instances_path, instance['name']))
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_cleanup_lvm')
libvirt_driver.LibvirtDriver._cleanup_lvm(instance)
# Start test
self.mox.ReplayAll()
def fake_destroy(instance):
pass
def fake_os_path_exists(path):
return True
def fake_unplug_vifs(instance, network_info):
pass
def fake_unfilter_instance(instance, network_info):
pass
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_destroy', fake_destroy)
self.stubs.Set(conn, 'unplug_vifs', fake_unplug_vifs)
self.stubs.Set(conn.firewall_driver,
'unfilter_instance', fake_unfilter_instance)
self.stubs.Set(os.path, 'exists', fake_os_path_exists)
conn.destroy(instance, [])
def test_destroy_not_removes_disk(self):
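        # With the final argument False, destroy() should leave the on-disk
        # files alone; only _undefine_domain is expected.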
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
'_undefine_domain')
libvirt_driver.LibvirtDriver._undefine_domain(instance)
# Start test
self.mox.ReplayAll()
def fake_destroy(instance):
pass
def fake_os_path_exists(path):
return True
def fake_unplug_vifs(instance, network_info):
pass
def fake_unfilter_instance(instance, network_info):
pass
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_destroy', fake_destroy)
self.stubs.Set(conn, 'unplug_vifs', fake_unplug_vifs)
self.stubs.Set(conn.firewall_driver,
'unfilter_instance', fake_unfilter_instance)
self.stubs.Set(os.path, 'exists', fake_os_path_exists)
conn.destroy(instance, [], None, False)
def test_destroy_undefines(self):
mock = self.mox.CreateMock(libvirt.virDomain)
mock.ID()
mock.destroy()
mock.undefineFlags(1).AndReturn(1)
self.mox.ReplayAll()
def fake_lookup_by_name(instance_name):
return mock
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN, 'id': -1}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
conn.destroy(instance, [])
def test_destroy_undefines_no_undefine_flags(self):
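        # When undefineFlags() raises, destroy() should fall back to a
        # plain undefine().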
mock = self.mox.CreateMock(libvirt.virDomain)
mock.ID()
mock.destroy()
mock.undefineFlags(1).AndRaise(libvirt.libvirtError('Err'))
mock.undefine()
self.mox.ReplayAll()
def fake_lookup_by_name(instance_name):
return mock
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN, 'id': -1}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
conn.destroy(instance, [])
def test_destroy_undefines_no_attribute_with_managed_save(self):
mock = self.mox.CreateMock(libvirt.virDomain)
mock.ID()
mock.destroy()
mock.undefineFlags(1).AndRaise(AttributeError())
mock.hasManagedSaveImage(0).AndReturn(True)
mock.managedSaveRemove(0)
mock.undefine()
self.mox.ReplayAll()
def fake_lookup_by_name(instance_name):
return mock
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN, 'id': -1}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
conn.destroy(instance, [])
def test_destroy_undefines_no_attribute_no_managed_save(self):
mock = self.mox.CreateMock(libvirt.virDomain)
mock.ID()
mock.destroy()
mock.undefineFlags(1).AndRaise(AttributeError())
mock.hasManagedSaveImage(0).AndRaise(AttributeError())
mock.undefine()
self.mox.ReplayAll()
def fake_lookup_by_name(instance_name):
return mock
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN, 'id': -1}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
conn.destroy(instance, [])
def test_destroy_timed_out(self):
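        # A libvirt operation timeout while destroying the domain should
        # surface as InstancePowerOffFailure.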
mock = self.mox.CreateMock(libvirt.virDomain)
mock.ID()
mock.destroy().AndRaise(libvirt.libvirtError("timed out"))
self.mox.ReplayAll()
def fake_lookup_by_name(instance_name):
return mock
def fake_get_error_code(self):
return libvirt.VIR_ERR_OPERATION_TIMEOUT
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(libvirt.libvirtError, 'get_error_code',
fake_get_error_code)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
self.assertRaises(exception.InstancePowerOffFailure,
conn.destroy, instance, [])
def test_private_destroy_not_found(self):
mock = self.mox.CreateMock(libvirt.virDomain)
mock.ID()
mock.destroy()
self.mox.ReplayAll()
def fake_lookup_by_name(instance_name):
return mock
def fake_get_info(instance_name):
raise exception.InstanceNotFound(instance_id=instance_name)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
# NOTE(vish): verifies destroy doesn't raise if the instance disappears
conn._destroy(instance)
def test_disk_over_committed_size_total(self):
        # Ensure the over-committed disk size is summed across all instances.
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
def list_instances():
return ['fake1', 'fake2']
self.stubs.Set(conn, 'list_instances', list_instances)
        fake_disks = {
            'fake1': [{'type': 'qcow2', 'path': '/somepath/disk1',
                       'virt_disk_size': '10737418240',
                       'backing_file': '/somepath/disk1',
                       'disk_size': '83886080',
                       'over_committed_disk_size': '10653532160'}],
            'fake2': [{'type': 'raw', 'path': '/somepath/disk2',
                       'virt_disk_size': '0',
                       'backing_file': '/somepath/disk2',
                       'disk_size': '10737418240',
                       'over_committed_disk_size': '0'}]}
def get_info(instance_name):
return jsonutils.dumps(fake_disks.get(instance_name))
self.stubs.Set(conn, 'get_instance_disk_info', get_info)
result = conn.get_disk_over_committed_size_total()
self.assertEqual(result, 10653532160)
def test_cpu_info(self):
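        # get_cpu_info() should serialize the host CPU capabilities to JSON.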
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
def get_host_capabilities_stub(self):
cpu = vconfig.LibvirtConfigCPU()
cpu.model = "Opteron_G4"
cpu.vendor = "AMD"
cpu.arch = "x86_64"
cpu.cores = 2
cpu.threads = 1
cpu.sockets = 4
cpu.add_feature(vconfig.LibvirtConfigCPUFeature("extapic"))
cpu.add_feature(vconfig.LibvirtConfigCPUFeature("3dnow"))
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
guest = vconfig.LibvirtConfigGuest()
guest.ostype = vm_mode.HVM
guest.arch = "x86_64"
guest.domtype = ["kvm"]
caps.guests.append(guest)
guest = vconfig.LibvirtConfigGuest()
guest.ostype = vm_mode.HVM
guest.arch = "i686"
guest.domtype = ["kvm"]
caps.guests.append(guest)
return caps
self.stubs.Set(libvirt_driver.LibvirtDriver,
'get_host_capabilities',
get_host_capabilities_stub)
want = {"vendor": "AMD",
"features": ["extapic", "3dnow"],
"model": "Opteron_G4",
"arch": "x86_64",
"topology": {"cores": 2, "threads": 1, "sockets": 4}}
got = jsonutils.loads(conn.get_cpu_info())
self.assertEqual(want, got)
def test_diagnostic_vcpus_exception(self):
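        # get_diagnostics() should still return disk, interface and memory
        # stats when the vcpus() call fails.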
xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
<interface type='network'>
<mac address='52:54:00:a4:38:38'/>
<source network='default'/>
<target dev='vnet0'/>
</interface>
</devices>
</domain>
"""
class DiagFakeDomain(FakeVirtDomain):
def __init__(self):
super(DiagFakeDomain, self).__init__(fake_xml=xml)
def vcpus(self):
raise libvirt.libvirtError('vcpus missing')
def blockStats(self, path):
return (169L, 688640L, 0L, 0L, -1L)
def interfaceStats(self, path):
return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)
def memoryStats(self):
return {'actual': 220160L, 'rss': 200164L}
def maxMemory(self):
return 280160L
def fake_lookup_name(name):
return DiagFakeDomain()
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actual = conn.get_diagnostics({"name": "testvirt"})
expect = {'vda_read': 688640L,
'vda_read_req': 169L,
'vda_write': 0L,
'vda_write_req': 0L,
'vda_errors': -1L,
'vdb_read': 688640L,
'vdb_read_req': 169L,
'vdb_write': 0L,
'vdb_write_req': 0L,
'vdb_errors': -1L,
'memory': 280160L,
'memory-actual': 220160L,
'memory-rss': 200164L,
'vnet0_rx': 4408L,
'vnet0_rx_drop': 0L,
'vnet0_rx_errors': 0L,
'vnet0_rx_packets': 82L,
'vnet0_tx': 0L,
'vnet0_tx_drop': 0L,
'vnet0_tx_errors': 0L,
'vnet0_tx_packets': 0L,
}
self.assertEqual(actual, expect)
def test_diagnostic_blockstats_exception(self):
xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
<interface type='network'>
<mac address='52:54:00:a4:38:38'/>
<source network='default'/>
<target dev='vnet0'/>
</interface>
</devices>
</domain>
"""
class DiagFakeDomain(FakeVirtDomain):
def __init__(self):
super(DiagFakeDomain, self).__init__(fake_xml=xml)
def vcpus(self):
return ([(0, 1, 15340000000L, 0),
(1, 1, 1640000000L, 0),
(2, 1, 3040000000L, 0),
(3, 1, 1420000000L, 0)],
[(True, False),
(True, False),
(True, False),
(True, False)])
def blockStats(self, path):
raise libvirt.libvirtError('blockStats missing')
def interfaceStats(self, path):
return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)
def memoryStats(self):
return {'actual': 220160L, 'rss': 200164L}
def maxMemory(self):
return 280160L
def fake_lookup_name(name):
return DiagFakeDomain()
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actual = conn.get_diagnostics({"name": "testvirt"})
expect = {'cpu0_time': 15340000000L,
'cpu1_time': 1640000000L,
'cpu2_time': 3040000000L,
'cpu3_time': 1420000000L,
'memory': 280160L,
'memory-actual': 220160L,
'memory-rss': 200164L,
'vnet0_rx': 4408L,
'vnet0_rx_drop': 0L,
'vnet0_rx_errors': 0L,
'vnet0_rx_packets': 82L,
'vnet0_tx': 0L,
'vnet0_tx_drop': 0L,
'vnet0_tx_errors': 0L,
'vnet0_tx_packets': 0L,
}
self.assertEqual(actual, expect)
def test_diagnostic_interfacestats_exception(self):
xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
<interface type='network'>
<mac address='52:54:00:a4:38:38'/>
<source network='default'/>
<target dev='vnet0'/>
</interface>
</devices>
</domain>
"""
class DiagFakeDomain(FakeVirtDomain):
def __init__(self):
super(DiagFakeDomain, self).__init__(fake_xml=xml)
def vcpus(self):
return ([(0, 1, 15340000000L, 0),
(1, 1, 1640000000L, 0),
(2, 1, 3040000000L, 0),
(3, 1, 1420000000L, 0)],
[(True, False),
(True, False),
(True, False),
(True, False)])
def blockStats(self, path):
return (169L, 688640L, 0L, 0L, -1L)
def interfaceStats(self, path):
raise libvirt.libvirtError('interfaceStat missing')
def memoryStats(self):
return {'actual': 220160L, 'rss': 200164L}
def maxMemory(self):
return 280160L
def fake_lookup_name(name):
return DiagFakeDomain()
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actual = conn.get_diagnostics({"name": "testvirt"})
expect = {'cpu0_time': 15340000000L,
'cpu1_time': 1640000000L,
'cpu2_time': 3040000000L,
'cpu3_time': 1420000000L,
'vda_read': 688640L,
'vda_read_req': 169L,
'vda_write': 0L,
'vda_write_req': 0L,
'vda_errors': -1L,
'vdb_read': 688640L,
'vdb_read_req': 169L,
'vdb_write': 0L,
'vdb_write_req': 0L,
'vdb_errors': -1L,
'memory': 280160L,
'memory-actual': 220160L,
'memory-rss': 200164L,
}
self.assertEqual(actual, expect)
def test_diagnostic_memorystats_exception(self):
xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
<interface type='network'>
<mac address='52:54:00:a4:38:38'/>
<source network='default'/>
<target dev='vnet0'/>
</interface>
</devices>
</domain>
"""
class DiagFakeDomain(FakeVirtDomain):
def __init__(self):
super(DiagFakeDomain, self).__init__(fake_xml=xml)
def vcpus(self):
return ([(0, 1, 15340000000L, 0),
(1, 1, 1640000000L, 0),
(2, 1, 3040000000L, 0),
(3, 1, 1420000000L, 0)],
[(True, False),
(True, False),
(True, False),
(True, False)])
def blockStats(self, path):
return (169L, 688640L, 0L, 0L, -1L)
def interfaceStats(self, path):
return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)
def memoryStats(self):
raise libvirt.libvirtError('memoryStats missing')
def maxMemory(self):
return 280160L
def fake_lookup_name(name):
return DiagFakeDomain()
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actual = conn.get_diagnostics({"name": "testvirt"})
expect = {'cpu0_time': 15340000000L,
'cpu1_time': 1640000000L,
'cpu2_time': 3040000000L,
'cpu3_time': 1420000000L,
'vda_read': 688640L,
'vda_read_req': 169L,
'vda_write': 0L,
'vda_write_req': 0L,
'vda_errors': -1L,
'vdb_read': 688640L,
'vdb_read_req': 169L,
'vdb_write': 0L,
'vdb_write_req': 0L,
'vdb_errors': -1L,
'memory': 280160L,
'vnet0_rx': 4408L,
'vnet0_rx_drop': 0L,
'vnet0_rx_errors': 0L,
'vnet0_rx_packets': 82L,
'vnet0_tx': 0L,
'vnet0_tx_drop': 0L,
'vnet0_tx_errors': 0L,
'vnet0_tx_packets': 0L,
}
self.assertEqual(actual, expect)
def test_diagnostic_full(self):
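        # With all stats available, get_diagnostics() should report cpu,
        # disk, memory and interface counters together.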
xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
<interface type='network'>
<mac address='52:54:00:a4:38:38'/>
<source network='default'/>
<target dev='vnet0'/>
</interface>
</devices>
</domain>
"""
class DiagFakeDomain(FakeVirtDomain):
def __init__(self):
super(DiagFakeDomain, self).__init__(fake_xml=xml)
def vcpus(self):
return ([(0, 1, 15340000000L, 0),
(1, 1, 1640000000L, 0),
(2, 1, 3040000000L, 0),
(3, 1, 1420000000L, 0)],
[(True, False),
(True, False),
(True, False),
(True, False)])
def blockStats(self, path):
return (169L, 688640L, 0L, 0L, -1L)
def interfaceStats(self, path):
return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)
def memoryStats(self):
return {'actual': 220160L, 'rss': 200164L}
def maxMemory(self):
return 280160L
def fake_lookup_name(name):
return DiagFakeDomain()
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actual = conn.get_diagnostics({"name": "testvirt"})
expect = {'cpu0_time': 15340000000L,
'cpu1_time': 1640000000L,
'cpu2_time': 3040000000L,
'cpu3_time': 1420000000L,
'vda_read': 688640L,
'vda_read_req': 169L,
'vda_write': 0L,
'vda_write_req': 0L,
'vda_errors': -1L,
'vdb_read': 688640L,
'vdb_read_req': 169L,
'vdb_write': 0L,
'vdb_write_req': 0L,
'vdb_errors': -1L,
'memory': 280160L,
'memory-actual': 220160L,
'memory-rss': 200164L,
'vnet0_rx': 4408L,
'vnet0_rx_drop': 0L,
'vnet0_rx_errors': 0L,
'vnet0_rx_packets': 82L,
'vnet0_tx': 0L,
'vnet0_tx_drop': 0L,
'vnet0_tx_errors': 0L,
'vnet0_tx_packets': 0L,
}
self.assertEqual(actual, expect)
def test_failing_vcpu_count(self):
"""Domain can fail to return the vcpu description in case it's
just starting up or shutting down. Make sure None is handled
gracefully.
"""
class DiagFakeDomain(object):
def __init__(self, vcpus):
self._vcpus = vcpus
def vcpus(self):
if self._vcpus is None:
return None
else:
return ([1] * self._vcpus, [True] * self._vcpus)
driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
conn = driver._conn
self.mox.StubOutWithMock(driver, 'list_instance_ids')
self.mox.StubOutWithMock(conn, 'lookupByID')
driver.list_instance_ids().AndReturn([1, 2])
conn.lookupByID(1).AndReturn(DiagFakeDomain(None))
conn.lookupByID(2).AndReturn(DiagFakeDomain(5))
self.mox.ReplayAll()
self.assertEqual(5, driver.get_vcpu_used())
def test_get_instance_capabilities(self):
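        # Host guest capabilities should be flattened into
        # (arch, domain type, vm mode) tuples.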
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
def get_host_capabilities_stub(self):
caps = vconfig.LibvirtConfigCaps()
guest = vconfig.LibvirtConfigGuest()
guest.ostype = 'hvm'
guest.arch = 'x86_64'
guest.domtype = ['kvm', 'qemu']
caps.guests.append(guest)
guest = vconfig.LibvirtConfigGuest()
guest.ostype = 'hvm'
guest.arch = 'i686'
guest.domtype = ['kvm']
caps.guests.append(guest)
return caps
self.stubs.Set(libvirt_driver.LibvirtDriver,
'get_host_capabilities',
get_host_capabilities_stub)
want = [('x86_64', 'kvm', 'hvm'),
('x86_64', 'qemu', 'hvm'),
('i686', 'kvm', 'hvm')]
got = conn.get_instance_capabilities()
self.assertEqual(want, got)
def test_event_dispatch(self):
# Validate that the libvirt self-pipe for forwarding
# events between threads is working sanely
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
got_events = []
def handler(event):
got_events.append(event)
conn.register_event_listener(handler)
conn._init_events_pipe()
event1 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_STARTED)
event2 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_PAUSED)
conn._queue_event(event1)
conn._queue_event(event2)
conn._dispatch_events()
want_events = [event1, event2]
self.assertEqual(want_events, got_events)
event3 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_RESUMED)
event4 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_STOPPED)
conn._queue_event(event3)
conn._queue_event(event4)
conn._dispatch_events()
want_events = [event1, event2, event3, event4]
self.assertEqual(want_events, got_events)
def test_event_lifecycle(self):
# Validate that libvirt events are correctly translated
# to Nova events
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
got_events = []
def handler(event):
got_events.append(event)
conn.register_event_listener(handler)
conn._init_events_pipe()
fake_dom_xml = """
<domain type='kvm'>
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
</devices>
</domain>
"""
dom = FakeVirtDomain(fake_dom_xml,
"cef19ce0-0ca2-11df-855d-b19fbce37686")
conn._event_lifecycle_callback(conn._conn,
dom,
libvirt.VIR_DOMAIN_EVENT_STOPPED,
0,
conn)
conn._dispatch_events()
self.assertEqual(len(got_events), 1)
self.assertEqual(type(got_events[0]), virtevent.LifecycleEvent)
self.assertEqual(got_events[0].uuid,
"cef19ce0-0ca2-11df-855d-b19fbce37686")
self.assertEqual(got_events[0].transition,
virtevent.EVENT_LIFECYCLE_STOPPED)
def test_set_cache_mode(self):
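        # A 'file=directsync' cache mode should be applied to file-backed
        # disks.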
self.flags(disk_cachemodes=['file=directsync'])
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
fake_conf = FakeConfigGuestDisk()
fake_conf.source_type = 'file'
conn.set_cache_mode(fake_conf)
self.assertEqual(fake_conf.driver_cache, 'directsync')
def test_set_cache_mode_invalid_mode(self):
self.flags(disk_cachemodes=['file=FAKE'])
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
fake_conf = FakeConfigGuestDisk()
fake_conf.source_type = 'file'
conn.set_cache_mode(fake_conf)
self.assertEqual(fake_conf.driver_cache, None)
def test_set_cache_mode_invalid_object(self):
self.flags(disk_cachemodes=['file=directsync'])
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
fake_conf = FakeConfigGuest()
fake_conf.driver_cache = 'fake'
conn.set_cache_mode(fake_conf)
self.assertEqual(fake_conf.driver_cache, 'fake')
def _test_shared_storage_detection(self, is_same):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.mox.StubOutWithMock(conn, 'get_host_ip_addr')
self.mox.StubOutWithMock(utils, 'execute')
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(os, 'unlink')
conn.get_host_ip_addr().AndReturn('bar')
utils.execute('ssh', 'foo', 'touch', mox.IgnoreArg())
os.path.exists(mox.IgnoreArg()).AndReturn(is_same)
if is_same:
os.unlink(mox.IgnoreArg())
else:
utils.execute('ssh', 'foo', 'rm', mox.IgnoreArg())
self.mox.ReplayAll()
return conn._is_storage_shared_with('foo', '/path')
def test_shared_storage_detection_same_host(self):
self.assertTrue(self._test_shared_storage_detection(True))
def test_shared_storage_detection_different_host(self):
self.assertFalse(self._test_shared_storage_detection(False))
def test_shared_storage_detection_easy(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.mox.StubOutWithMock(conn, 'get_host_ip_addr')
self.mox.StubOutWithMock(utils, 'execute')
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(os, 'unlink')
conn.get_host_ip_addr().AndReturn('foo')
self.mox.ReplayAll()
self.assertTrue(conn._is_storage_shared_with('foo', '/path'))


class HostStateTestCase(test.TestCase):
cpu_info = ('{"vendor": "Intel", "model": "pentium", "arch": "i686", '
'"features": ["ssse3", "monitor", "pni", "sse2", "sse", '
'"fxsr", "clflush", "pse36", "pat", "cmov", "mca", "pge", '
'"mtrr", "sep", "apic"], '
'"topology": {"cores": "1", "threads": "1", "sockets": "1"}}')
instance_caps = [("x86_64", "kvm", "hvm"), ("i686", "kvm", "hvm")]
class FakeConnection(object):
"""Fake connection object."""
def get_vcpu_total(self):
return 1
def get_vcpu_used(self):
return 0
def get_cpu_info(self):
return HostStateTestCase.cpu_info
def get_local_gb_info(self):
return {'total': 100, 'used': 20, 'free': 80}
def get_memory_mb_total(self):
return 497
def get_memory_mb_used(self):
return 88
def get_hypervisor_type(self):
return 'QEMU'
def get_hypervisor_version(self):
return 13091
def get_hypervisor_hostname(self):
return 'compute1'
def get_host_uptime(self):
return ('10:01:16 up 1:36, 6 users, '
'load average: 0.21, 0.16, 0.19')
def get_disk_available_least(self):
return 13091
def get_instance_capabilities(self):
return HostStateTestCase.instance_caps
def test_update_status(self):
hs = libvirt_driver.HostState(self.FakeConnection())
stats = hs._stats
self.assertEquals(stats["vcpus"], 1)
self.assertEquals(stats["vcpus_used"], 0)
self.assertEquals(stats["cpu_info"],
{"vendor": "Intel", "model": "pentium", "arch": "i686",
"features": ["ssse3", "monitor", "pni", "sse2", "sse",
"fxsr", "clflush", "pse36", "pat", "cmov",
"mca", "pge", "mtrr", "sep", "apic"],
"topology": {"cores": "1", "threads": "1", "sockets": "1"}
})
self.assertEquals(stats["disk_total"], 100)
self.assertEquals(stats["disk_used"], 20)
self.assertEquals(stats["disk_available"], 80)
self.assertEquals(stats["host_memory_total"], 497)
self.assertEquals(stats["host_memory_free"], 409)
self.assertEquals(stats["hypervisor_type"], 'QEMU')
self.assertEquals(stats["hypervisor_version"], 13091)
self.assertEquals(stats["hypervisor_hostname"], 'compute1')


class NWFilterFakes(object):
def __init__(self):
self.filters = {}
def nwfilterLookupByName(self, name):
if name in self.filters:
return self.filters[name]
raise libvirt.libvirtError('Filter Not Found')
def filterDefineXMLMock(self, xml):
        class FakeNWFilterInternal(object):
            def __init__(self, parent, name, xml):
                self.name = name
                self.parent = parent
                self.xml = xml

            def undefine(self):
                del self.parent.filters[self.name]
tree = etree.fromstring(xml)
name = tree.get('name')
if name not in self.filters:
self.filters[name] = FakeNWFilterInternal(self, name, xml)
return True


class IptablesFirewallTestCase(test.TestCase):
def setUp(self):
super(IptablesFirewallTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
class FakeLibvirtDriver(object):
def nwfilterDefineXML(*args, **kwargs):
"""setup_basic_rules in nwfilter calls this."""
pass
self.fake_libvirt_connection = FakeLibvirtDriver()
self.fw = firewall.IptablesFirewallDriver(
fake.FakeVirtAPI(),
get_connection=lambda: self.fake_libvirt_connection)
in_rules = [
'# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
'*nat',
':PREROUTING ACCEPT [1170:189210]',
':INPUT ACCEPT [844:71028]',
':OUTPUT ACCEPT [5149:405186]',
':POSTROUTING ACCEPT [5063:386098]',
'# Completed on Tue Dec 18 15:50:25 2012',
        '# Generated by iptables-save v1.4.12 on Tue Dec 18 15:50:25 2012',
'*mangle',
':PREROUTING ACCEPT [241:39722]',
':INPUT ACCEPT [230:39282]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [266:26558]',
':POSTROUTING ACCEPT [267:26590]',
'-A POSTROUTING -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM '
'--checksum-fill',
'COMMIT',
'# Completed on Tue Dec 18 15:50:25 2012',
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*filter',
':INPUT ACCEPT [969615:281627771]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [915599:63811649]',
':nova-block-ipv4 - [0:0]',
'[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
'[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
',ESTABLISHED -j ACCEPT ',
'[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -o virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'[0:0] -A FORWARD -i virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'COMMIT',
'# Completed on Mon Dec 6 11:54:13 2010',
]
in6_filter_rules = [
'# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
'*filter',
':INPUT ACCEPT [349155:75810423]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [349256:75777230]',
'COMMIT',
'# Completed on Tue Jan 18 23:47:56 2011',
]
def _create_instance_ref(self):
return db.instance_create(self.context,
{'user_id': 'fake',
'project_id': 'fake',
'instance_type_id': 1})
def test_static_filters(self):
instance_ref = self._create_instance_ref()
src_instance_ref = self._create_instance_ref()
admin_ctxt = context.get_admin_context()
secgroup = db.security_group_create(admin_ctxt,
{'user_id': 'fake',
'project_id': 'fake',
'name': 'testgroup',
'description': 'test group'})
src_secgroup = db.security_group_create(admin_ctxt,
{'user_id': 'fake',
'project_id': 'fake',
'name': 'testsourcegroup',
'description': 'src group'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': -1,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': 8,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'cidr': '192.168.10.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'group_id': src_secgroup['id']})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'group_id': src_secgroup['id']})
db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
secgroup['id'])
db.instance_add_security_group(admin_ctxt, src_instance_ref['uuid'],
src_secgroup['id'])
instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id'])
# self.fw.add_instance(instance_ref)
def fake_iptables_execute(*cmd, **kwargs):
process_input = kwargs.get('process_input', None)
if cmd == ('ip6tables-save', '-c'):
return '\n'.join(self.in6_filter_rules), None
if cmd == ('iptables-save', '-c'):
return '\n'.join(self.in_rules), None
if cmd == ('iptables-restore', '-c'):
lines = process_input.split('\n')
if '*filter' in lines:
self.out_rules = lines
return '', ''
if cmd == ('ip6tables-restore', '-c',):
lines = process_input.split('\n')
if '*filter' in lines:
self.out6_rules = lines
return '', ''
network_model = _fake_network_info(self.stubs, 1, spectacular=True)
from nova.network import linux_net
linux_net.iptables_manager.execute = fake_iptables_execute
from nova.compute import utils as compute_utils
self.stubs.Set(compute_utils, 'get_nw_info_for_instance',
lambda instance: network_model)
network_info = network_model.legacy()
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.apply_instance_filter(instance_ref, network_info)
in_rules = filter(lambda l: not l.startswith('#'),
self.in_rules)
for rule in in_rules:
if 'nova' not in rule:
self.assertTrue(rule in self.out_rules,
'Rule went missing: %s' % rule)
instance_chain = None
for rule in self.out_rules:
# This is pretty crude, but it'll do for now
# last two octets change
if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
instance_chain = rule.split(' ')[-1]
break
self.assertTrue(instance_chain, "The instance chain wasn't added")
security_group_chain = None
for rule in self.out_rules:
# This is pretty crude, but it'll do for now
if '-A %s -j' % instance_chain in rule:
security_group_chain = rule.split(' ')[-1]
break
self.assertTrue(security_group_chain,
"The security group chain wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp '
'-s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"ICMP acceptance rule wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp '
'--icmp-type 8 -s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"ICMP Echo Request acceptance rule wasn't added")
for ip in network_model.fixed_ips():
if ip['version'] != 4:
continue
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp -m multiport '
'--dports 80:81 -s %s' % ip['address'])
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -s '
'%s' % ip['address'])
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"Protocol/port-less acceptance rule wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp '
'-m multiport --dports 80:81 -s 192.168.10.0/24')
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
db.instance_destroy(admin_ctxt, instance_ref['uuid'])
def test_filters_for_instance_with_ip_v6(self):
self.flags(use_ipv6=True)
network_info = _fake_network_info(self.stubs, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEquals(len(rulesv4), 2)
self.assertEquals(len(rulesv6), 1)
def test_filters_for_instance_without_ip_v6(self):
self.flags(use_ipv6=False)
network_info = _fake_network_info(self.stubs, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEquals(len(rulesv4), 2)
self.assertEquals(len(rulesv6), 0)
def test_multinic_iptables(self):
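        # Every fixed IP on every network should produce a filter rule,
        # plus two extra rules for DHCP.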
ipv4_rules_per_addr = 1
ipv4_addr_per_network = 2
ipv6_rules_per_addr = 1
ipv6_addr_per_network = 1
networks_count = 5
instance_ref = self._create_instance_ref()
network_info = _fake_network_info(self.stubs, networks_count,
ipv4_addr_per_network)
ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
network_info)
self.fw.prepare_instance_filter(instance_ref, network_info)
ipv4 = self.fw.iptables.ipv4['filter'].rules
ipv6 = self.fw.iptables.ipv6['filter'].rules
ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
# Extra rules are for the DHCP request
rules = (ipv4_rules_per_addr * ipv4_addr_per_network *
networks_count) + 2
self.assertEquals(ipv4_network_rules, rules)
self.assertEquals(ipv6_network_rules,
ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)
def test_do_refresh_security_group_rules(self):
instance_ref = self._create_instance_ref()
self.mox.StubOutWithMock(self.fw,
'instance_rules')
self.mox.StubOutWithMock(self.fw,
'add_filters_for_instance',
use_mock_anything=True)
self.fw.instance_rules(instance_ref,
mox.IgnoreArg()).AndReturn((None, None))
self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg(),
mox.IgnoreArg())
self.fw.instance_rules(instance_ref,
mox.IgnoreArg()).AndReturn((None, None))
self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg(),
mox.IgnoreArg())
self.mox.ReplayAll()
self.fw.prepare_instance_filter(instance_ref, mox.IgnoreArg())
self.fw.instances[instance_ref['id']] = instance_ref
self.fw.do_refresh_security_group_rules("fake")
def test_unfilter_instance_undefines_nwfilter(self):
admin_ctxt = context.get_admin_context()
fakefilter = NWFilterFakes()
_xml_mock = fakefilter.filterDefineXMLMock
self.fw.nwfilter._conn.nwfilterDefineXML = _xml_mock
_lookup_name = fakefilter.nwfilterLookupByName
self.fw.nwfilter._conn.nwfilterLookupByName = _lookup_name
instance_ref = self._create_instance_ref()
network_info = _fake_network_info(self.stubs, 1)
self.fw.setup_basic_filtering(instance_ref, network_info)
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.apply_instance_filter(instance_ref, network_info)
original_filter_count = len(fakefilter.filters)
self.fw.unfilter_instance(instance_ref, network_info)
# should undefine just the instance filter
self.assertEqual(original_filter_count - len(fakefilter.filters), 1)
db.instance_destroy(admin_ctxt, instance_ref['uuid'])
def test_provider_firewall_rules(self):
# setup basic instance data
instance_ref = self._create_instance_ref()
# FRAGILE: peeks at how the firewall names chains
chain_name = 'inst-%s' % instance_ref['id']
# create a firewall via setup_basic_filtering like libvirt_conn.spawn
# should have a chain with 0 rules
network_info = _fake_network_info(self.stubs, 1)
self.fw.setup_basic_filtering(instance_ref, network_info)
self.assertTrue('provider' in self.fw.iptables.ipv4['filter'].chains)
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(0, len(rules))
admin_ctxt = context.get_admin_context()
# add a rule and send the update message, check for 1 rule
provider_fw0 = db.provider_fw_rule_create(admin_ctxt,
{'protocol': 'tcp',
'cidr': '10.99.99.99/32',
'from_port': 1,
'to_port': 65535})
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(1, len(rules))
# Add another, refresh, and make sure number of rules goes to two
provider_fw1 = db.provider_fw_rule_create(admin_ctxt,
{'protocol': 'udp',
'cidr': '10.99.99.99/32',
'from_port': 1,
'to_port': 65535})
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(2, len(rules))
# create the instance filter and make sure it has a jump rule
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.apply_instance_filter(instance_ref, network_info)
inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == chain_name]
jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
provjump_rules = []
# IptablesTable doesn't make rules unique internally
for rule in jump_rules:
if 'provider' in rule.rule and rule not in provjump_rules:
provjump_rules.append(rule)
self.assertEqual(1, len(provjump_rules))
# remove a rule from the db, cast to compute to refresh rule
db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id'])
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(1, len(rules))


class NWFilterTestCase(test.TestCase):
def setUp(self):
super(NWFilterTestCase, self).setUp()
class Mock(object):
pass
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.fake_libvirt_connection = Mock()
self.fw = firewall.NWFilterFirewall(fake.FakeVirtAPI(),
lambda: self.fake_libvirt_connection)
def test_cidr_rule_nwfilter_xml(self):
cloud_controller = cloud.CloudController()
cloud_controller.create_security_group(self.context,
'testgroup',
'test group description')
cloud_controller.authorize_security_group_ingress(self.context,
'testgroup',
from_port='80',
to_port='81',
ip_protocol='tcp',
cidr_ip='0.0.0.0/0')
security_group = db.security_group_get_by_name(self.context,
'fake',
'testgroup')
self.teardown_security_group()
def teardown_security_group(self):
cloud_controller = cloud.CloudController()
cloud_controller.delete_security_group(self.context, 'testgroup')
def setup_and_return_security_group(self):
cloud_controller = cloud.CloudController()
cloud_controller.create_security_group(self.context,
'testgroup',
'test group description')
cloud_controller.authorize_security_group_ingress(self.context,
'testgroup',
from_port='80',
to_port='81',
ip_protocol='tcp',
cidr_ip='0.0.0.0/0')
return db.security_group_get_by_name(self.context, 'fake', 'testgroup')
def _create_instance(self):
return db.instance_create(self.context,
{'user_id': 'fake',
'project_id': 'fake',
'instance_type_id': 1})
def _create_instance_type(self, params=None):
"""Create a test instance."""
if not params:
params = {}
context = self.context.elevated()
inst = {}
inst['name'] = 'm1.small'
inst['memory_mb'] = '1024'
inst['vcpus'] = '1'
inst['root_gb'] = '10'
inst['ephemeral_gb'] = '20'
inst['flavorid'] = '1'
inst['swap'] = '2048'
inst['rxtx_factor'] = 1
inst.update(params)
return db.instance_type_create(context, inst)['id']
def test_creates_base_rule_first(self):
# These come pre-defined by libvirt
self.defined_filters = ['no-mac-spoofing',
'no-ip-spoofing',
'no-arp-spoofing',
'allow-dhcp-server']
self.recursive_depends = {}
for f in self.defined_filters:
self.recursive_depends[f] = []
def _filterDefineXMLMock(xml):
dom = minidom.parseString(xml)
name = dom.firstChild.getAttribute('name')
self.recursive_depends[name] = []
for f in dom.getElementsByTagName('filterref'):
ref = f.getAttribute('filter')
self.assertTrue(ref in self.defined_filters,
('%s referenced filter that does ' +
'not yet exist: %s') % (name, ref))
dependencies = [ref] + self.recursive_depends[ref]
self.recursive_depends[name] += dependencies
self.defined_filters.append(name)
return True
self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock
instance_ref = self._create_instance()
inst_id = instance_ref['id']
inst_uuid = instance_ref['uuid']
def _ensure_all_called(mac, allow_dhcp):
instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'],
mac.translate(None, ':'))
requiredlist = ['no-arp-spoofing', 'no-ip-spoofing',
'no-mac-spoofing']
if allow_dhcp:
requiredlist.append('allow-dhcp-server')
for required in requiredlist:
self.assertTrue(required in
self.recursive_depends[instance_filter],
"Instance's filter does not include %s" %
required)
self.security_group = self.setup_and_return_security_group()
db.instance_add_security_group(self.context, inst_uuid,
self.security_group['id'])
instance = db.instance_get(self.context, inst_id)
network_info = _fake_network_info(self.stubs, 1)
# since there is one (network_info) there is one vif
# pass this vif's mac to _ensure_all_called()
# to set the instance_filter properly
mac = network_info[0][1]['mac']
self.fw.setup_basic_filtering(instance, network_info)
allow_dhcp = False
for (network, mapping) in network_info:
if mapping['dhcp_server']:
allow_dhcp = True
break
_ensure_all_called(mac, allow_dhcp)
db.instance_remove_security_group(self.context, inst_uuid,
self.security_group['id'])
self.teardown_security_group()
db.instance_destroy(context.get_admin_context(), instance_ref['uuid'])
def test_unfilter_instance_undefines_nwfilters(self):
admin_ctxt = context.get_admin_context()
fakefilter = NWFilterFakes()
self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName
instance_ref = self._create_instance()
inst_id = instance_ref['id']
inst_uuid = instance_ref['uuid']
self.security_group = self.setup_and_return_security_group()
db.instance_add_security_group(self.context, inst_uuid,
self.security_group['id'])
instance = db.instance_get(self.context, inst_id)
network_info = _fake_network_info(self.stubs, 1)
self.fw.setup_basic_filtering(instance, network_info)
original_filter_count = len(fakefilter.filters)
self.fw.unfilter_instance(instance, network_info)
self.assertEqual(original_filter_count - len(fakefilter.filters), 1)
db.instance_destroy(admin_ctxt, instance_ref['uuid'])
def test_nwfilter_parameters(self):
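        # The generated nwfilter should carry the instance's IP, DHCP
        # server, RA server and project network parameters.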
admin_ctxt = context.get_admin_context()
fakefilter = NWFilterFakes()
self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName
instance_ref = self._create_instance()
inst_id = instance_ref['id']
inst_uuid = instance_ref['uuid']
self.security_group = self.setup_and_return_security_group()
db.instance_add_security_group(self.context, inst_uuid,
self.security_group['id'])
instance = db.instance_get(self.context, inst_id)
network_info = _fake_network_info(self.stubs, 1)
self.fw.setup_basic_filtering(instance, network_info)
(network, mapping) = network_info[0]
nic_id = mapping['mac'].replace(':', '')
instance_filter_name = self.fw._instance_filter_name(instance, nic_id)
f = fakefilter.nwfilterLookupByName(instance_filter_name)
tree = etree.fromstring(f.xml)
for fref in tree.findall('filterref'):
parameters = fref.findall('./parameter')
for parameter in parameters:
if parameter.get('name') == 'IP':
self.assertTrue(_ipv4_like(parameter.get('value'),
'192.168'))
elif parameter.get('name') == 'DHCPSERVER':
dhcp_server = mapping['dhcp_server']
self.assertEqual(parameter.get('value'), dhcp_server)
elif parameter.get('name') == 'RASERVER':
ra_server = mapping.get('gateway_v6') + "/128"
self.assertEqual(parameter.get('value'), ra_server)
elif parameter.get('name') == 'PROJNET':
ipv4_cidr = network['cidr']
net, mask = netutils.get_net_and_mask(ipv4_cidr)
self.assertEqual(parameter.get('value'), net)
elif parameter.get('name') == 'PROJMASK':
ipv4_cidr = network['cidr']
net, mask = netutils.get_net_and_mask(ipv4_cidr)
self.assertEqual(parameter.get('value'), mask)
elif parameter.get('name') == 'PROJNET6':
ipv6_cidr = network['cidr_v6']
net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
self.assertEqual(parameter.get('value'), net)
elif parameter.get('name') == 'PROJMASK6':
ipv6_cidr = network['cidr_v6']
net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
self.assertEqual(parameter.get('value'), prefix)
else:
raise exception.InvalidParameterValue('unknown parameter '
'in filter')
db.instance_destroy(admin_ctxt, instance_ref['uuid'])


class LibvirtUtilsTestCase(test.TestCase):
def test_get_iscsi_initiator(self):
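        # The initiator IQN should be parsed out of initiatorname.iscsi.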
self.mox.StubOutWithMock(utils, 'execute')
initiator = 'fake.initiator.iqn'
rval = ("junk\nInitiatorName=%s\njunk\n" % initiator, None)
utils.execute('cat', '/etc/iscsi/initiatorname.iscsi',
run_as_root=True).AndReturn(rval)
# Start test
self.mox.ReplayAll()
result = libvirt_utils.get_iscsi_initiator()
self.assertEqual(initiator, result)
def test_get_missing_iscsi_initiator(self):
self.mox.StubOutWithMock(utils, 'execute')
file_path = '/etc/iscsi/initiatorname.iscsi'
utils.execute('cat', file_path, run_as_root=True).AndRaise(
exception.FileNotFound(file_path=file_path)
)
# Start test
self.mox.ReplayAll()
result = libvirt_utils.get_iscsi_initiator()
self.assertIsNone(result)
def test_create_image(self):
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('qemu-img', 'create', '-f', 'raw',
'/some/path', '10G')
utils.execute('qemu-img', 'create', '-f', 'qcow2',
'/some/stuff', '1234567891234')
# Start test
self.mox.ReplayAll()
libvirt_utils.create_image('raw', '/some/path', '10G')
libvirt_utils.create_image('qcow2', '/some/stuff', '1234567891234')
def test_create_cow_image(self):
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
rval = ('', '')
os.path.exists('/some/path').AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', '/some/path').AndReturn(rval)
utils.execute('qemu-img', 'create', '-f', 'qcow2',
'-o', 'backing_file=/some/path',
'/the/new/cow')
# Start test
self.mox.ReplayAll()
libvirt_utils.create_cow_image('/some/path', '/the/new/cow')
def test_pick_disk_driver_name(self):
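        # The disk driver name depends on the libvirt type and on whether
        # the backing device is a block device.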
type_map = {'kvm': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']),
'qemu': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']),
'xen': ([True, 'phy'], [False, 'tap'], [None, 'tap']),
'uml': ([True, None], [False, None], [None, None]),
'lxc': ([True, None], [False, None], [None, None])}
for (libvirt_type, checks) in type_map.iteritems():
self.flags(libvirt_type=libvirt_type)
for (is_block_dev, expected_result) in checks:
result = libvirt_utils.pick_disk_driver_name(is_block_dev)
self.assertEquals(result, expected_result)
def test_get_disk_size(self):
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(utils, 'execute')
os.path.exists('/some/path').AndReturn(True)
utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
'/some/path').AndReturn(('''image: 00000001
file format: raw
virtual size: 4.4M (4592640 bytes)
disk size: 4.4M''', ''))
# Start test
self.mox.ReplayAll()
self.assertEquals(disk.get_disk_size('/some/path'), 4592640)
def test_copy_image(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
src_fd, src_path = tempfile.mkstemp()
try:
with os.fdopen(src_fd, 'w') as fp:
fp.write('canary')
libvirt_utils.copy_image(src_path, dst_path)
with open(dst_path, 'r') as fp:
self.assertEquals(fp.read(), 'canary')
finally:
os.unlink(src_path)
finally:
os.unlink(dst_path)
def test_write_to_file(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
libvirt_utils.write_to_file(dst_path, 'hello')
with open(dst_path, 'r') as fp:
self.assertEquals(fp.read(), 'hello')
finally:
os.unlink(dst_path)
def test_write_to_file_with_umask(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
os.unlink(dst_path)
libvirt_utils.write_to_file(dst_path, 'hello', umask=0277)
with open(dst_path, 'r') as fp:
self.assertEquals(fp.read(), 'hello')
mode = os.stat(dst_path).st_mode
self.assertEquals(mode & 0277, 0)
finally:
os.unlink(dst_path)
def test_chown(self):
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('chown', 'soren', '/some/path', run_as_root=True)
self.mox.ReplayAll()
libvirt_utils.chown('/some/path', 'soren')
def _do_test_extract_snapshot(self, dest_format='raw', out_format='raw'):
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('qemu-img', 'convert', '-f', 'qcow2', '-O', out_format,
'-s', 'snap1', '/path/to/disk/image', '/extracted/snap')
# Start test
self.mox.ReplayAll()
libvirt_utils.extract_snapshot('/path/to/disk/image', 'qcow2',
'snap1', '/extracted/snap', dest_format)
def test_extract_snapshot_raw(self):
self._do_test_extract_snapshot()
def test_extract_snapshot_iso(self):
self._do_test_extract_snapshot(dest_format='iso')
def test_extract_snapshot_qcow2(self):
self._do_test_extract_snapshot(dest_format='qcow2', out_format='qcow2')
def test_load_file(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
# We have a test for write_to_file. If that is sound, this suffices
libvirt_utils.write_to_file(dst_path, 'hello')
self.assertEquals(libvirt_utils.load_file(dst_path), 'hello')
finally:
os.unlink(dst_path)
def test_file_open(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
# We have a test for write_to_file. If that is sound, this suffices
libvirt_utils.write_to_file(dst_path, 'hello')
with libvirt_utils.file_open(dst_path, 'r') as fp:
self.assertEquals(fp.read(), 'hello')
finally:
os.unlink(dst_path)
def test_get_fs_info(self):
class FakeStatResult(object):
def __init__(self):
self.f_bsize = 4096
self.f_frsize = 4096
self.f_blocks = 2000
self.f_bfree = 1000
self.f_bavail = 900
self.f_files = 2000
self.f_ffree = 1000
self.f_favail = 900
self.f_flag = 4096
self.f_namemax = 255
self.path = None
def fake_statvfs(path):
self.path = path
return FakeStatResult()
self.stubs.Set(os, 'statvfs', fake_statvfs)
fs_info = libvirt_utils.get_fs_info('/some/file/path')
self.assertEquals('/some/file/path', self.path)
self.assertEquals(8192000, fs_info['total'])
self.assertEquals(3686400, fs_info['free'])
self.assertEquals(4096000, fs_info['used'])
def test_fetch_image(self):
self.mox.StubOutWithMock(images, 'fetch_to_raw')
context = 'opaque context'
target = '/tmp/targetfile'
image_id = '4'
user_id = 'fake'
project_id = 'fake'
images.fetch_to_raw(context, image_id, target, user_id, project_id)
self.mox.ReplayAll()
libvirt_utils.fetch_image(context, target, image_id,
user_id, project_id)
def test_fetch_raw_image(self):
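        # fetch_to_raw() should convert qcow2 images to raw, leave raw
        # images untouched, and reject images with a backing file.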
def fake_execute(*cmd, **kwargs):
self.executes.append(cmd)
return None, None
def fake_rename(old, new):
self.executes.append(('mv', old, new))
def fake_unlink(path):
self.executes.append(('rm', path))
        def fake_rm_on_error(path):
self.executes.append(('rm', '-f', path))
def fake_qemu_img_info(path):
class FakeImgInfo(object):
pass
file_format = path.split('.')[-1]
if file_format == 'part':
file_format = path.split('.')[-2]
elif file_format == 'converted':
file_format = 'raw'
if 'backing' in path:
backing_file = 'backing'
else:
backing_file = None
FakeImgInfo.file_format = file_format
FakeImgInfo.backing_file = backing_file
return FakeImgInfo()
self.stubs.Set(utils, 'execute', fake_execute)
self.stubs.Set(os, 'rename', fake_rename)
self.stubs.Set(os, 'unlink', fake_unlink)
self.stubs.Set(images, 'fetch', lambda *_: None)
self.stubs.Set(images, 'qemu_img_info', fake_qemu_img_info)
        self.stubs.Set(utils, 'delete_if_exists', fake_rm_on_error)
context = 'opaque context'
image_id = '4'
user_id = 'fake'
project_id = 'fake'
target = 't.qcow2'
self.executes = []
expected_commands = [('qemu-img', 'convert', '-O', 'raw',
't.qcow2.part', 't.qcow2.converted'),
('rm', 't.qcow2.part'),
('mv', 't.qcow2.converted', 't.qcow2')]
images.fetch_to_raw(context, image_id, target, user_id, project_id)
self.assertEqual(self.executes, expected_commands)
target = 't.raw'
self.executes = []
expected_commands = [('mv', 't.raw.part', 't.raw')]
images.fetch_to_raw(context, image_id, target, user_id, project_id)
self.assertEqual(self.executes, expected_commands)
target = 'backing.qcow2'
self.executes = []
expected_commands = [('rm', '-f', 'backing.qcow2.part')]
self.assertRaises(exception.ImageUnacceptable,
images.fetch_to_raw,
context, image_id, target, user_id, project_id)
self.assertEqual(self.executes, expected_commands)
del self.executes
def test_get_disk_backing_file(self):
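        # The backing file basename should be returned, preferring the
        # '(actual path: ...)' value when qemu-img reports one.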
with_actual_path = False
def fake_execute(*args, **kwargs):
if with_actual_path:
return ("some: output\n"
"backing file: /foo/bar/baz (actual path: /a/b/c)\n"
"...: ...\n"), ''
else:
return ("some: output\n"
"backing file: /foo/bar/baz\n"
"...: ...\n"), ''
def return_true(*args, **kwargs):
return True
self.stubs.Set(utils, 'execute', fake_execute)
self.stubs.Set(os.path, 'exists', return_true)
out = libvirt_utils.get_disk_backing_file('')
self.assertEqual(out, 'baz')
with_actual_path = True
out = libvirt_utils.get_disk_backing_file('')
self.assertEqual(out, 'c')
class LibvirtDriverTestCase(test.TestCase):
"""Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver."""
def setUp(self):
super(LibvirtDriverTestCase, self).setUp()
self.libvirtconnection = libvirt_driver.LibvirtDriver(
fake.FakeVirtAPI(), read_only=True)
def _create_instance(self, params=None):
"""Create a test instance."""
if not params:
params = {}
sys_meta = instance_types.save_instance_type_info(
{}, instance_types.get_instance_type_by_name('m1.tiny'))
inst = {}
inst['image_ref'] = '1'
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = 'fake'
inst['project_id'] = 'fake'
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
inst['host'] = 'host1'
inst['root_gb'] = 10
inst['ephemeral_gb'] = 20
inst['config_drive'] = 1
inst['kernel_id'] = 2
inst['ramdisk_id'] = 3
inst['config_drive_id'] = 1
inst['key_data'] = 'ABCDEFG'
inst['system_metadata'] = sys_meta
inst.update(params)
return db.instance_create(context.get_admin_context(), inst)
def test_migrate_disk_and_power_off_exception(self):
"""Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.migrate_disk_and_power_off. """
self.counter = 0
self.checked_shared_storage = False
def fake_get_instance_disk_info(instance, xml=None,
block_device_info=None):
return '[]'
def fake_destroy(instance):
pass
def fake_get_host_ip_addr():
return '10.0.0.1'
def fake_execute(*args, **kwargs):
self.counter += 1
if self.counter == 1:
assert False, "intentional failure"
def fake_os_path_exists(path):
return True
def fake_is_storage_shared(dest, inst_base):
self.checked_shared_storage = True
return False
self.stubs.Set(self.libvirtconnection, 'get_instance_disk_info',
fake_get_instance_disk_info)
self.stubs.Set(self.libvirtconnection, '_destroy', fake_destroy)
self.stubs.Set(self.libvirtconnection, 'get_host_ip_addr',
fake_get_host_ip_addr)
self.stubs.Set(self.libvirtconnection, '_is_storage_shared_with',
fake_is_storage_shared)
self.stubs.Set(utils, 'execute', fake_execute)
self.stubs.Set(os.path, 'exists', fake_os_path_exists)
ins_ref = self._create_instance()
self.assertRaises(AssertionError,
self.libvirtconnection.migrate_disk_and_power_off,
None, ins_ref, '10.0.0.2', None, None)
def test_migrate_disk_and_power_off(self):
"""Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.migrate_disk_and_power_off. """
disk_info = [{'type': 'qcow2', 'path': '/test/disk',
'virt_disk_size': '10737418240',
'backing_file': '/base/disk',
'disk_size': '83886080'},
{'type': 'raw', 'path': '/test/disk.local',
'virt_disk_size': '10737418240',
'backing_file': '/base/disk.local',
'disk_size': '83886080'}]
disk_info_text = jsonutils.dumps(disk_info)
def fake_get_instance_disk_info(instance, xml=None,
block_device_info=None):
return disk_info_text
def fake_destroy(instance):
pass
def fake_get_host_ip_addr():
return '10.0.0.1'
def fake_execute(*args, **kwargs):
pass
self.stubs.Set(self.libvirtconnection, 'get_instance_disk_info',
fake_get_instance_disk_info)
self.stubs.Set(self.libvirtconnection, '_destroy', fake_destroy)
self.stubs.Set(self.libvirtconnection, 'get_host_ip_addr',
fake_get_host_ip_addr)
self.stubs.Set(utils, 'execute', fake_execute)
ins_ref = self._create_instance()
# dest is different host case
out = self.libvirtconnection.migrate_disk_and_power_off(
None, ins_ref, '10.0.0.2', None, None)
self.assertEquals(out, disk_info_text)
# dest is same host case
out = self.libvirtconnection.migrate_disk_and_power_off(
None, ins_ref, '10.0.0.1', None, None)
self.assertEquals(out, disk_info_text)
def test_wait_for_running(self):
def fake_get_info(instance):
if instance['name'] == "not_found":
raise exception.NotFound
elif instance['name'] == "running":
return {'state': power_state.RUNNING}
else:
return {'state': power_state.SHUTDOWN}
self.stubs.Set(self.libvirtconnection, 'get_info',
fake_get_info)
# instance not found case
self.assertRaises(exception.NotFound,
self.libvirtconnection._wait_for_running,
{'name': 'not_found',
'uuid': 'not_found_uuid'})
# instance is running case
self.assertRaises(utils.LoopingCallDone,
self.libvirtconnection._wait_for_running,
{'name': 'running',
'uuid': 'running_uuid'})
# else case
self.libvirtconnection._wait_for_running({'name': 'else',
'uuid': 'other_uuid'})
def test_finish_migration(self):
"""Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.finish_migration. """
disk_info = [{'type': 'qcow2', 'path': '/test/disk',
'local_gb': 10, 'backing_file': '/base/disk'},
{'type': 'raw', 'path': '/test/disk.local',
'local_gb': 10, 'backing_file': '/base/disk.local'}]
disk_info_text = jsonutils.dumps(disk_info)
def fake_can_resize_fs(path, size, use_cow=False):
return False
def fake_extend(path, size):
pass
def fake_to_xml(instance, network_info, disk_info,
image_meta=None, rescue=None,
block_device_info=None, write_to_disk=False):
return ""
def fake_plug_vifs(instance, network_info):
pass
def fake_create_image(context, inst,
disk_mapping, suffix='',
disk_images=None, network_info=None,
block_device_info=None):
pass
def fake_create_domain(xml, instance=None):
return None
def fake_enable_hairpin(instance):
pass
def fake_execute(*args, **kwargs):
pass
def fake_get_info(instance):
return {'state': power_state.RUNNING}
self.flags(use_cow_images=True)
self.stubs.Set(libvirt_driver.disk, 'extend', fake_extend)
self.stubs.Set(libvirt_driver.disk, 'can_resize_fs',
fake_can_resize_fs)
self.stubs.Set(self.libvirtconnection, 'to_xml', fake_to_xml)
self.stubs.Set(self.libvirtconnection, 'plug_vifs', fake_plug_vifs)
self.stubs.Set(self.libvirtconnection, '_create_image',
fake_create_image)
self.stubs.Set(self.libvirtconnection, '_create_domain',
fake_create_domain)
self.stubs.Set(self.libvirtconnection, '_enable_hairpin',
fake_enable_hairpin)
self.stubs.Set(utils, 'execute', fake_execute)
fw = base_firewall.NoopFirewallDriver()
self.stubs.Set(self.libvirtconnection, 'firewall_driver', fw)
self.stubs.Set(self.libvirtconnection, 'get_info',
fake_get_info)
ins_ref = self._create_instance()
self.libvirtconnection.finish_migration(
context.get_admin_context(), None, ins_ref,
disk_info_text, None, None, None)
def test_finish_revert_migration(self):
"""Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.finish_revert_migration. """
def fake_execute(*args, **kwargs):
pass
def fake_plug_vifs(instance, network_info):
pass
def fake_create_domain(xml, instance=None):
return None
def fake_enable_hairpin(instance):
pass
def fake_get_info(instance):
return {'state': power_state.RUNNING}
def fake_to_xml(instance, network_info, disk_info,
image_meta=None, rescue=None,
block_device_info=None):
return ""
self.stubs.Set(self.libvirtconnection, 'to_xml', fake_to_xml)
self.stubs.Set(self.libvirtconnection, 'plug_vifs', fake_plug_vifs)
self.stubs.Set(utils, 'execute', fake_execute)
fw = base_firewall.NoopFirewallDriver()
self.stubs.Set(self.libvirtconnection, 'firewall_driver', fw)
self.stubs.Set(self.libvirtconnection, '_create_domain',
fake_create_domain)
self.stubs.Set(self.libvirtconnection, '_enable_hairpin',
fake_enable_hairpin)
self.stubs.Set(self.libvirtconnection, 'get_info',
fake_get_info)
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
ins_ref = self._create_instance()
os.mkdir(os.path.join(tmpdir, ins_ref['name']))
libvirt_xml_path = os.path.join(tmpdir,
ins_ref['name'],
'libvirt.xml')
f = open(libvirt_xml_path, 'w')
f.close()
self.libvirtconnection.finish_revert_migration(ins_ref, None)
def _test_finish_revert_migration_after_crash(self, backup_made, new_made):
class FakeLoopingCall:
def start(self, *a, **k):
return self
def wait(self):
return None
self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(shutil, 'rmtree')
self.mox.StubOutWithMock(utils, 'execute')
self.stubs.Set(blockinfo, 'get_disk_info', lambda *a: None)
self.stubs.Set(self.libvirtconnection, 'to_xml', lambda *a, **k: None)
self.stubs.Set(self.libvirtconnection, '_create_domain_and_network',
lambda *a: None)
self.stubs.Set(utils, 'FixedIntervalLoopingCall',
lambda *a, **k: FakeLoopingCall())
libvirt_utils.get_instance_path({}).AndReturn('/fake/foo')
os.path.exists('/fake/foo_resize').AndReturn(backup_made)
if backup_made:
os.path.exists('/fake/foo').AndReturn(new_made)
if new_made:
shutil.rmtree('/fake/foo')
utils.execute('mv', '/fake/foo_resize', '/fake/foo')
self.mox.ReplayAll()
self.libvirtconnection.finish_revert_migration({}, [])
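    # Illustrative summary (added, not from the original): the helper above covers
    # finish_revert_migration's crash-recovery branches:
    #   backup_made=True,  new_made=True  -> remove the half-built instance dir,
    #                                        then move the *_resize backup back
    #   backup_made=True,  new_made=False -> only move the backup back
    #   backup_made=False                 -> nothing to restore, plain revert path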
def test_finish_revert_migration_after_crash(self):
self._test_finish_revert_migration_after_crash(True, True)
def test_finish_revert_migration_after_crash_before_new(self):
self._test_finish_revert_migration_after_crash(True, False)
def test_finish_revert_migration_after_crash_before_backup(self):
self._test_finish_revert_migration_after_crash(False, False)
def test_cleanup_failed_migration(self):
self.mox.StubOutWithMock(shutil, 'rmtree')
shutil.rmtree('/fake/inst')
self.mox.ReplayAll()
self.libvirtconnection._cleanup_failed_migration('/fake/inst')
def test_confirm_migration(self):
ins_ref = self._create_instance()
self.mox.StubOutWithMock(self.libvirtconnection, "_cleanup_resize")
self.libvirtconnection._cleanup_resize(ins_ref,
_fake_network_info(self.stubs, 1))
self.mox.ReplayAll()
self.libvirtconnection.confirm_migration("migration_ref", ins_ref,
_fake_network_info(self.stubs, 1))
def test_cleanup_resize_same_host(self):
ins_ref = self._create_instance({'host': CONF.host})
def fake_os_path_exists(path):
return True
def fake_shutil_rmtree(target):
pass
self.stubs.Set(os.path, 'exists', fake_os_path_exists)
self.stubs.Set(shutil, 'rmtree', fake_shutil_rmtree)
self.mox.ReplayAll()
self.libvirtconnection._cleanup_resize(ins_ref,
_fake_network_info(self.stubs, 1))
def test_cleanup_resize_not_same_host(self):
host = 'not' + CONF.host
ins_ref = self._create_instance({'host': host})
def fake_os_path_exists(path):
return True
def fake_shutil_rmtree(target):
pass
def fake_undefine_domain(instance):
pass
def fake_unplug_vifs(instance, network_info):
pass
def fake_unfilter_instance(instance, network_info):
pass
self.stubs.Set(os.path, 'exists', fake_os_path_exists)
self.stubs.Set(shutil, 'rmtree', fake_shutil_rmtree)
self.stubs.Set(self.libvirtconnection, '_undefine_domain',
fake_undefine_domain)
self.stubs.Set(self.libvirtconnection, 'unplug_vifs',
fake_unplug_vifs)
self.stubs.Set(self.libvirtconnection.firewall_driver,
'unfilter_instance', fake_unfilter_instance)
self.mox.ReplayAll()
self.libvirtconnection._cleanup_resize(ins_ref,
_fake_network_info(self.stubs, 1))
def test_get_instance_disk_info_exception(self):
instance_name = "fake-instance-name"
class FakeExceptionDomain(FakeVirtDomain):
def __init__(self):
super(FakeExceptionDomain, self).__init__()
def XMLDesc(self, *args):
raise libvirt.libvirtError("Libvirt error")
def fake_lookup_by_name(instance_name):
return FakeExceptionDomain()
self.stubs.Set(self.libvirtconnection, '_lookup_by_name',
fake_lookup_by_name)
self.assertRaises(exception.InstanceNotFound,
self.libvirtconnection.get_instance_disk_info,
instance_name)
class LibvirtVolumeUsageTestCase(test.TestCase):
"""Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver
.get_all_volume_usage"""
def setUp(self):
super(LibvirtVolumeUsageTestCase, self).setUp()
self.conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.c = context.get_admin_context()
# creating instance
inst = {}
inst['uuid'] = '875a8070-d0b9-4949-8b31-104d125c9a64'
self.ins_ref = db.instance_create(self.c, inst)
# verify bootable volume device path also
self.bdms = [{'volume_id': 1,
'device_name': '/dev/vde'},
{'volume_id': 2,
'device_name': 'vda'}]
def test_get_all_volume_usage(self):
def fake_block_stats(instance_name, disk):
return (169L, 688640L, 0L, 0L, -1L)
self.stubs.Set(self.conn, 'block_stats', fake_block_stats)
vol_usage = self.conn.get_all_volume_usage(self.c,
[dict(instance=self.ins_ref, instance_bdms=self.bdms)])
expected_usage = [{'volume': 1,
'instance': self.ins_ref,
'rd_bytes': 688640L, 'wr_req': 0L,
'flush_operations': -1L, 'rd_req': 169L,
'wr_bytes': 0L},
{'volume': 2,
'instance': self.ins_ref,
'rd_bytes': 688640L, 'wr_req': 0L,
'flush_operations': -1L, 'rd_req': 169L,
'wr_bytes': 0L}]
self.assertEqual(vol_usage, expected_usage)
def test_get_all_volume_usage_device_not_found(self):
def fake_lookup(instance_name):
raise libvirt.libvirtError('invalid path')
self.stubs.Set(self.conn, '_lookup_by_name', fake_lookup)
vol_usage = self.conn.get_all_volume_usage(self.c,
[dict(instance=self.ins_ref, instance_bdms=self.bdms)])
self.assertEqual(vol_usage, [])
class LibvirtNonblockingTestCase(test.TestCase):
"""Test libvirt_nonblocking option."""
def setUp(self):
super(LibvirtNonblockingTestCase, self).setUp()
self.flags(libvirt_nonblocking=True, libvirt_uri="test:///default")
def test_connection_to_primitive(self):
# Test bug 962840.
import nova.virt.libvirt.driver as libvirt_driver
connection = libvirt_driver.LibvirtDriver('')
jsonutils.to_primitive(connection._conn, convert_instances=True)
|
apache-2.0
| -5,483,306,512,135,139,000 | 40.642668 | 79 | 0.541877 | false |
rtrajano/sphinx_rtfd
|
docs/conf.py
|
1
|
8478
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# sphinx_rtfd documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of the current working directory (the docs dir).
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import sphinx_rtfd
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Sphinx and RTFD Demo'
copyright = u'2014, Rigel Trajano'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = sphinx_rtfd.__version__
# The full version, including alpha/beta/rc tags.
release = sphinx_rtfd.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sphinx_rtfddoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'sphinx_rtfd.tex',
u'Sphinx and RTFD Demo Documentation',
u'Rigel Trajano', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sphinx_rtfd',
u'Sphinx and RTFD Demo Documentation',
[u'Rigel Trajano'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'sphinx_rtfd',
u'Sphinx and RTFD Demo Documentation',
u'Rigel Trajano',
'sphinx_rtfd',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
bsd-3-clause
| 6,779,916,785,083,622,000 | 29.829091 | 76 | 0.704883 | false |
noba3/KoTos
|
addons/plugin.video.xstream/sites/gstream_in.py
|
1
|
15009
|
# -*- coding: utf-8 -*-
from resources.lib.util import cUtil
from resources.lib.gui.gui import cGui
from resources.lib.gui.guiElement import cGuiElement
from resources.lib.handler.requestHandler import cRequestHandler
from resources.lib.parser import cParser
from resources.lib.config import cConfig
from resources.lib import logger
from resources.lib.handler.ParameterHandler import ParameterHandler
import hashlib
SITE_IDENTIFIER = 'gstream_in'
SITE_NAME = 'G-Stream'
SITE_ICON = 'gstream.png'
URL_MAIN = 'http://gstream.to'
URL_LOGIN = URL_MAIN + '/login.php'
URL_SHOW_MOVIE = 'http://gstream.to/showthread.php?t='
URL_CATEGORIES = 'http://gstream.to/forumdisplay.php?f='
URL_SEARCH = 'http://gstream.to/search.php'
oConfig = cConfig()
username = oConfig.getSetting('gstream_in-username')
password = oConfig.getSetting('gstream_in-password')
def load():
oGui = cGui()
sSecurityValue = __getSecurityCookieValue()
__login()
__createMainMenuEntry(oGui, 'Aktuelle KinoFilme', 542, sSecurityValue)
oGui.addFolder(cGuiElement('HD Filme',SITE_IDENTIFIER,'showHDMovies'))
__createMainMenuEntry(oGui, 'Action', 591, sSecurityValue)
__createMainMenuEntry(oGui, 'Horror', 593, sSecurityValue)
__createMainMenuEntry(oGui, 'Komoedie', 592, sSecurityValue)
__createMainMenuEntry(oGui, 'Thriller', 595, sSecurityValue)
__createMainMenuEntry(oGui, 'Drama', 594, sSecurityValue)
__createMainMenuEntry(oGui, 'Fantasy', 655, sSecurityValue)
__createMainMenuEntry(oGui, 'Abenteuer', 596, sSecurityValue)
__createMainMenuEntry(oGui, 'Animation', 677, sSecurityValue)
__createMainMenuEntry(oGui, 'Dokumentation', 751, sSecurityValue)
#__createMainMenuEntry(oGui, 'Serien', 543, sSecurityValue)
oGuiElement = cGuiElement()
oGuiElement.setSiteName(SITE_IDENTIFIER)
oGuiElement.setFunction('displaySearch')
oGuiElement.setTitle('Suche Filme')
params = ParameterHandler()
params.setParam('securityCookie', sSecurityValue)
params.setParam('searchType', '528')
oGui.addFolder(oGuiElement, params)
    # Series parsing is not implemented yet
#oGuiElement = cGuiElement()
#oGuiElement.setSiteName(SITE_IDENTIFIER)
#oGuiElement.setFunction('displaySearch')
#oGuiElement.setTitle('Suche Serien')
#params.setParam('searchType', '532')
#oGui.addFolder(oGuiElement, params)
if showAdult():
oGuiElement = cGuiElement()
oGuiElement.setSiteName(SITE_IDENTIFIER)
oGuiElement.setFunction('showXXX')
oGuiElement.setTitle('XXX')
oGui.addFolder(oGuiElement, params)
oGui.setEndOfDirectory()
def __login():
hPassword = hashlib.md5(password).hexdigest()
oRequest = cRequestHandler(URL_LOGIN)
oRequest.addParameters('vb_login_username', username)
oRequest.addParameters('vb_login_password', '')
oRequest.addParameters('s', '')
oRequest.addParameters('do', 'login')
oRequest.addParameters('vb_login_md5password', hPassword)
oRequest.addParameters('vb_login_md5password_utf', hPassword)
oRequest.ignoreDiscard(True)
oRequest.request()
    # Needed so that other sites don't delete the cookie during a global search.
    # Alternatively we could call login in showHoster, but that would generate more login requests...
cookie = oRequest.getCookie("bbsessionhash")
if cookie:
cookie.discard = False
oRequest.setCookie(cookie)
def __createMainMenuEntry(oGui, sMenuName, iCategoryId, sSecurityValue=''):
oGuiElement = cGuiElement()
oGuiElement.setSiteName(SITE_IDENTIFIER)
oGuiElement.setTitle(sMenuName)
oGuiElement.setFunction('parseMovieResultSite')
params = ParameterHandler()
params.setParam('normalySiteUrl', URL_CATEGORIES + str(iCategoryId) + '&order=desc&page=')
params.setParam('siteUrl', URL_CATEGORIES + str(iCategoryId) + '&order=desc&page=1')
params.setParam('iPage', 1)
params.setParam('securityCookie', sSecurityValue)
oGui.addFolder(oGuiElement, params)
def __getSecurityCookieValue():
oRequest = cRequestHandler(URL_MAIN, False, True)
oRequest.ignoreDiscard(True)
sHtmlContent = oRequest.request()
header = oRequest.getResponseHeader()
sPattern = '>DDoS protection by CloudFlare<'
oParser = cParser()
aResult = oParser.parse(sHtmlContent, sPattern)
if not aResult[0]:
logger.info('No known protection found')
return ''
logger.info('CF DDos protection active')
    # Look for the CloudFlare challenge form
sPattern = ('a\.value = ([0-9\*\+\-]+);.*?<form id="challenge-form" action="([^"]+)".*?'
'name="([^"]+)" value="([^"]+)".*?name="([^"]+)"/>.*?</form>')
oParser = cParser()
aResult = oParser.parse(sHtmlContent, sPattern)
if not aResult[0]:
logger.info('ChallengeForm not found')
return False
aResult = aResult[1][0]
constant = len(oRequest.getRealUrl().split('/')[2])
exp = aResult[0]
url = aResult[1]
valueName1 = aResult[2]
value1 = aResult[3]
valueName2 = aResult[4]
value2 = str(eval(exp)+constant)
url = '%s%s?%s=%s&%s=%s' % (URL_MAIN, url, valueName1, value1, valueName2, value2)
oRequest = cRequestHandler(url, caching = False, ignoreErrors = True)
oRequest.addHeaderEntry('Host', 'gstream.to')
oRequest.addHeaderEntry('Referer', URL_MAIN)
oRequest.addHeaderEntry('Connection', 'keep-alive')
oRequest.addHeaderEntry('DNT', '1')
oRequest.ignoreDiscard(True)
sHtmlContent = oRequest.request()
return True
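# Illustrative sketch (added, not part of the original plugin): the CloudFlare
# challenge answer is the scraped arithmetic expression plus the length of the
# host name. Roughly, with hypothetical values:
#
#   exp = '12+5*3'                  # expression parsed out of the challenge page
#   constant = len('gstream.to')    # 10, length of the host part of the URL
#   answer = eval(exp) + constant   # 27 + 10 = 37
#
# which is the second form value __getSecurityCookieValue() submits back.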
def __getHtmlContent(sUrl = None, sSecurityValue=None):
params = ParameterHandler()
# Test if a url is available and set it
if sUrl is None and not params.exist('siteUrl'):
logger.info("There is no url we can request.")
return False
else:
if sUrl is None:
sUrl = params.getValue('siteUrl')
# Test if a security value is available
if sSecurityValue is None:
if params.exist('securityCookie'):
sSecurityValue = params.getValue('securityCookie')
else :
sSecurityValue = ''
# Make the request
oRequest = cRequestHandler(sUrl)
#oRequest.addHeaderEntry('Cookie', sSecurityValue)
#oRequest.addHeaderEntry('Accept', '*/*')
#oRequest.addHeaderEntry('Host', 'gstream.to')
oRequest.ignoreDiscard(True)
return oRequest.request()
def showXXX():
params = ParameterHandler()
oGui = cGui()
__createMainMenuEntry(oGui, 'Alle Pornos', 661)
    # Clips cannot be played at the moment because the clip hoster links cannot be resolved
#__createMainMenuEntry(oGui, 'Clips', 669, sSecurityValue)
oGuiElement = cGuiElement()
oGuiElement.setSiteName(SITE_IDENTIFIER)
oGuiElement.setFunction('displaySearch')
oGuiElement.setTitle('Suche XXX Streams')
params.setParam('searchType', '530')
oGui.addFolder(oGuiElement, params)
__createMainMenuEntry(oGui, 'Amateure', '661&prefixid=Amateure1')
__createMainMenuEntry(oGui, 'Anal', '661&prefixid=Anal')
__createMainMenuEntry(oGui, 'Asia', '661&prefixid=Asia')
__createMainMenuEntry(oGui, 'Black', '661&prefixid=Ebony')
__createMainMenuEntry(oGui, 'Blowjob', '661&prefixid=Blowjob')
__createMainMenuEntry(oGui, 'Deutsch', '661&prefixid=Deutsch')
__createMainMenuEntry(oGui, 'Fetish', '661&prefixid=Fetish')
__createMainMenuEntry(oGui, 'Große Brüste', '661&prefixid=GrosseBrueste')
__createMainMenuEntry(oGui, 'Gruppensex', '661&prefixid=Gruppensex')
__createMainMenuEntry(oGui, 'Gay', '661&prefixid=Gay')
__createMainMenuEntry(oGui, 'Hardcore', '661&prefixid=Hardcore')
__createMainMenuEntry(oGui, 'International', '661&prefixid=International')
__createMainMenuEntry(oGui, 'Lesben', '661&prefixid=Lesben')
__createMainMenuEntry(oGui, 'Masturbation', '661&prefixid=Masturbation')
__createMainMenuEntry(oGui, 'Teens', '661&prefixid=Teens')
oGui.setEndOfDirectory()
def showHDMovies():
oGui = cGui()
sUrl = 'http://gstream.to/search.php?do=process&prefixchoice[]=hd'
oRequest = cRequestHandler(sUrl, caching = False)
oRequest.ignoreDiscard(True)
oRequest.request()
sUrl = oRequest.getRealUrl()
__parseMovieResultSite(oGui, sUrl)
oGui.setEndOfDirectory()
def displaySearch():
oGui = cGui()
sSearchText = oGui.showKeyBoard()
if (sSearchText != False):
_search(oGui, sSearchText)
else:
return
oGui.setEndOfDirectory()
def _search(oGui, sSearchText):
__login()
params = ParameterHandler()
sSearchType = params.getValue('searchType')
if not sSearchType:
sSearchType = '528'
sUrl = URL_SEARCH+'?do=process&childforums=1&do=process&exactname=1&forumchoice[]='+sSearchType+\
'&query=' + str(sSearchText) + '&quicksearch=1&s=&securitytoken=guest&titleonly=1'
oRequest = cRequestHandler(sUrl, caching = False)
oRequest.ignoreDiscard(True)
oRequest.request()
sUrl = oRequest.getRealUrl()
__parseMovieResultSite(oGui, sUrl)
def parseMovieResultSite():
oGui = cGui()
params = ParameterHandler()
if (params.exist('siteUrl')):
siteUrl = params.getValue('siteUrl')
normalySiteUrl = params.getValue('normalySiteUrl')
iPage = params.getValue('iPage')
__parseMovieResultSite(oGui, siteUrl, normalySiteUrl, iPage)
oGui.setEndOfDirectory()
def __parseMovieResultSite(oGui, siteUrl, normalySiteUrl = '', iPage = 1):
if not normalySiteUrl:
normalySiteUrl = siteUrl+'&page='
params = ParameterHandler()
sPattern = 'class="p1".*?<img class="large" src="(http://[^"]+)".*?<a href="[^"]+" id=".*?([^"_]+)"(.*?)>([^<]+)</a>(.*?)</tr>'
#sPattern = 'class="alt1Active".*?<a href="(forumdisplay.php[^"]+)".*?>([^<]+)<.*?(src="([^"]+)|</td>).*?</tr>' #Serien
# request
sHtmlContent = __getHtmlContent(sUrl = siteUrl)
# parse content
oParser = cParser()
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == False):
return
total = len(aResult[1])
for img, link, hdS, title, yearS in aResult[1]:
        sMovieTitle = title.replace('&amp;','&')
sTitle = sMovieTitle
sUrl = URL_SHOW_MOVIE + str(link)
year = ''
aResult = oParser.parse(yearS, ' ([0-9]{4}) -')
if aResult[0]:
year = aResult[1][0]
aResult = oParser.parse(hdS, '(title="HD Quali")')
if aResult[0]:
sTitle = sTitle + ' [HD]'
oGuiElement = cGuiElement(sTitle,SITE_IDENTIFIER,'getHosters')
oGuiElement.setMediaType('movie')
oGuiElement.setYear(year)
oGuiElement.setThumbnail(img)
params.setParam('movieUrl', sUrl)
params.setParam('sMovieTitle', sMovieTitle)
oGui.addFolder(oGuiElement, params, bIsFolder = False, iTotal = total)
# check for next site
iTotalPages = __getTotalPages(iPage, sHtmlContent)
if (iTotalPages >= int(iPage)+1):
params = ParameterHandler()
params.setParam('iPage', int(iPage)+1)
params.setParam('normalySiteUrl', normalySiteUrl)
params.setParam('siteUrl', normalySiteUrl+str(int(iPage)+1))
oGui.addNextPage(SITE_IDENTIFIER,'parseMovieResultSite', params, iTotalPages)
if iTotalPages > 1:
oGuiElement = cGuiElement('Go to page x of '+str(iTotalPages),SITE_IDENTIFIER,'gotoPage')
params = ParameterHandler()
oGui.addFolder(oGuiElement, params)
oGui.setView('movies')
def gotoPage():
oGui = cGui()
pageNum = oGui.showNumpad()
if not pageNum:
return
params = ParameterHandler()
siteUrl = params.getValue('normalySiteUrl')+pageNum
__parseMovieResultSite(oGui, siteUrl, iPage = int(pageNum))
oGui.setEndOfDirectory()
def __getTotalPages(iPage, sHtml):
sPattern = '>Seite [0-9]+ von ([0-9]+)<'
oParser = cParser()
aResult = oParser.parse(sHtml, sPattern)
if (aResult[0] == True):
iTotalCount = int(aResult[1][0])
return iTotalCount
return 0
def __createDisplayStart(iPage):
return (20 * int(iPage)) - 20
def __createInfo(oGui, sHtmlContent):
sPattern = '<td class="alt1" id="td_post_.*?<img src="([^"]+)".*?<b>Inhalt:</b>(.*?)<br />'
oParser = cParser()
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
for aEntry in aResult[1]:
sThumbnail = str(aEntry[0])
sDescription = cUtil().removeHtmlTags(str(aEntry[1])).replace('\t', '').strip()
oGuiElement = cGuiElement()
oGuiElement.setSiteName(SITE_IDENTIFIER)
oGuiElement.setTitle('info (press Info Button)')
oGuiElement.setThumbnail(sThumbnail)
oGuiElement.setFunction('dummyFolder')
oGuiElement.setDescription(sDescription)
oGui.addFolder(oGuiElement)
def showAdult():
oConfig = cConfig()
if oConfig.getSetting('showAdult')=='true':
return True
return False
def dummyFolder():
oGui = cGui()
oGui.setEndOfDirectory()
#### Hosterhandling
def getHosters():
hosters = []
params = ParameterHandler()
if (params.exist('movieUrl') and params.exist('sMovieTitle')):
sSiteUrl = params.getValue('movieUrl')
sMovieTitle = params.getValue('sMovieTitle')
sHtmlContent = __getHtmlContent(sUrl = sSiteUrl)
sPattern = 'id="ame_noshow_post.*?<a href="([^"]+)" title="[^"]+" target="_blank">([^<]+)</a>'
aResult = cParser().parse(sHtmlContent, sPattern)
if aResult[0] == True:
for aEntry in aResult[1]:
sUrl = aEntry[0]
# extract hoster domainname
if 'gstream.to/secure/' in sUrl :
sHoster = sUrl.split('secure/')[-1].split('/')[0].split('.')[-2]
else:
sHoster = sUrl.split('//')[-1].split('/')[0].split('.')[-2]
hoster = {}
hoster['link'] = sUrl
hoster['name'] = sHoster
hosters.append(hoster)
hosters.append('getHosterUrl')
return hosters
def getHosterUrl(sUrl = False):
params = ParameterHandler()
if not sUrl:
sUrl = params.getValue('url')
results = []
if 'gstream.to/secure/' in sUrl :
sHoster = sUrl.split('secure/')[-1].split('/')[0]
oRequest = cRequestHandler(sUrl, False)
oRequest.addHeaderEntry('Cookie', params.getValue('securityCookie'))
oRequest.addHeaderEntry('Referer', params.getValue('movieUrl'))
oRequest.ignoreDiscard(True)
try:
oRequest.request()
sUrl = oRequest.getRealUrl()
sUrl = 'http://%s%s' % (sHoster, sUrl.split(sHoster)[-1])
except:
pass
result = {}
result['streamUrl'] = sUrl
result['resolved'] = False
results.append(result)
return results
|
gpl-2.0
| -2,172,259,095,798,689,000 | 37.469231 | 131 | 0.650247 | false |
rokihi/ObjectDetector
|
YOLO_tiny_tf.py
|
1
|
9863
|
import numpy as np
import tensorflow as tf
import cv2
import time
import sys
class YOLO_TF:
fromfile = None
tofile_img = 'test/output.jpg'
tofile_txt = 'test/output.txt'
imshow = True
filewrite_img = False
filewrite_txt = False
disp_console = True
weights_file = 'weights/YOLO_tiny.ckpt'
alpha = 0.1
threshold = 0.1
iou_threshold = 0.5
num_class = 20
num_box = 2
grid_size = 7
classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train","tvmonitor"]
w_img = 640
h_img = 480
def __init__(self,argvs = []):
self.argv_parser(argvs)
self.build_networks()
if self.fromfile is not None: self.detect_from_file(self.fromfile)
def argv_parser(self,argvs):
for i in range(1,len(argvs),2):
if argvs[i] == '-fromfile' : self.fromfile = argvs[i+1]
if argvs[i] == '-tofile_img' : self.tofile_img = argvs[i+1] ; self.filewrite_img = True
if argvs[i] == '-tofile_txt' : self.tofile_txt = argvs[i+1] ; self.filewrite_txt = True
if argvs[i] == '-imshow' :
if argvs[i+1] == '1' :self.imshow = True
else : self.imshow = False
if argvs[i] == '-disp_console' :
if argvs[i+1] == '1' :self.disp_console = True
else : self.disp_console = False
def build_networks(self):
if self.disp_console : print "Building YOLO_tiny graph..."
self.x = tf.placeholder('float32',[None,448,448,3])
self.conv_1 = self.conv_layer(1,self.x,16,3,1)
self.pool_2 = self.pooling_layer(2,self.conv_1,2,2)
self.conv_3 = self.conv_layer(3,self.pool_2,32,3,1)
self.pool_4 = self.pooling_layer(4,self.conv_3,2,2)
self.conv_5 = self.conv_layer(5,self.pool_4,64,3,1)
self.pool_6 = self.pooling_layer(6,self.conv_5,2,2)
self.conv_7 = self.conv_layer(7,self.pool_6,128,3,1)
self.pool_8 = self.pooling_layer(8,self.conv_7,2,2)
self.conv_9 = self.conv_layer(9,self.pool_8,256,3,1)
self.pool_10 = self.pooling_layer(10,self.conv_9,2,2)
self.conv_11 = self.conv_layer(11,self.pool_10,512,3,1)
self.pool_12 = self.pooling_layer(12,self.conv_11,2,2)
self.conv_13 = self.conv_layer(13,self.pool_12,1024,3,1)
self.conv_14 = self.conv_layer(14,self.conv_13,1024,3,1)
self.conv_15 = self.conv_layer(15,self.conv_14,1024,3,1)
self.fc_16 = self.fc_layer(16,self.conv_15,256,flat=True,linear=False)
self.fc_17 = self.fc_layer(17,self.fc_16,4096,flat=False,linear=False)
#skip dropout_18
self.fc_19 = self.fc_layer(19,self.fc_17,1470,flat=False,linear=True)
self.sess = tf.Session()
self.sess.run(tf.initialize_all_variables())
self.saver = tf.train.Saver()
self.saver.restore(self.sess,self.weights_file)
if self.disp_console : print "Loading complete!" + '\n'
def conv_layer(self,idx,inputs,filters,size,stride):
channels = inputs.get_shape()[3]
weight = tf.Variable(tf.truncated_normal([size,size,int(channels),filters], stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=[filters]))
pad_size = size//2
pad_mat = np.array([[0,0],[pad_size,pad_size],[pad_size,pad_size],[0,0]])
inputs_pad = tf.pad(inputs,pad_mat)
conv = tf.nn.conv2d(inputs_pad, weight, strides=[1, stride, stride, 1], padding='VALID',name=str(idx)+'_conv')
conv_biased = tf.add(conv,biases,name=str(idx)+'_conv_biased')
if self.disp_console : print ' Layer %d : Type = Conv, Size = %d * %d, Stride = %d, Filters = %d, Input channels = %d' % (idx,size,size,stride,filters,int(channels))
return tf.maximum(self.alpha*conv_biased,conv_biased,name=str(idx)+'_leaky_relu')
def pooling_layer(self,idx,inputs,size,stride):
if self.disp_console : print ' Layer %d : Type = Pool, Size = %d * %d, Stride = %d' % (idx,size,size,stride)
return tf.nn.max_pool(inputs, ksize=[1, size, size, 1],strides=[1, stride, stride, 1], padding='SAME',name=str(idx)+'_pool')
def fc_layer(self,idx,inputs,hiddens,flat = False,linear = False):
input_shape = inputs.get_shape().as_list()
if flat:
dim = input_shape[1]*input_shape[2]*input_shape[3]
inputs_transposed = tf.transpose(inputs,(0,3,1,2))
inputs_processed = tf.reshape(inputs_transposed, [-1,dim])
else:
dim = input_shape[1]
inputs_processed = inputs
weight = tf.Variable(tf.truncated_normal([dim,hiddens], stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=[hiddens]))
if self.disp_console : print ' Layer %d : Type = Full, Hidden = %d, Input dimension = %d, Flat = %d, Activation = %d' % (idx,hiddens,int(dim),int(flat),1-int(linear))
if linear : return tf.add(tf.matmul(inputs_processed,weight),biases,name=str(idx)+'_fc')
ip = tf.add(tf.matmul(inputs_processed,weight),biases)
return tf.maximum(self.alpha*ip,ip,name=str(idx)+'_fc')
def detect_from_cvmat(self,img):
s = time.time()
self.h_img,self.w_img,_ = img.shape
img_resized = cv2.resize(img, (448, 448))
img_RGB = cv2.cvtColor(img_resized,cv2.COLOR_BGR2RGB)
img_resized_np = np.asarray( img_RGB )
inputs = np.zeros((1,448,448,3),dtype='float32')
inputs[0] = (img_resized_np/255.0)*2.0-1.0
in_dict = {self.x: inputs}
net_output = self.sess.run(self.fc_19,feed_dict=in_dict)
self.result = self.interpret_output(net_output[0])
self.show_results(img,self.result)
strtime = str(time.time()-s)
if self.disp_console : print 'Elapsed time : ' + strtime + ' secs' + '\n'
def detect_from_file(self,filename):
if self.disp_console : print 'Detect from ' + filename
img = cv2.imread(filename)
#img = misc.imread(filename)
self.detect_from_cvmat(img)
def detect_from_crop_sample(self):
self.w_img = 640
self.h_img = 420
f = np.array(open('person_crop.txt','r').readlines(),dtype='float32')
inputs = np.zeros((1,448,448,3),dtype='float32')
for c in range(3):
for y in range(448):
for x in range(448):
inputs[0,y,x,c] = f[c*448*448+y*448+x]
in_dict = {self.x: inputs}
net_output = self.sess.run(self.fc_19,feed_dict=in_dict)
self.boxes, self.probs = self.interpret_output(net_output[0])
img = cv2.imread('person.jpg')
self.show_results(self.boxes,img)
def interpret_output(self,output):
probs = np.zeros((7,7,2,20))
class_probs = np.reshape(output[0:980],(7,7,20))
scales = np.reshape(output[980:1078],(7,7,2))
boxes = np.reshape(output[1078:],(7,7,2,4))
offset = np.transpose(np.reshape(np.array([np.arange(7)]*14),(2,7,7)),(1,2,0))
boxes[:,:,:,0] += offset
boxes[:,:,:,1] += np.transpose(offset,(1,0,2))
boxes[:,:,:,0:2] = boxes[:,:,:,0:2] / 7.0
boxes[:,:,:,2] = np.multiply(boxes[:,:,:,2],boxes[:,:,:,2])
boxes[:,:,:,3] = np.multiply(boxes[:,:,:,3],boxes[:,:,:,3])
boxes[:,:,:,0] *= self.w_img
boxes[:,:,:,1] *= self.h_img
boxes[:,:,:,2] *= self.w_img
boxes[:,:,:,3] *= self.h_img
for i in range(2):
for j in range(20):
probs[:,:,i,j] = np.multiply(class_probs[:,:,j],scales[:,:,i])
filter_mat_probs = np.array(probs>=self.threshold,dtype='bool')
filter_mat_boxes = np.nonzero(filter_mat_probs)
boxes_filtered = boxes[filter_mat_boxes[0],filter_mat_boxes[1],filter_mat_boxes[2]]
probs_filtered = probs[filter_mat_probs]
classes_num_filtered = np.argmax(filter_mat_probs,axis=3)[filter_mat_boxes[0],filter_mat_boxes[1],filter_mat_boxes[2]]
argsort = np.array(np.argsort(probs_filtered))[::-1]
boxes_filtered = boxes_filtered[argsort]
probs_filtered = probs_filtered[argsort]
classes_num_filtered = classes_num_filtered[argsort]
for i in range(len(boxes_filtered)):
if probs_filtered[i] == 0 : continue
for j in range(i+1,len(boxes_filtered)):
if self.iou(boxes_filtered[i],boxes_filtered[j]) > self.iou_threshold :
probs_filtered[j] = 0.0
filter_iou = np.array(probs_filtered>0.0,dtype='bool')
boxes_filtered = boxes_filtered[filter_iou]
probs_filtered = probs_filtered[filter_iou]
classes_num_filtered = classes_num_filtered[filter_iou]
result = []
for i in range(len(boxes_filtered)):
result.append([self.classes[classes_num_filtered[i]],boxes_filtered[i][0],boxes_filtered[i][1],boxes_filtered[i][2],boxes_filtered[i][3],probs_filtered[i]])
return result
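	# Illustrative note (added): the 1470-element output decoded above is laid out
	# per 7x7 grid cell as
	#   7*7*20  = 980 class probabilities    (output[0:980])
	#   7*7*2   =  98 box confidence scales  (output[980:1078])
	#   7*7*2*4 = 392 box coordinates        (output[1078:1470])
	# and the per-box detection score thresholded above is class_prob * scale.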
def show_results(self,img,results):
img_cp = img.copy()
if self.filewrite_txt :
ftxt = open(self.tofile_txt,'w')
for i in range(len(results)):
x = int(results[i][1])
y = int(results[i][2])
w = int(results[i][3])//2
h = int(results[i][4])//2
if self.disp_console : print ' class : ' + results[i][0] + ' , [x,y,w,h]=[' + str(x) + ',' + str(y) + ',' + str(int(results[i][3])) + ',' + str(int(results[i][4]))+'], Confidence = ' + str(results[i][5])
if self.filewrite_img or self.imshow:
cv2.rectangle(img_cp,(x-w,y-h),(x+w,y+h),(0,255,0),2)
cv2.rectangle(img_cp,(x-w,y-h-20),(x+w,y-h),(125,125,125),-1)
cv2.putText(img_cp,results[i][0] + ' : %.2f' % results[i][5],(x-w+5,y-h-7),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),1)
if self.filewrite_txt :
ftxt.write(results[i][0] + ',' + str(x) + ',' + str(y) + ',' + str(w) + ',' + str(h)+',' + str(results[i][5]) + '\n')
if self.filewrite_img :
if self.disp_console : print ' image file writed : ' + self.tofile_img
cv2.imwrite(self.tofile_img,img_cp)
if self.imshow :
cv2.imshow('YOLO_tiny detection',img_cp)
cv2.waitKey(1)
if self.filewrite_txt :
if self.disp_console : print ' txt file writed : ' + self.tofile_txt
ftxt.close()
def iou(self,box1,box2):
tb = min(box1[0]+0.5*box1[2],box2[0]+0.5*box2[2])-max(box1[0]-0.5*box1[2],box2[0]-0.5*box2[2])
lr = min(box1[1]+0.5*box1[3],box2[1]+0.5*box2[3])-max(box1[1]-0.5*box1[3],box2[1]-0.5*box2[3])
if tb < 0 or lr < 0 : intersection = 0
else : intersection = tb*lr
return intersection / (box1[2]*box1[3] + box2[2]*box2[3] - intersection)
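	# Illustrative example (added): boxes are (cx, cy, w, h). For two unit squares
	# centred at (0, 0) and (0.5, 0) the overlap is 0.5 * 1 = 0.5 and the union is
	# 1 + 1 - 0.5 = 1.5, so iou() returns 0.5 / 1.5 = 1/3.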
def training(self): #TODO add training function!
return None
def main(argvs):
yolo = YOLO_TF(argvs)
cv2.waitKey(1000)
if __name__=='__main__':
main(sys.argv)
|
lgpl-3.0
| -605,665,277,583,583,900 | 40.616034 | 209 | 0.645848 | false |
lyan6/genenetwork2
|
wqflask/wqflask/views.py
|
1
|
30443
|
# -*- coding: utf-8 -*-
#
# Main routing table for GN2
from __future__ import absolute_import, division, print_function
import traceback # for error page
import os # for error gifs
import random # for random error gif
import datetime # for errors
import time # for errors
import sys
import csv
import xlsxwriter
import StringIO # Todo: Use cStringIO?
import gc
import cPickle as pickle
import uuid
import simplejson as json
import yaml
#Switching from Redis to StrictRedis; might cause some issues
import redis
Redis = redis.StrictRedis()
import flask
import base64
import array
import sqlalchemy
from wqflask import app
from flask import g, Response, request, make_response, render_template, send_from_directory, jsonify, redirect
from wqflask import search_results
from wqflask import export_traits
from wqflask import gsearch
from wqflask import update_search_results
from wqflask import docs
from wqflask import news
from wqflask.submit_bnw import get_bnw_input
from base.data_set import DataSet # Used by YAML in marker_regression
from wqflask.show_trait import show_trait
from wqflask.show_trait import export_trait_data
from wqflask.heatmap import heatmap
from wqflask.marker_regression import marker_regression
from wqflask.marker_regression import marker_regression_gn1
from wqflask.network_graph import network_graph
from wqflask.correlation import show_corr_results
from wqflask.correlation_matrix import show_corr_matrix
from wqflask.correlation import corr_scatter_plot
from wqflask.wgcna import wgcna_analysis
from wqflask.ctl import ctl_analysis
#from wqflask.trait_submission import submit_trait
from utility import temp_data
from utility.tools import SQL_URI,TEMPDIR,USE_REDIS,USE_GN_SERVER,GN_SERVER_URL,GN_VERSION,JS_TWITTER_POST_FETCHER_PATH
from utility.helper_functions import get_species_groups
from base import webqtlFormData
from base.webqtlConfig import GENERATED_IMAGE_DIR
from utility.benchmark import Bench
from pprint import pformat as pf
from wqflask import user_manager
from wqflask import collect
from wqflask.database import db_session
import werkzeug
import utility.logger
logger = utility.logger.getLogger(__name__ )
@app.before_request
def connect_db():
db = getattr(g, '_database', None)
if db is None:
logger.debug("Get new database connector")
g.db = g._database = sqlalchemy.create_engine(SQL_URI)
logger.debug(g.db)
@app.teardown_appcontext
def shutdown_session(exception=None):
db = getattr(g, '_database', None)
if db is not None:
logger.debug("remove db_session")
db_session.remove()
g.db = None
#@app.before_request
#def trace_it():
# from wqflask import tracer
# tracer.turn_on()
@app.errorhandler(Exception)
def handle_bad_request(e):
err_msg = str(e)
logger.error(err_msg)
logger.error(request.url)
# get the stack trace and send it to the logger
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(traceback.format_exc())
now = datetime.datetime.utcnow()
time_str = now.strftime('%l:%M%p UTC %b %d, %Y')
formatted_lines = [request.url + " ("+time_str+")"]+traceback.format_exc().splitlines()
# Handle random animations
# Use a cookie to have one animation on refresh
animation = request.cookies.get(err_msg[:32])
if not animation:
        gif_list = [fn for fn in os.listdir("./wqflask/static/gif/error") if fn.endswith(".gif")]
        animation = random.choice(gif_list)
resp = make_response(render_template("error.html",message=err_msg,stack=formatted_lines,error_image=animation,version=GN_VERSION))
# logger.error("Set cookie %s with %s" % (err_msg, animation))
resp.set_cookie(err_msg[:32],animation)
return resp
@app.route("/")
def index_page():
logger.info("Sending index_page")
logger.error(request.url)
params = request.args
if 'import_collections' in params:
import_collections = params['import_collections']
if import_collections == "true":
g.cookie_session.import_traits_to_user()
if USE_GN_SERVER:
# The menu is generated using GN_SERVER
return render_template("index_page.html", gn_server_url = GN_SERVER_URL, version=GN_VERSION)
else:
# Old style static menu (OBSOLETE)
return render_template("index_page_orig.html", version=GN_VERSION)
@app.route("/tmp/<img_path>")
def tmp_page(img_path):
logger.info("In tmp_page")
logger.info("img_path:", img_path)
logger.error(request.url)
initial_start_vars = request.form
logger.info("initial_start_vars:", initial_start_vars)
imgfile = open(GENERATED_IMAGE_DIR + img_path, 'rb')
imgdata = imgfile.read()
imgB64 = imgdata.encode("base64")
bytesarray = array.array('B', imgB64)
return render_template("show_image.html",
img_base64 = bytesarray )
@app.route("/twitter/<path:filename>")
def twitter(filename):
return send_from_directory(JS_TWITTER_POST_FETCHER_PATH, filename)
#@app.route("/data_sharing")
#def data_sharing_page():
# logger.info("In data_sharing")
# fd = webqtlFormData.webqtlFormData(request.args)
# logger.info("1Have fd")
# sharingInfoObject = SharingInfo.SharingInfo(request.args['GN_AccessionId'], None)
# info, htmlfilelist = sharingInfoObject.getBody(infoupdate="")
# logger.info("type(htmlfilelist):", type(htmlfilelist))
# htmlfilelist = htmlfilelist.encode("utf-8")
# #template_vars = SharingInfo.SharingInfo(request.args['GN_AccessionId'], None)
# logger.info("1 Made it to rendering")
# return render_template("data_sharing.html",
# info=info,
# htmlfilelist=htmlfilelist)
@app.route("/search", methods=('GET',))
def search_page():
logger.info("in search_page")
logger.error(request.url)
if 'info_database' in request.args:
logger.info("Going to sharing_info_page")
template_vars = sharing_info_page()
if template_vars.redirect_url:
logger.info("Going to redirect")
return flask.redirect(template_vars.redirect_url)
else:
return render_template("data_sharing.html", **template_vars.__dict__)
else:
result = None
if USE_REDIS:
with Bench("Trying Redis cache"):
key = "search_results:v1:" + json.dumps(request.args, sort_keys=True)
logger.debug("key is:", pf(key))
result = Redis.get(key)
if result:
logger.info("Redis cache hit on search results!")
result = pickle.loads(result)
else:
logger.info("Skipping Redis cache (USE_REDIS=False)")
logger.info("request.args is", request.args)
the_search = search_results.SearchResultPage(request.args)
result = the_search.__dict__
logger.debugf("result", result)
if USE_REDIS:
Redis.set(key, pickle.dumps(result, pickle.HIGHEST_PROTOCOL))
Redis.expire(key, 60*60)
if result['search_term_exists']:
return render_template("search_result_page.html", **result)
else:
return render_template("search_error.html")
@app.route("/gsearch", methods=('GET',))
def gsearchact():
logger.error(request.url)
result = gsearch.GSearch(request.args).__dict__
type = request.args['type']
if type == "gene":
return render_template("gsearch_gene.html", **result)
elif type == "phenotype":
return render_template("gsearch_pheno.html", **result)
@app.route("/gsearch_updating", methods=('POST',))
def gsearch_updating():
logger.info("REQUEST ARGS:", request.values)
logger.error(request.url)
result = update_search_results.GSearch(request.args).__dict__
return result['results']
# type = request.args['type']
# if type == "gene":
# return render_template("gsearch_gene_updating.html", **result)
# elif type == "phenotype":
# return render_template("gsearch_pheno.html", **result)
@app.route("/docedit")
def docedit():
logger.error(request.url)
doc = docs.Docs(request.args['entry'])
return render_template("docedit.html", **doc.__dict__)
@app.route('/generated/<filename>')
def generated_file(filename):
logger.error(request.url)
return send_from_directory(GENERATED_IMAGE_DIR,filename)
@app.route("/help")
def help():
logger.error(request.url)
doc = docs.Docs("help")
return render_template("docs.html", **doc.__dict__)
@app.route("/wgcna_setup", methods=('POST',))
def wcgna_setup():
logger.info("In wgcna, request.form is:", request.form) # We are going to get additional user input for the analysis
logger.error(request.url)
return render_template("wgcna_setup.html", **request.form) # Display them using the template
@app.route("/wgcna_results", methods=('POST',))
def wcgna_results():
logger.info("In wgcna, request.form is:", request.form)
logger.error(request.url)
wgcna = wgcna_analysis.WGCNA() # Start R, load the package and pointers and create the analysis
wgcnaA = wgcna.run_analysis(request.form) # Start the analysis, a wgcnaA object should be a separate long running thread
result = wgcna.process_results(wgcnaA) # After the analysis is finished store the result
return render_template("wgcna_results.html", **result) # Display them using the template
@app.route("/ctl_setup", methods=('POST',))
def ctl_setup():
logger.info("In ctl, request.form is:", request.form) # We are going to get additional user input for the analysis
logger.error(request.url)
return render_template("ctl_setup.html", **request.form) # Display them using the template
@app.route("/ctl_results", methods=('POST',))
def ctl_results():
logger.info("In ctl, request.form is:", request.form)
logger.error(request.url)
ctl = ctl_analysis.CTL() # Start R, load the package and pointers and create the analysis
ctlA = ctl.run_analysis(request.form) # Start the analysis, a ctlA object should be a separate long running thread
result = ctl.process_results(ctlA) # After the analysis is finished store the result
return render_template("ctl_results.html", **result) # Display them using the template
@app.route("/news")
def news_route():
newsobject = news.News()
return render_template("news.html", **newsobject.__dict__)
@app.route("/references")
def references():
# doc = docs.Docs("references")
# return render_template("docs.html", **doc.__dict__)
return render_template("reference.html")
@app.route("/intro")
def intro():
doc = docs.Docs("intro")
return render_template("docs.html", **doc.__dict__)
@app.route("/policies")
def policies():
doc = docs.Docs("policies")
return render_template("docs.html", **doc.__dict__)
@app.route("/links")
def links():
doc = docs.Docs("links")
return render_template("docs.html", **doc.__dict__)
@app.route("/environments")
def environments():
doc = docs.Docs("environments")
return render_template("docs.html", **doc.__dict__)
@app.route("/submit_trait")
def submit_trait_form():
logger.error(request.url)
species_and_groups = get_species_groups()
return render_template("submit_trait.html", **{'species_and_groups' : species_and_groups, 'gn_server_url' : GN_SERVER_URL, 'version' : GN_VERSION})
@app.route("/create_temp_trait", methods=('POST',))
def create_temp_trait():
logger.error(request.url)
print("REQUEST.FORM:", request.form)
#template_vars = submit_trait.SubmitTrait(request.form)
doc = docs.Docs("links")
return render_template("links.html", **doc.__dict__)
#return render_template("show_trait.html", **template_vars.__dict__)
@app.route('/export_trait_excel', methods=('POST',))
def export_trait_excel():
"""Excel file consisting of the sample data from the trait data and analysis page"""
logger.info("In export_trait_excel")
logger.info("request.form:", request.form)
logger.error(request.url)
sample_data = export_trait_data.export_sample_table(request.form)
logger.info("sample_data - type: %s -- size: %s" % (type(sample_data), len(sample_data)))
buff = StringIO.StringIO()
workbook = xlsxwriter.Workbook(buff, {'in_memory': True})
worksheet = workbook.add_worksheet()
for i, row in enumerate(sample_data):
worksheet.write(i, 0, row[0])
worksheet.write(i, 1, row[1])
if len(row) > 2:
worksheet.write(i, 2, row[2])
workbook.close()
excel_data = buff.getvalue()
buff.close()
return Response(excel_data,
mimetype='application/vnd.ms-excel',
headers={"Content-Disposition":"attachment;filename=sample_data.xlsx"})
@app.route('/export_trait_csv', methods=('POST',))
def export_trait_csv():
"""CSV file consisting of the sample data from the trait data and analysis page"""
logger.info("In export_trait_csv")
logger.info("request.form:", request.form)
logger.error(request.url)
sample_data = export_trait_data.export_sample_table(request.form)
logger.info("sample_data - type: %s -- size: %s" % (type(sample_data), len(sample_data)))
buff = StringIO.StringIO()
writer = csv.writer(buff)
for row in sample_data:
writer.writerow(row)
csv_data = buff.getvalue()
buff.close()
return Response(csv_data,
mimetype='text/csv',
headers={"Content-Disposition":"attachment;filename=sample_data.csv"})
@app.route('/export_traits_csv', methods=('POST',))
def export_traits_csv():
"""CSV file consisting of the traits from the search result page"""
logger.info("In export_traits_csv")
logger.info("request.form:", request.form)
logger.error(request.url)
csv_data = export_traits.export_search_results_csv(request.form)
return Response(csv_data,
mimetype='text/csv',
headers={"Content-Disposition":"attachment;filename=trait_list.csv"})
@app.route('/export_perm_data', methods=('POST',))
def export_perm_data():
"""CSV file consisting of the permutation data for the mapping results"""
logger.error(request.url)
num_perm = float(request.form['num_perm'])
perm_data = json.loads(request.form['perm_results'])
buff = StringIO.StringIO()
writer = csv.writer(buff)
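# Threshold rows: the suggestive, significant and highly significant LRS values are taken at roughly the
# 37th, 95th and 99th percentiles of the permutation results (this assumes perm_data is sorted ascending).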
writer.writerow(["Suggestive LRS (p=0.63) = " + str(perm_data[int(num_perm*0.37-1)])])
writer.writerow(["Significant LRS (p=0.05) = " + str(perm_data[int(num_perm*0.95-1)])])
writer.writerow(["Highly Significant LRS (p=0.01) = " + str(perm_data[int(num_perm*0.99-1)])])
writer.writerow("")
writer.writerow([str(num_perm) + " Permutations"])
writer.writerow("")
for item in perm_data:
writer.writerow([item])
csv_data = buff.getvalue()
buff.close()
return Response(csv_data,
mimetype='text/csv',
headers={"Content-Disposition":"attachment;filename=perm_data.csv"})
@app.route("/show_temp_trait", methods=('POST',))
def show_temp_trait_page():
logger.error(request.url)
template_vars = show_trait.ShowTrait(request.form)
#logger.info("js_data before dump:", template_vars.js_data)
template_vars.js_data = json.dumps(template_vars.js_data,
default=json_default_handler,
indent=" ")
# Sorting the keys messes up the ordered dictionary, so don't do that
#sort_keys=True)
#logger.info("js_data after dump:", template_vars.js_data)
#logger.info("show_trait template_vars:", pf(template_vars.__dict__))
return render_template("show_trait.html", **template_vars.__dict__)
@app.route("/show_trait")
def show_trait_page():
logger.error(request.url)
template_vars = show_trait.ShowTrait(request.args)
#logger.info("js_data before dump:", template_vars.js_data)
template_vars.js_data = json.dumps(template_vars.js_data,
default=json_default_handler,
indent=" ")
# Sorting the keys messes up the ordered dictionary, so don't do that
#sort_keys=True)
#logger.info("js_data after dump:", template_vars.js_data)
#logger.info("show_trait template_vars:", pf(template_vars.__dict__))
return render_template("show_trait.html", **template_vars.__dict__)
@app.route("/heatmap", methods=('POST',))
def heatmap_page():
logger.info("In heatmap, request.form is:", pf(request.form))
logger.error(request.url)
start_vars = request.form
temp_uuid = uuid.uuid4()
traits = [trait.strip() for trait in start_vars['trait_list'].split(',')]
if traits[0] != "":
version = "v5"
key = "heatmap:{}:".format(version) + json.dumps(start_vars, sort_keys=True)
logger.info("key is:", pf(key))
with Bench("Loading cache"):
result = Redis.get(key)
if result:
logger.info("Cache hit!!!")
with Bench("Loading results"):
result = pickle.loads(result)
else:
logger.info("Cache miss!!!")
template_vars = heatmap.Heatmap(request.form, temp_uuid)
template_vars.js_data = json.dumps(template_vars.js_data,
default=json_default_handler,
indent=" ")
result = template_vars.__dict__
for item in template_vars.__dict__.keys():
logger.info(" ---**--- {}: {}".format(type(template_vars.__dict__[item]), item))
pickled_result = pickle.dumps(result, pickle.HIGHEST_PROTOCOL)
logger.info("pickled result length:", len(pickled_result))
Redis.set(key, pickled_result)
Redis.expire(key, 60*60)
with Bench("Rendering template"):
rendered_template = render_template("heatmap.html", **result)
else:
rendered_template = render_template("empty_collection.html", **{'tool':'Heatmap'})
return rendered_template
@app.route("/mapping_results_container")
def mapping_results_container_page():
return render_template("mapping_results_container.html")
@app.route("/loading", methods=('POST',))
def loading_page():
logger.error(request.url)
initial_start_vars = request.form
logger.debug("Marker regression called with initial_start_vars:", initial_start_vars.items())
#temp_uuid = initial_start_vars['temp_uuid']
wanted = (
'temp_uuid',
'trait_id',
'dataset',
'method',
'trimmed_markers',
'selected_chr',
'chromosomes',
'mapping_scale',
'score_type',
'suggestive',
'significant',
'num_perm',
'permCheck',
'perm_output',
'num_bootstrap',
'bootCheck',
'bootstrap_results',
'LRSCheck',
'covariates',
'maf',
'use_loco',
'manhattan_plot',
'control_marker',
'control_marker_db',
'do_control',
'genofile',
'pair_scan',
'startMb',
'endMb',
'graphWidth',
'lrsMax',
'additiveCheck',
'showSNP',
'showGenes',
'viewLegend',
'haplotypeAnalystCheck',
'mapmethod_rqtl_geno',
'mapmodel_rqtl_geno'
)
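# Only the whitelisted keys above, plus any 'value:'-prefixed sample fields, are forwarded to the loading template.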
start_vars_container = {}
start_vars = {}
for key, value in initial_start_vars.iteritems():
if key in wanted or key.startswith('value:'):
start_vars[key] = value
start_vars_container['start_vars'] = start_vars
rendered_template = render_template("loading.html", **start_vars_container)
return rendered_template
@app.route("/marker_regression", methods=('POST',))
def marker_regression_page():
initial_start_vars = request.form
logger.debug("Marker regression called with initial_start_vars:", initial_start_vars.items())
logger.error(request.url)
temp_uuid = initial_start_vars['temp_uuid']
wanted = (
'trait_id',
'dataset',
'geno_db_exists',
'method',
'mapping_results_path',
'trimmed_markers',
'selected_chr',
'chromosomes',
'mapping_scale',
'plotScale',
'score_type',
'suggestive',
'significant',
'num_perm',
'permCheck',
'perm_output',
'num_bootstrap',
'bootCheck',
'bootstrap_results',
'LRSCheck',
'covariates',
'maf',
'use_loco',
'manhattan_plot',
'control_marker',
'control_marker_db',
'do_control',
'genofile',
'pair_scan',
'startMb',
'endMb',
'graphWidth',
'lrsMax',
'additiveCheck',
'showSNP',
'showGenes',
'viewLegend',
'haplotypeAnalystCheck',
'mapmethod_rqtl_geno',
'mapmodel_rqtl_geno'
)
start_vars = {}
for key, value in initial_start_vars.iteritems():
if key in wanted or key.startswith('value:'):
start_vars[key] = value
logger.debug("Marker regression called with start_vars:", start_vars)
version = "v3"
key = "marker_regression:{}:".format(version) + json.dumps(start_vars, sort_keys=True)
logger.info("key is:", pf(key))
with Bench("Loading cache"):
result = None # Just for testing
#result = Redis.get(key)
#logger.info("************************ Starting result *****************")
#logger.info("result is [{}]: {}".format(type(result), result))
#logger.info("************************ Ending result ********************")
if result:
logger.info("Cache hit!!!")
with Bench("Loading results"):
result = pickle.loads(result)
else:
logger.info("Cache miss!!!")
with Bench("Total time in MarkerRegression"):
template_vars = marker_regression.MarkerRegression(start_vars, temp_uuid)
if template_vars.mapping_method != "gemma" and template_vars.mapping_method != "plink":
template_vars.js_data = json.dumps(template_vars.js_data,
default=json_default_handler,
indent=" ")
result = template_vars.__dict__
if result['pair_scan']:
with Bench("Rendering template"):
img_path = result['pair_scan_filename']
logger.info("img_path:", img_path)
initial_start_vars = request.form
logger.info("initial_start_vars:", initial_start_vars)
imgfile = open(TEMPDIR + img_path, 'rb')
imgdata = imgfile.read()
imgB64 = imgdata.encode("base64")
bytesarray = array.array('B', imgB64)
result['pair_scan_array'] = bytesarray
rendered_template = render_template("pair_scan_results.html", **result)
else:
#for item in template_vars.__dict__.keys():
# logger.info(" ---**--- {}: {}".format(type(template_vars.__dict__[item]), item))
gn1_template_vars = marker_regression_gn1.MarkerRegression(result).__dict__
#pickled_result = pickle.dumps(result, pickle.HIGHEST_PROTOCOL)
#logger.info("pickled result length:", len(pickled_result))
#Redis.set(key, pickled_result)
#Redis.expire(key, 1*60)
with Bench("Rendering template"):
if (gn1_template_vars['mapping_method'] == "gemma") or (gn1_template_vars['mapping_method'] == "plink"):
gn1_template_vars.pop('qtlresults', None)
print("TEMPLATE KEYS:", list(gn1_template_vars.keys()))
rendered_template = render_template("marker_regression_gn1.html", **gn1_template_vars)
# with Bench("Rendering template"):
# if result['pair_scan'] == True:
# img_path = result['pair_scan_filename']
# logger.info("img_path:", img_path)
# initial_start_vars = request.form
# logger.info("initial_start_vars:", initial_start_vars)
# imgfile = open(TEMPDIR + '/' + img_path, 'rb')
# imgdata = imgfile.read()
# imgB64 = imgdata.encode("base64")
# bytesarray = array.array('B', imgB64)
# result['pair_scan_array'] = bytesarray
# rendered_template = render_template("pair_scan_results.html", **result)
# else:
# rendered_template = render_template("marker_regression.html", **result)
# rendered_template = render_template("marker_regression_gn1.html", **gn1_template_vars)
return rendered_template
@app.route("/export_mapping_results", methods = ('POST',))
def export_mapping_results():
logger.info("request.form:", request.form)
logger.error(request.url)
file_path = request.form.get("results_path")
results_csv = open(file_path, "r").read()
response = Response(results_csv,
mimetype='text/csv',
headers={"Content-Disposition":"attachment;filename=mapping_results.csv"})
return response
@app.route("/export", methods = ('POST',))
def export():
logger.info("request.form:", request.form)
logger.error(request.url)
svg_xml = request.form.get("data", "Invalid data")
filename = request.form.get("filename", "manhattan_plot_snp")
response = Response(svg_xml, mimetype="image/svg+xml")
response.headers["Content-Disposition"] = "attachment; filename=%s"%filename
return response
@app.route("/export_pdf", methods = ('POST',))
def export_pdf():
import cairosvg
logger.info("request.form:", request.form)
logger.error(request.url)
svg_xml = request.form.get("data", "Invalid data")
logger.info("svg_xml:", svg_xml)
filename = request.form.get("filename", "interval_map_pdf")
filepath = GENERATED_IMAGE_DIR+filename
pdf_file = cairosvg.svg2pdf(bytestring=svg_xml)
response = Response(pdf_file, mimetype="application/pdf")
response.headers["Content-Disposition"] = "attachment; filename=%s"%filename
return response
@app.route("/network_graph", methods=('POST',))
def network_graph_page():
logger.info("In network_graph, request.form is:", pf(request.form))
logger.error(request.url)
start_vars = request.form
traits = [trait.strip() for trait in start_vars['trait_list'].split(',')]
if traits[0] != "":
template_vars = network_graph.NetworkGraph(start_vars)
template_vars.js_data = json.dumps(template_vars.js_data,
default=json_default_handler,
indent=" ")
return render_template("network_graph.html", **template_vars.__dict__)
else:
return render_template("empty_collection.html", **{'tool':'Network Graph'})
@app.route("/corr_compute", methods=('POST',))
def corr_compute_page():
logger.info("In corr_compute, request.form is:", pf(request.form))
logger.error(request.url)
#fd = webqtlFormData.webqtlFormData(request.form)
template_vars = show_corr_results.CorrelationResults(request.form)
return render_template("correlation_page.html", **template_vars.__dict__)
@app.route("/corr_matrix", methods=('POST',))
def corr_matrix_page():
logger.info("In corr_matrix, request.form is:", pf(request.form))
logger.error(request.url)
start_vars = request.form
traits = [trait.strip() for trait in start_vars['trait_list'].split(',')]
if traits[0] != "":
template_vars = show_corr_matrix.CorrelationMatrix(start_vars)
template_vars.js_data = json.dumps(template_vars.js_data,
default=json_default_handler,
indent=" ")
return render_template("correlation_matrix.html", **template_vars.__dict__)
else:
return render_template("empty_collection.html", **{'tool':'Correlation Matrix'})
@app.route("/corr_scatter_plot")
def corr_scatter_plot_page():
logger.error(request.url)
template_vars = corr_scatter_plot.CorrScatterPlot(request.args)
template_vars.js_data = json.dumps(template_vars.js_data,
default=json_default_handler,
indent=" ")
return render_template("corr_scatterplot.html", **template_vars.__dict__)
@app.route("/submit_bnw", methods=('POST',))
def submit_bnw():
logger.error(request.url)
template_vars = get_bnw_input(request.form)
return render_template("empty_collection.html", **{'tool':'Correlation Matrix'})
# Todo: Can we simplify this? -Sam
def sharing_info_page():
"""Info page displayed when the user clicks the "Info" button next to the dataset selection"""
logger.info("In sharing_info_page")
logger.error(request.url)
fd = webqtlFormData.webqtlFormData(request.args)
template_vars = SharingInfoPage.SharingInfoPage(fd)
return template_vars
# Take this out or secure it before putting into production
@app.route("/get_temp_data")
def get_temp_data():
logger.error(request.url)
temp_uuid = request.args['key']
return flask.jsonify(temp_data.TempData(temp_uuid).get_all())
##########################################################################
def json_default_handler(obj):
'''Based on http://stackoverflow.com/a/2680060/1175849'''
# Handle datestamps
if hasattr(obj, 'isoformat'):
return obj.isoformat()
# Handle integer keys for dictionaries
elif isinstance(obj, int):
return str(obj)
# Handle custom objects
if hasattr(obj, '__dict__'):
return obj.__dict__
#elif type(obj) == "Dataset":
# logger.info("Not going to serialize Dataset")
# return None
else:
raise TypeError, 'Object of type %s with value of %s is not JSON serializable' % (
type(obj), repr(obj))
|
agpl-3.0
| -2,919,960,334,922,159,000 | 36.817391 | 151 | 0.621621 | false |
Praetonus/Arc
|
src/arc/huffman.py
|
1
|
5696
|
#!/usr/bin/python3.4
#-*- coding: utf-8 -*-
########################################################################
# Copyright 2014 Benoît Vey #
# #
# This file is part of Arc. #
# #
# Arc is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# Arc is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with Arc. If not, see <http://www.gnu.org/licenses/>. #
########################################################################
import ctypes
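# compress(): count byte frequencies, build the Huffman tree, derive the code table and write the
# bit-packed output; decompress() reverses the process using the code table stored in the file header.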
def compress(pathI, pathO):
with open(pathI, "rb") as inputFile:
freqs = frequencies(inputFile)
rootNode = makeTree(freqs)
codes = makeCodes(rootNode)
inputFile.seek(0)
cmpStr = compressedString(inputFile, codes)
with open(pathO, "wb") as outputFile:
cmpWrite(outputFile, codes, cmpStr)
def decompress(pathI, pathO):
with open(pathI, "rb") as inputFile:
freqMap = makeFreqMap(inputFile)
cmpStr = makeString(inputFile)
with open(pathO, "wb") as outputFile:
decmpWrite(outputFile, freqMap, cmpStr)
class Node:
def __init__(self, char, weight):
self.leftLeaf = None
self.rightLeaf = None
self.char = char
self.weight = weight
self.isEnd = True
self.pastLeft = True
self.pastRight = True
def frequencies(inputFile):
freqs = {}
char = bytes()
while 1:
char = inputFile.read(1)
if char == b"":
break
if char in freqs:
freqs[char] += 1
else:
freqs[char] = 1
return freqs
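# Build the Huffman tree bottom-up by repeatedly merging the two lowest-weight nodes; each node is
# wrapped in ctypes.pointer(ctypes.py_object(...)) and children are reached through the [0] dereference.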
def makeTree(freqs):
nodes = []
for char, weight in freqs.items():
nodes.append(ctypes.pointer(ctypes.py_object(Node(char, weight))))
while len(nodes) > 1:
node1 = nodes[0]
nodes.remove(nodes[0])
node2 = nodes[0]
nodes.remove(nodes[0])
newNode = ctypes.pointer(ctypes.py_object(Node(b"", node1[0].weight + node2[0].weight)))
newNode[0].leftLeaf = node1
newNode[0].rightLeaf = node2
newNode[0].isEnd = False
newNode[0].pastLeft = False
newNode[0].pastRight = False
i = 0
while i < len(nodes) and nodes[i][0].weight < newNode[0].weight:
i += 1
nodes.insert(i, newNode)
return nodes[0]
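# Repeatedly walk from the root to the next unvisited leaf: the pastLeft/pastRight flags mark exhausted
# branches, and the accumulated 0/1 path down to each leaf becomes that byte's code.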
def makeCodes(root):
codes = {}
while 1:
currentNode = root
code = str()
blocked = False
while not currentNode[0].isEnd and not blocked:
if not currentNode[0].pastLeft:
if currentNode[0].leftLeaf[0].pastLeft and currentNode[0].leftLeaf[0].pastRight:
currentNode[0].pastLeft = True
currentNode = currentNode[0].leftLeaf
code += "0"
elif not currentNode[0].pastRight:
if currentNode[0].rightLeaf[0].pastLeft and currentNode[0].rightLeaf[0].pastRight:
currentNode[0].pastRight = True
currentNode = currentNode[0].rightLeaf
code += "1"
else:
blocked = True
if currentNode[0].isEnd:
codes[currentNode[0].char] = code
currentNode[0].pastLeft = True
currentNode[0].pastRight = True
if blocked and currentNode == root:
break
return codes
def compressedString(inputFile, codes):
cmpStr = str()
char = bytes()
while 1:
char = inputFile.read(1)
if char == b"":
break
if char in codes:
cmpStr += codes[char]
while len(cmpStr) % 8 != 0:
cmpStr += "0"
return cmpStr
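# File layout: one byte giving the number of codes, then for each symbol its raw byte followed by its code
# left-padded to 8 bits with the complement of the code's first bit, then the packed bit stream.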
def cmpWrite(outputFile, codes, cmpStr):
outputFile.write(len(codes).to_bytes(1, "little"))
for char, code in codes.items():
outputFile.write(char)
if (code[0] == "0"):
while len(code) < 8:
code = "1" + code
else:
while len(code) < 8:
code = "0" + code
value = 0
for i in range(0, 8):
if code[7 - i] == "1":
value += 2 ** i
outputFile.write(value.to_bytes(1, "little"))
value = 0
count = 0
for char in cmpStr:
if char == "1":
value += 2 ** (7 - count)
if count == 7:
outputFile.write(value.to_bytes(1, "little"))
value = 0
count = 0
else:
count += 1
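# Rebuild the code table written by cmpWrite: each stored byte is expanded to 8 bits and the leading
# padding run is stripped, giving a map from code string back to the original byte value.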
def makeFreqMap(inputFile):
freqMap = {}
size = int.from_bytes(inputFile.read(1), "little")
for i in range(0, size):
char = int.from_bytes(inputFile.read(1), "little")
strValue = int.from_bytes(inputFile.read(1), "little")
strCode = []
j = 7
while j >= 0:
if strValue >= 2 ** j:
strValue -= 2 ** j
strCode.append("1")
else:
strCode.append("0")
j -= 1
if strCode[0] == "1":
while strCode[0] == "1":
strCode.pop(0)
else:
while strCode[0] == "0":
strCode.pop(0)
freqMap[''.join(strCode)] = char
return freqMap
def makeString(inputFile):
cmpStr = []
byteStr = bytes()
byte = 0
while 1:
byteStr = inputFile.read(1)
if byteStr == b"":
break
byte = int.from_bytes(byteStr, "little")
i = 7
while i >= 0:
if byte >= 2 ** i:
byte -= 2 ** i
cmpStr.append("1")
else:
cmpStr.append("0")
i -= 1
return cmpStr
def decmpWrite(outputFile, freqMap, cmpStr):
tmpStr = str()
while len(cmpStr) > 0:
tmpStr += cmpStr[0]
cmpStr.pop(0)
if tmpStr in freqMap:
outputFile.write(freqMap[tmpStr].to_bytes(1, "little"))
tmpStr = str()
|
gpl-3.0
| -5,932,957,318,099,833,000 | 26.780488 | 90 | 0.58086 | false |
avaris/aBibliophile
|
searchmodel.py
|
1
|
1847
|
#!/usr/bin/env python
# -.- coding: utf-8 -.-
# Author : Deniz Turgut
# Created : 05.11.2011
from PyQt4 import QtGui, QtCore
class SearchModel(QtGui.QSortFilterProxyModel):
def __init__(self, parent=None):
super(SearchModel, self).__init__(parent)
self.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.setSortLocaleAware(True)
self.setDynamicSortFilter(True)
self.setSourceModel(SearchBaseModel())
self.sort(0)
def clear(self):
self.sourceModel().clear()
def addDataFromList(self, bookList):
self.sourceModel().addDataFromList(bookList)
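# Flat, single-column list model backing SearchModel: it stores one dict per search hit and exposes the
# title, a writer/publisher tooltip and the URL through the standard item-data roles.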
class SearchBaseModel(QtCore.QAbstractItemModel):
def __init__(self, parent=None):
super(SearchBaseModel, self).__init__(parent)
self._data = []
def rowCount(self, parent):
return len(self._data)
def columnCount(self, parent):
return 1
def index(self, row, column, parent):
return self.createIndex(row, column, QtCore.QModelIndex())
def parent(self, index):
return QtCore.QModelIndex()
def data(self, index, role):
if role == QtCore.Qt.DisplayRole:
return self._data[index.row()]["title"]
elif role == QtCore.Qt.ToolTipRole:
writer = ", ".join(self._data[index.row()]["writers"])
publisher = self._data[index.row()]["publisher"]
return self.tr("Writer: %s\nPublisher: %s") % (writer, publisher)
elif role == QtCore.Qt.UserRole:
return self._data[index.row()]["url"]
def addData(self, data):
self._data.append(data)
def addDataFromList(self, dataList):
self.layoutAboutToBeChanged.emit()
for data in dataList:
self.addData(data)
self.layoutChanged.emit()
def clear(self):
self._data=[]
|
gpl-3.0
| -507,099,931,512,108,800 | 29.278689 | 77 | 0.617759 | false |
guyemerson/sem-func
|
src/core/intrinsic.py
|
1
|
5876
|
import os, pickle, numpy as np
from collections import Counter
from math import log
from warnings import warn
from __config__.filepath import AUX_DIR, FREQ_FILE, VOCAB_FILE
from utils import product
# Load vocabulary
with open(os.path.join(AUX_DIR, VOCAB_FILE), 'rb') as f:
pred_name = pickle.load(f)
with open(os.path.join(AUX_DIR, FREQ_FILE), 'rb') as f:
pred_freq = pickle.load(f)
verbs = [i for i,x in enumerate(pred_name) if x.rsplit('_', 2)[1] == 'v']
nouns = [i for i,x in enumerate(pred_name) if x.rsplit('_', 2)[1] == 'n']
noun_mask = np.array([x.rsplit('_', 2)[1] == 'n' for x in pred_name])
verb_mask = np.array([x.rsplit('_', 2)[1] == 'v' for x in pred_name])
def generate_random_data(n_trans, n_subj, n_obj):
"""
Generate a random set of tuples
:param n_trans: number of transitive tuples
:param n_subj: number of subject-verb tuples
:param n_obj: number of verb-object tuples
:return: list of (verb, subj, obj) tuples, with None for missing arguments
"""
# Get noun and verb tokens to sample from
verb_tokens = np.zeros(pred_freq[verbs].sum(), dtype='int64')
i = 0
for p in verbs:
f = pred_freq[p]
verb_tokens[i : i+f] = p
i += f
noun_tokens = np.zeros(pred_freq[nouns].sum(), dtype='int64')
i = 0
for p in nouns:
f = pred_freq[p]
noun_tokens[i : i+f] = p
i += f
# Sample the tuples
n_total = n_trans + n_subj + n_obj
subj = np.random.choice(noun_tokens, n_total)
verb = np.random.choice(verb_tokens, n_total)
obj = np.random.choice(noun_tokens, n_total)
data = [(int(verb[i]), int(subj[i]), int(obj[i])) for i in range(n_trans)]
data += [(int(verb[i]), int(subj[i]), None) for i in range(n_trans, n_trans+n_subj)]
data += [(int(verb[i]), None, int(obj[i])) for i in range(n_trans+n_subj, n_trans+n_subj+n_obj)]
return data
def separate_prob(data):
"""
Convert a list of SVO triples with missing arguments,
to three counts of tuples
:param data: list of triples
:return: {SVO}, {SV}, {VO}
"""
svo, sv, vo = Counter(), Counter(), Counter()
for verb, subj, obj in data:
if subj is None:
vo[verb, obj] += 1
elif obj is None:
sv[subj, verb] += 1
else:
svo[subj, verb, obj] += 1
totals = [sum(c.values()) for c in (svo, sv, vo)]
probs = [{tup: num/totals[i] for tup, num in c.items()} for i,c in enumerate((svo, sv, vo))]
return probs
def KL(P, Q):
"""
Calculate Kullback-Leibler divergence from Q to P
Both P and Q should be dicts from elements to probabilities
:param P: true distribution
:param Q: approximating distribution
:return: divergence
"""
# sum_x P(x) ( logP(x) - logQ(x) )
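# Note: this assumes Q assigns a non-zero probability to every item of P; a missing key raises KeyError.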
res = 0
for item, prob in P.items():
res += prob * (log(prob) - log(Q[item]))
return res
def compare_KL(model, real_data, fake_data, samples=(100,100,100), **kwargs):
"""
Approximately calculate the Kullback-Leibler divergence from the model to two sets of data
:param model: the sem-func model
:param real_data: first set of tuples
:param fake_data: second set of tuples
:param samples: number of samples to draw, for: SVO, SV, VO graphs
:return: (real KLs, fake KLs), each for (SVO, SV, VO) subsets
"""
# Get sample probabilities from the data
real_prob = separate_prob(real_data)
fake_prob = separate_prob(fake_data)
# Initialise counts for generated samples
real_match = [{tup: 0 for tup in c} for c in real_prob]
fake_match = [{tup: 0 for tup in c} for c in fake_prob]
# Sample from the model
sampler = [model.sample_background_svo, model.sample_background_sv, model.sample_background_vo]
for i in range(3):
# Sample entities for each graph configuration
for ents in sampler[i](samples=samples[i], **kwargs):
# For the sampled entities, find the distribution over predicates
pred_dist = [model.pred_dist(e) for e in ents]
# Add the probability that this sample would generate the observed predicates
for preds in real_match[i]:
real_match[i][preds] += product(pred_dist[j][p] for j,p in enumerate(preds))
for preds in fake_match[i]:
fake_match[i][preds] += product(pred_dist[j][p] for j,p in enumerate(preds))
# Average the probabilities
for preds in real_match[i]:
real_match[i][preds] /= samples[i]
for preds in fake_match[i]:
fake_match[i][preds] /= samples[i]
real_KL = [KL(real_prob[i], real_match[i]) for i in range(3)]
fake_KL = [KL(fake_prob[i], fake_match[i]) for i in range(3)]
return real_KL, fake_KL
def baseline_KL(real_data, fake_data):
"""
Calculate the Kullback-Leibler divergence from the null hypothesis (sample nouns and verbs according to frequency) to two sets of data
:param real_data: first set of tuples
:param fake_data: second set of tuples
:return: (real KLs, fake KLs), each for (SVO, SV, VO) subsets
"""
real_prob = separate_prob(real_data)
fake_prob = separate_prob(fake_data)
noun_prob = pred_freq * noun_mask / pred_freq[nouns].sum()
verb_prob = pred_freq * verb_mask / pred_freq[verbs].sum()
both_prob = noun_prob + verb_prob
real_match = [{tup: product(both_prob[p] for p in tup)
for tup in c}
for c in real_prob]
fake_match = [{tup: product(both_prob[p] for p in tup)
for tup in c}
for c in fake_prob]
real_KL = [KL(real_prob[i], real_match[i]) for i in range(3)]
fake_KL = [KL(fake_prob[i], fake_match[i]) for i in range(3)]
return real_KL, fake_KL
|
mit
| -4,691,154,396,318,563,000 | 34.403614 | 138 | 0.602451 | false |
epam/DLab
|
integration-tests/examples/copy_files.py
|
1
|
4782
|
#!/usr/bin/python
# *****************************************************************************
#
# Copyright (c) 2016, EPAM SYSTEMS INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ******************************************************************************
import os, sys, json
import argparse
from fabric.api import *
parser = argparse.ArgumentParser()
parser.add_argument('--storage', type=str, default='S3/GCP buckets, Azure Blob container / Datalake folder')
parser.add_argument('--notebook', type=str, default='aws, azure, gcp')
parser.add_argument('--cloud', type=str, default='aws, azure, gcp')
parser.add_argument('--azure_storage_account', type=str, default='')
parser.add_argument('--azure_datalake_account', type=str, default='')
args = parser.parse_args()
dataset_file = ['airports.csv', 'carriers.csv', '2008.csv.bz2']
def download_dataset():
try:
for f in dataset_file:
local('wget http://stat-computing.org/dataexpo/2009/{0} -O /tmp/{0}'.format(f))
except Exception as err:
print('Failed to download test dataset', str(err))
sys.exit(1)
def upload_aws():
try:
for f in dataset_file:
local('aws s3 cp /tmp/{0} s3://{1}/{2}_dataset/ --sse AES256'.format(f, args.storage, args.notebook))
except Exception as err:
print('Failed to upload test dataset to bucket', str(err))
sys.exit(1)
def upload_azure_datalake():
try:
from azure.datalake.store import core, lib, multithread
sp_creds = json.loads(open(os.environ['AZURE_AUTH_LOCATION']).read())
dl_filesystem_creds = lib.auth(tenant_id=json.dumps(sp_creds['tenantId']).replace('"', ''),
client_secret=json.dumps(sp_creds['clientSecret']).replace('"', ''),
client_id=json.dumps(sp_creds['clientId']).replace('"', ''),
resource='https://datalake.azure.net/')
datalake_client = core.AzureDLFileSystem(dl_filesystem_creds, store_name=args.azure_datalake_account)
for f in dataset_file:
multithread.ADLUploader(datalake_client,
lpath='/tmp/{0}'.format(f),
rpath='{0}/{1}_dataset/{2}'.format(args.storage, args.notebook, f))
except Exception as err:
print('Failed to upload test dataset to datalake store', str(err))
sys.exit(1)
def upload_azure_blob():
try:
from azure.mgmt.storage import StorageManagementClient
from azure.storage.blob import BlockBlobService
from azure.common.client_factory import get_client_from_auth_file
storage_client = get_client_from_auth_file(StorageManagementClient)
resource_group_name = ''
for i in storage_client.storage_accounts.list():
if args.storage.replace('container', 'storage') == str(i.tags.get('Name')):
resource_group_name = str(i.tags.get('SBN'))
secret_key = storage_client.storage_accounts.list_keys(resource_group_name, args.azure_storage_account).keys[0].value
block_blob_service = BlockBlobService(account_name=args.azure_storage_account, account_key=secret_key)
for f in dataset_file:
block_blob_service.create_blob_from_path(args.storage, '{0}_dataset/{1}'.format(args.notebook, f), '/tmp/{0}'.format(f))
except Exception as err:
print('Failed to upload test dataset to blob storage', str(err))
sys.exit(1)
def upload_gcp():
try:
for f in dataset_file:
local('gsutil -m cp /tmp/{0} gs://{1}/{2}_dataset/'.format(f, args.storage, args.notebook))
except Exception as err:
print('Failed to upload test dataset to bucket', str(err))
sys.exit(1)
if __name__ == "__main__":
download_dataset()
if args.cloud == 'aws':
upload_aws()
elif args.cloud == 'azure':
os.environ['AZURE_AUTH_LOCATION'] = '/home/dlab-user/keys/azure_auth.json'
if args.azure_datalake_account:
upload_azure_datalake()
else:
upload_azure_blob()
elif args.cloud == 'gcp':
upload_gcp()
else:
print('Error! Unknown cloud provider.')
sys.exit(1)
sys.exit(0)
|
apache-2.0
| 599,134,483,112,161,700 | 42.472727 | 132 | 0.610832 | false |
gem/sidd
|
sidd/operator/loaders/ms.py
|
1
|
3048
|
# Copyright (c) 2011-2013, ImageCat Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
module contains class for loading mapping scheme files
mapping scheme format defined in ms module
"""
from os.path import exists
from sidd.constants import logAPICall
from sidd.operator import Operator,OperatorError, OperatorDataError
from sidd.operator.data import OperatorDataTypes
from sidd.taxonomy import get_taxonomy, Taxonomy
from sidd.ms.ms import MappingScheme
class MappingSchemeLoader(Operator):
""" operator loading mapping scheme from XML """
def __init__(self, options=None, name='Mapping Scheme Loaded'):
""" constructor """
Operator.__init__(self, options, name)
if isinstance(options['taxonomy'], Taxonomy):
self._taxonomy = options['taxonomy']
else:
self._taxonomy = get_taxonomy(options['taxonomy'])
# self documenting method override
###########################
@property
def input_types(self):
return [OperatorDataTypes.File]
@property
def input_names(self):
return ["Mapping Scheme File"]
input_descriptions = input_names
@property
def output_types(self):
return [OperatorDataTypes.MappingScheme]
@property
def output_names(self):
return ["Mapping Scheme"]
# public method override
###########################
@logAPICall
def do_operation(self):
""" perform ms loading """
# verify that input/output data is correctly set
in_file = self.inputs[0].value
# load data
ms = MappingScheme(self._taxonomy)
ms.read(in_file)
# verify that input data is loaded correctly
if not ms.is_valid:
raise OperatorError('Error Loading data file %s' % in_file, self.__class__)
self.outputs[0].value = ms
# protected method override
###########################
def _verify_inputs(self, inputs):
""" perform operator specific output validation """
if not exists(inputs[0].value):
raise OperatorDataError("input file %s does not exist" % (inputs[0].value))
def _verify_outputs(self, outputs):
""" perform operator specific output validation """
pass
|
agpl-3.0
| 1,110,092,615,301,480,200 | 31.648352 | 87 | 0.621063 | false |
jumoconnect/openjumo
|
jumodjango/org/models.py
|
1
|
14137
|
from action.models import Action
from django.conf import settings
from django.contrib.contenttypes import generic
from django.db import models
from entity_items.models import Advocate, ContentItem, MediaItem, TimelineItem
from entity_items.models.location import Location
from etc import cache
from etc.templatetags.tags import _create_static_url
from issue.models import Issue
from lib.image_upload import ImageSize, ImageType, S3EnabledImageField
from users.models import User
from commitment.models import Commitment
REVENUE_CHOICES = (
("less than $100,000","less than $100,000",),
("$100,000 - $1,000,000","$100,000 - $1,000,000",),
("$1m - $5m","$1m - $5m",),
("$5m - $20m","$5m - $20m",),
("more than $20m","more than $20m",),
)
SIZE_CHOICES = (
("1-10","1-10"),
("10-50","10-50",),
("51-100","51-100",),
("100+","100+",),
)
class Org(models.Model):
#Public Properties
id = models.AutoField(db_column='org_id', primary_key=True)
name = models.CharField(max_length=200, verbose_name="Organization Name")
summary = models.CharField(max_length=255, verbose_name="Vision Statement")
handle = models.CharField(max_length=210, unique=True, verbose_name="Organization Handle",
help_text="Your organization's unique handle used for your public Jumo page: www.jumo.com/<b>HANDLE</b>")
ein = models.CharField(max_length=12, blank=True, verbose_name="EIN",
help_text="*Not required, but must be provided for 501(c)(3)'s that wish to receive donations on Jumo. Find your organization's EIN <a target='_blank' href='http://nccsdataweb.urban.org/PubApps/990search.php?a=a&bmf=1'>here</a>.")
email = models.EmailField(blank=True)
phone_number = models.CharField(max_length=50, blank=True, verbose_name="Phone")
img_small_url = S3EnabledImageField(image_type=ImageType.ORG, image_size=ImageSize.SMALL, blank=True)
img_large_url = S3EnabledImageField(image_type=ImageType.ORG, image_size=ImageSize.LARGE, blank=True)
year_founded = models.IntegerField(max_length=4, blank=True, null=True, verbose_name="Year Founded")
revenue = models.CharField(max_length=32, blank=True, choices=REVENUE_CHOICES, verbose_name="Revenue Size")
size = models.CharField(max_length=32, blank=True, choices=SIZE_CHOICES, verbose_name="# of Employees")
blog_url = models.URLField(verify_exists = False, blank=True, verbose_name="Blog")
site_url = models.URLField(verify_exists = False, blank=True, verbose_name="Website")
facebook_id = models.BigIntegerField(max_length=41, blank=True, null=True, verbose_name="Facebook ID")
twitter_id = models.CharField(max_length=64, blank=True, verbose_name="Twitter Username")
youtube_id = models.CharField(max_length=64, blank=True, verbose_name="YouTube Username")
flickr_id = models.CharField(max_length=64, blank=True, verbose_name="Flickr Username")
location = models.ForeignKey(Location, null=True, blank=True, related_name='location', verbose_name="Headquarters")
#Internal Properties
is_vetted = models.BooleanField(default=False)
is_active = models.BooleanField(default=True, verbose_name="Is Active") #Replaces the old ignore field.
donation_enabled = models.BooleanField(default=False, verbose_name="Is Donation Enabled")
claim_token = models.CharField(max_length = 32, blank = True, verbose_name="Claim Token")
is_claimed = models.BooleanField(default=False, verbose_name="Is Claimed")
date_created = models.DateTimeField(auto_now_add=True, verbose_name="Date Created")
date_updated = models.DateTimeField(auto_now=True, verbose_name="Date Updated")
facebook_last_fetched = models.CharField(max_length=24, null=True, blank=True, default=None, verbose_name='Facebook Last Fetched')
twitter_last_status_id = models.BigIntegerField(null=True, verbose_name='Twitter Last Status ID')
#Relationship Properties
admins = models.ManyToManyField(User, related_name = 'admins', db_table='org_org_admins')
content = generic.GenericRelation(ContentItem, related_name='content')
actions = generic.GenericRelation(Action, related_name='org_actions')
advocates = generic.GenericRelation(Advocate, related_name='advocates')
timeline = generic.GenericRelation(TimelineItem, related_name='timeline')
media = generic.GenericRelation(MediaItem, related_name='media')
followers = models.ManyToManyField(User, symmetrical=False, through='UserToOrgFollow', related_name='followed_orgs')
related_orgs = models.ManyToManyField('self', symmetrical = False, through='RelatedOrg', related_name="orgrelatedorgs")
working_locations = models.ManyToManyField(Location, null=True, symmetrical=False, related_name="working_locations",
db_table="org_working_locations", verbose_name="Working In")
issues = models.ManyToManyField(Issue, through='OrgIssueRelationship', verbose_name="Working On")
commitments = generic.GenericRelation(Commitment)
#aliases
class Meta:
verbose_name = "Org"
verbose_name_plural = "Orgs"
db_table = "orgs"
def __unicode__(self):
return self.name
def save(self):
#Note: I want to move all this img stuff to the forms that set them...
#not here on the model. This is a hack so we ensure the model id is
#used in the filename.
if not self.id and not self.img_large_url._committed:
#most likely you need to watch small img too
small_url_comm = self.img_small_url._committed
self.img_small_url._committed = True
self.img_large_url._committed = True
super(Org, self).save()
self.img_large_url._committed = False
self.img_small_url._committed = small_url_comm
if not self.id and not self.img_small_url._committed:
self.img_small_url._committed = True
super(Org, self).save()
self.img_small_url._committed = False
self.img_large_url.storage.inst_id = self.id
self.img_small_url.storage.inst_id = self.id
super(Org, self).save()
cache.bust(self)
@models.permalink
def get_absolute_url(self):
return ('entity_url', [self.handle])
@classmethod
def get(cls, id, force_db=False):
if force_db:
org = Org.objects.get(id=id)
cache.bust(org)
return org
return cache.get(cls, id)
@classmethod
def multiget(cls, ids, force_db=False):
if force_db:
return Org.objects.filter(id__in=ids)
return cache.get(cls, ids)
@property
def get_image_small(self):
if self.img_small_url:
return self.img_small_url.url
if self.facebook_id:
return 'http://graph.facebook.com/%s/picture?type=square' % self.facebook_id
return ''
@property
def get_image_large(self):
if self.img_large_url:
return self.img_large_url.url
if self.facebook_id:
return 'http://graph.facebook.com/%s/picture?type=large' % self.facebook_id
return ''
@property
def get_url(self):
return '/%s' % self.handle
@property
def get_name(self):
return self.name
@property
@cache.collection_cache(Action, '_all_actions')
def get_all_actions(self):
return self.actions.all().order_by('rank')
@property
@cache.collection_cache(Advocate, '_all_advocates')
def get_all_advocates(self):
return self.advocates.all()
@property
@cache.collection_cache(TimelineItem, '_all_timeline_items')
def get_all_timeline_items(self):
return self.timeline.all().order_by('year')
@property
@cache.collection_cache(MediaItem, '_all_media_items')
def get_all_media_items(self):
return self.media.all().order_by('position')
@property
@cache.collection_cache(MediaItem, '_photo_media_items')
def get_all_photos(self):
return self.media.filter(media_type="photo").order_by('position')
@property
@cache.collection_cache(ContentItem, '_all_content')
def get_all_content(self):
return self.content.all().order_by('position')
@property
def get_sub_heading_text(self):
t = ""
if self.year_founded:
t += "Since %s" % self.year_founded
if self.get_location:
if self.year_founded:
t += " // "
print t
t += str(self.get_location)
print t
if self.size:
if self.year_founded or self.get_location:
t += " // "
t += "%s employees" % self.size
if self.revenue:
if self.year_founded or self.size or self.get_location:
t += " // "
t += "%s revenue" % self.revenue
if self.site_url:
if self.year_founded or self.revenue or self.get_location or self.size:
t += " // "
t += self.site_url
return t
@property
def get_left_section_content(self):
return [item for item in self.get_all_content if item.section == ContentItem.ContentSection.LEFT]
@property
def get_center_section_content(self):
return [item for item in self.get_all_content if item.section == ContentItem.ContentSection.CENTER]
_location = None
@property
def get_location(self):
if self._location is not None:
return self._location
self._location = self.location
cache.put_on_handle(self, self.handle)
return self._location
@property
@cache.collection_cache(Location, '_working_locations')
def get_working_locations(self):
return self.working_locations.all()
@property
@cache.collection_cache(User, '_admins')
def get_admins(self):
return self.admins.all()
@property
@cache.collection_cache(User, '_all_followers')
def get_all_followers(self):
commitments = self.commitments.active().select_related()
return [c.user for c in commitments]
@property
def get_all_follower_ids(self):
return self.usertoorgfollow_set.filter(following = True).values_list('user', flat=True)
@property
def get_num_followers(self):
return self.commitments.active().count()
@property
def get_sample_followers(self):
commitments = self.commitments.active()[:16].select_related()
return [c.user for c in commitments]
@property
@cache.collection_cache(Issue, '_all_issues')
def get_all_issues(self):
return Issue.objects.filter(id__in = self.get_all_issues_ids)
@property
def get_all_issues_ids(self):
return self.orgissuerelationship_set.values_list('issue', flat = True)
@property
@cache.collection_cache('org.Org', '_all_related_orgs')
def get_all_related_orgs(self):
return self.related_orgs.all()
def get_related_orgs_for_user(self, user):
if not hasattr(self, '_all_related_orgs') or getattr(self, '_all_related_orgs') is None:
self.get_all_related_orgs
pos = dict((id, idx) for idx, id in enumerate(self._all_related_orgs['ids']))
orgs = sorted(list(set(self._all_related_orgs['ids']).difference(user.get_orgs_following_ids)), key=lambda id: pos[id])
return list(cache.get(Org, orgs[0:5]))
def delete(self):
cache.bust_on_handle(self, self.handle, False)
return super(self.__class__, self).delete()
def is_editable_by(self, user):
return not self.is_vetted and (user.is_staff or user in self.admins.all())
class Alias(models.Model):
"""
Another name an org might be known as.
"""
org = models.ForeignKey(Org)
alias = models.CharField(max_length=200)
date_created = models.DateTimeField(auto_now_add=True, verbose_name="Date Created")
date_updated = models.DateTimeField(auto_now=True, verbose_name="Date Updated")
class Meta:
unique_together = (("org", "alias"),)
db_table = 'org_alias'
def __unicode__(self):
return self.alias
class UserToOrgFollow(models.Model):
following = models.BooleanField(default = True, db_index = True)
started_following = models.DateTimeField(auto_now_add = True)
stopped_following = models.DateTimeField(blank = True, null = True)
user = models.ForeignKey(User)
org = models.ForeignKey(Org)
class Meta:
unique_together = (("user", "org"),)
verbose_name = "User Following Org"
verbose_name = "Users Following Orgs"
db_table = 'org_usertoorgfollow'
def __unicode__(self):
return "User '%s' following Org '%s'" % (self.user, self.org)
class RelatedOrg(models.Model):
org = models.ForeignKey(Org, related_name="org")
related_org = models.ForeignKey(Org, related_name="related_org")
rank = models.FloatField() #Value determined by magic algo that generated this item.
date_created = models.DateTimeField(auto_now_add=True, verbose_name="Date Created")
date_updated = models.DateTimeField(auto_now=True, verbose_name="Date Updated")
class Meta:
db_table = 'related_orgs'
ordering = ['rank']
unique_together = (("org", "rank"),)
verbose_name = "Org's Related Org"
verbose_name_plural = "Org's Related Orgs"
def __unicode__(self):
return "%s" % self.related_org
class OrgIssueRelationship(models.Model):
org = models.ForeignKey(Org)
issue = models.ForeignKey(Issue)
rank = models.IntegerField(default=0) #This is manually managed for each org:issues relations.
date_created = models.DateTimeField(auto_now_add=True, verbose_name="Date Created")
date_updated = models.DateTimeField(auto_now=True, verbose_name="Date Updated")
class Meta:
ordering = ['rank']
unique_together = (("org", "issue"),)
verbose_name = "Org's Issue"
verbose_name_plural = "Org's Issues"
db_table = 'org_orgissuerelationship'
def __unicode__(self):
return "%s" % self.issue
|
mit
| -5,442,474,995,486,512,000 | 38.935028 | 242 | 0.654453 | false |