# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume manager manages creating, attaching, and detaching volumes, and their
persistent storage.

Persistent storage volumes keep their state independent of instances. You can
attach a volume to an instance, terminate the instance, spawn a new instance
(even one from a different image) and re-attach the volume with the same data
intact.

**Related Flags**

:volume_topic:  What :mod:`rpc` topic to listen to (default: `cinder-volume`).
:volume_manager:  The module name of a class derived from
                  :class:`manager.Manager` (default:
                  :class:`cinder.volume.manager.Manager`).
:volume_driver:  Used by :class:`Manager`. Defaults to
                 :class:`cinder.volume.drivers.lvm.LVMISCSIDriver`.
:volume_group:  Name of the group that will contain exported volumes (default:
                `cinder-volumes`)
:num_shell_tries:  Number of times to attempt to run commands (default: 3)
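
For example, a backend section in ``cinder.conf`` might set some of these
options as follows (illustrative values only, not a recommendation)::

    [lvm-backend]
    volume_driver = cinder.volume.drivers.lvm.LVMISCSIDriver
    volume_group = cinder-volumes
    volume_backend_name = lvm-backend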
"""
import time
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
from osprofiler import profiler
import six
from taskflow import exceptions as tfe
from cinder import compute
from cinder import context
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import glance
from cinder import manager
from cinder.openstack.common import periodic_task
from cinder import quota
from cinder import utils
from cinder.volume import configuration as config
from cinder.volume.flows.manager import create_volume
from cinder.volume.flows.manager import manage_existing
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
from eventlet import greenpool
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
CGQUOTAS = quota.CGQUOTAS
VALID_REMOVE_VOL_FROM_CG_STATUS = ('available', 'in-use',)
VALID_CREATE_CG_SRC_SNAP_STATUS = ('available',)
volume_manager_opts = [
cfg.StrOpt('volume_driver',
default='cinder.volume.drivers.lvm.LVMISCSIDriver',
help='Driver to use for volume creation'),
cfg.IntOpt('migration_create_volume_timeout_secs',
default=300,
help='Timeout for creating the volume to migrate to '
'when performing volume migration (seconds)'),
cfg.BoolOpt('volume_service_inithost_offload',
default=False,
help='Offload pending volume delete during '
'volume service startup'),
cfg.StrOpt('zoning_mode',
default='none',
help='FC Zoning mode configured'),
cfg.StrOpt('extra_capabilities',
default='{}',
help='User defined capabilities, a JSON formatted string '
'specifying key/value pairs. The key/value pairs can '
'be used by the CapabilitiesFilter to select between '
'backends when requests specify volume types. For '
'example, specifying a service level or the geographical '
'location of a backend, then creating a volume type to '
'allow the user to select by these different '
'properties.'),
]
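# NOTE: as described in the help text above, 'extra_capabilities' is a JSON
# string of key/value pairs, e.g. '{"service_level": "gold", "region": "east"}'
# (illustrative values only); the scheduler's CapabilitiesFilter can match
# these against volume type extra specs when selecting a backend.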
CONF = cfg.CONF
CONF.register_opts(volume_manager_opts)
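# Map deprecated volume driver import paths to their current locations; when a
# deprecated path is configured, __init__ below warns and translates it.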
MAPPING = {
'cinder.volume.drivers.huawei.huawei_hvs.HuaweiHVSISCSIDriver':
'cinder.volume.drivers.huawei.huawei_18000.Huawei18000ISCSIDriver',
'cinder.volume.drivers.huawei.huawei_hvs.HuaweiHVSFCDriver':
'cinder.volume.drivers.huawei.huawei_18000.Huawei18000FCDriver',
'cinder.volume.drivers.fujitsu_eternus_dx_fc.FJDXFCDriver':
'cinder.volume.drivers.fujitsu.eternus_dx_fc.FJDXFCDriver',
'cinder.volume.drivers.fujitsu_eternus_dx_iscsi.FJDXISCSIDriver':
    'cinder.volume.drivers.fujitsu.eternus_dx_iscsi.FJDXISCSIDriver',
}
def locked_volume_operation(f):
"""Lock decorator for volume operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the volume. This lock can then be used
by other operations to avoid operation conflicts on shared volumes.
Example use:
If a volume operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
volume e.g. delete VolA while create volume VolB from VolA is in progress.
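
    A minimal usage sketch (``delete_volume`` later in this module is in fact
    decorated this way; its body is elided here)::

        @locked_volume_operation
        def delete_volume(self, context, volume_id, unmanage_only=False):
            ...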
"""
def lvo_inner1(inst, context, volume_id, **kwargs):
@utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True)
def lvo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lvo_inner2(inst, context, volume_id, **kwargs)
return lvo_inner1
def locked_detach_operation(f):
"""Lock decorator for volume detach operations.
Takes a named lock prior to executing the detach call. The lock is named
with the operation executed and the id of the volume. This lock can then
be used by other operations to avoid operation conflicts on shared volumes.
This locking mechanism is only for detach calls. We can't use the
locked_volume_operation, because detach requires an additional
attachment_id in the parameter list.
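
    A minimal usage sketch (matching how ``detach_volume`` is decorated later
    in this module; its body is elided here)::

        @locked_detach_operation
        def detach_volume(self, context, volume_id, attachment_id=None):
            ...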
"""
def ldo_inner1(inst, context, volume_id, attachment_id=None, **kwargs):
@utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True)
def ldo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return ldo_inner2(inst, context, volume_id, attachment_id, **kwargs)
return ldo_inner1
def locked_snapshot_operation(f):
"""Lock decorator for snapshot operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the snapshot. This lock can then be
used by other operations to avoid operation conflicts on shared snapshots.
Example use:
If a snapshot operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
snapshot e.g. delete SnapA while create volume VolA from SnapA is in
progress.
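
    A minimal usage sketch (matching how ``delete_snapshot`` is decorated
    later in this module; its body is elided here)::

        @locked_snapshot_operation
        def delete_snapshot(self, context, snapshot):
            ...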
"""
def lso_inner1(inst, context, snapshot, **kwargs):
@utils.synchronized("%s-%s" % (snapshot.id, f.__name__), external=True)
def lso_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lso_inner2(inst, context, snapshot, **kwargs)
return lso_inner1
class VolumeManager(manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
RPC_API_VERSION = '1.23'
target = messaging.Target(version=RPC_API_VERSION)
def __init__(self, volume_driver=None, service_name=None,
*args, **kwargs):
"""Load the driver from the one specified in args, or from flags."""
# update_service_capabilities needs service_name to be volume
super(VolumeManager, self).__init__(service_name='volume',
*args, **kwargs)
self.configuration = config.Configuration(volume_manager_opts,
config_group=service_name)
self._tp = greenpool.GreenPool()
self.stats = {}
if not volume_driver:
            # Get from configuration, which will get the default
            # if it's not using multi-backend
volume_driver = self.configuration.volume_driver
if volume_driver in MAPPING:
LOG.warning(_LW("Driver path %s is deprecated, update your "
"configuration to the new path."), volume_driver)
volume_driver = MAPPING[volume_driver]
vol_db_empty = self._set_voldb_empty_at_startup_indicator(
context.get_admin_context())
LOG.debug("Cinder Volume DB check: vol_db_empty=%s" % vol_db_empty)
self.driver = importutils.import_object(
volume_driver,
configuration=self.configuration,
db=self.db,
host=self.host,
is_vol_db_empty=vol_db_empty)
self.driver = profiler.trace_cls("driver")(self.driver)
try:
self.extra_capabilities = jsonutils.loads(
self.driver.configuration.extra_capabilities)
except AttributeError:
self.extra_capabilities = {}
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Invalid JSON: %s" %
self.driver.configuration.extra_capabilities)
def _add_to_threadpool(self, func, *args, **kwargs):
self._tp.spawn_n(func, *args, **kwargs)
def _count_allocated_capacity(self, ctxt, volume):
pool = vol_utils.extract_host(volume['host'], 'pool')
if pool is None:
            # No pool name encoded in host, so this is a legacy
            # volume created before pools were introduced; ask the
            # driver to provide pool info if it has such
            # knowledge, and update the DB.
try:
pool = self.driver.get_pool(volume)
except Exception as err:
LOG.error(_LE('Failed to fetch pool name for volume: %s'),
volume['id'])
LOG.exception(err)
return
if pool:
new_host = vol_utils.append_host(volume['host'],
pool)
self.db.volume_update(ctxt, volume['id'],
{'host': new_host})
else:
            # Otherwise, put the volume into a special fixed pool with
            # volume_backend_name as the pool name; if
            # volume_backend_name is None, use the default pool name.
            # This is only for counting purposes and doesn't update the DB.
pool = (self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume['host'], 'pool', True))
try:
pool_stat = self.stats['pools'][pool]
except KeyError:
# First volume in the pool
self.stats['pools'][pool] = dict(
allocated_capacity_gb=0)
pool_stat = self.stats['pools'][pool]
pool_sum = pool_stat['allocated_capacity_gb']
pool_sum += volume['size']
self.stats['pools'][pool]['allocated_capacity_gb'] = pool_sum
self.stats['allocated_capacity_gb'] += volume['size']
def _set_voldb_empty_at_startup_indicator(self, ctxt):
"""Determine if the Cinder volume DB is empty.
A check of the volume DB is done to determine whether it is empty or
not at this point.
:param ctxt: our working context
"""
vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None)
if len(vol_entries) == 0:
LOG.info(_LI("Determined volume DB was empty at startup."))
return True
else:
LOG.info(_LI("Determined volume DB was not empty at startup."))
return False
def init_host(self):
"""Perform any required initialization."""
ctxt = context.get_admin_context()
LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)") %
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
self.driver.do_setup(ctxt)
self.driver.check_for_setup_error()
except Exception as ex:
LOG.error(_LE("Error encountered during "
"initialization of driver: %(name)s") %
{'name': self.driver.__class__.__name__})
LOG.exception(ex)
# we don't want to continue since we failed
# to initialize the driver correctly.
return
volumes = self.db.volume_get_all_by_host(ctxt, self.host)
# FIXME volume count for exporting is wrong
LOG.debug("Re-exporting %s volumes" % len(volumes))
try:
self.stats['pools'] = {}
self.stats.update({'allocated_capacity_gb': 0})
for volume in volumes:
# available volume should also be counted into allocated
if volume['status'] in ['in-use', 'available']:
# calculate allocated capacity for driver
self._count_allocated_capacity(ctxt, volume)
try:
if volume['status'] in ['in-use']:
self.driver.ensure_export(ctxt, volume)
except Exception as export_ex:
LOG.error(_LE("Failed to re-export volume %s: "
"setting to error state"), volume['id'])
LOG.exception(export_ex)
self.db.volume_update(ctxt,
volume['id'],
{'status': 'error'})
elif volume['status'] in ('downloading', 'creating'):
LOG.info(_LI("volume %(volume_id)s stuck in "
"%(volume_stat)s state. "
"Changing to error state."),
{'volume_id': volume['id'],
'volume_stat': volume['status']})
if volume['status'] == 'downloading':
self.driver.clear_download(ctxt, volume)
self.db.volume_update(ctxt,
volume['id'],
{'status': 'error'})
else:
LOG.info(_LI("volume %s: skipping export"), volume['id'])
snapshots = self.db.snapshot_get_by_host(ctxt,
self.host,
{'status': 'creating'})
for snapshot in snapshots:
LOG.info(_LI("snapshot %(snap_id)s stuck in "
"%(snap_stat)s state. "
"Changing to error state."),
{'snap_id': snapshot['id'],
'snap_stat': snapshot['status']})
self.db.snapshot_update(ctxt,
snapshot['id'],
{'status': 'error'})
except Exception as ex:
LOG.error(_LE("Error encountered during "
"re-exporting phase of driver initialization: "
" %(name)s") %
{'name': self.driver.__class__.__name__})
LOG.exception(ex)
return
self.driver.set_throttle()
# at this point the driver is considered initialized.
self.driver.set_initialized()
LOG.debug('Resuming any in progress delete operations')
for volume in volumes:
if volume['status'] == 'deleting':
LOG.info(_LI('Resuming delete on volume: %s') % volume['id'])
if CONF.volume_service_inithost_offload:
# Offload all the pending volume delete operations to the
# threadpool to prevent the main volume service thread
# from being blocked.
self._add_to_threadpool(self.delete_volume, ctxt,
volume['id'])
else:
# By default, delete volumes sequentially
self.delete_volume(ctxt, volume['id'])
# collect and publish service capabilities
self.publish_service_capabilities(ctxt)
# conditionally run replication status task
stats = self.driver.get_volume_stats(refresh=True)
if stats and stats.get('replication', False):
@periodic_task.periodic_task
def run_replication_task(self, ctxt):
self._update_replication_relationship_status(ctxt)
self.add_periodic_task(run_replication_task)
def create_volume(self, context, volume_id, request_spec=None,
filter_properties=None, allow_reschedule=True,
snapshot_id=None, image_id=None, source_volid=None,
source_replicaid=None, consistencygroup_id=None,
cgsnapshot_id=None):
"""Creates the volume."""
context_elevated = context.elevated()
if filter_properties is None:
filter_properties = {}
try:
# NOTE(flaper87): Driver initialization is
# verified by the task itself.
flow_engine = create_volume.get_flow(
context_elevated,
self.db,
self.driver,
self.scheduler_rpcapi,
self.host,
volume_id,
allow_reschedule,
context,
request_spec,
filter_properties,
snapshot_id=snapshot_id,
image_id=image_id,
source_volid=source_volid,
source_replicaid=source_replicaid,
consistencygroup_id=consistencygroup_id,
cgsnapshot_id=cgsnapshot_id)
except Exception:
LOG.exception(_LE("Failed to create manager volume flow"))
raise exception.CinderException(
_("Failed to create manager volume flow."))
if snapshot_id is not None:
# Make sure the snapshot is not deleted until we are done with it.
locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
elif source_volid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_volid, 'delete_volume')
elif source_replicaid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_replicaid, 'delete_volume')
else:
locked_action = None
def _run_flow():
# This code executes create volume flow. If something goes wrong,
# flow reverts all job that was done and reraises an exception.
# Otherwise, all data that was generated by flow becomes available
# in flow engine's storage.
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
@utils.synchronized(locked_action, external=True)
def _run_flow_locked():
_run_flow()
# NOTE(dulek): Flag to indicate if volume was rescheduled. Used to
# decide if allocated_capacity should be incremented.
rescheduled = False
try:
if locked_action is None:
_run_flow()
else:
_run_flow_locked()
except Exception as e:
if hasattr(e, 'rescheduled'):
rescheduled = e.rescheduled
raise
finally:
try:
vol_ref = flow_engine.storage.fetch('volume_ref')
except tfe.NotFound as e:
# Flow was reverted, fetching volume_ref from the DB.
vol_ref = self.db.volume_get(context, volume_id)
if not rescheduled:
# NOTE(dulek): Volume wasn't rescheduled so we need to update
# volume stats as these are decremented on delete.
self._update_allocated_capacity(vol_ref)
return vol_ref['id']
@locked_volume_operation
def delete_volume(self, context, volume_id, unmanage_only=False):
"""Deletes and unexports volume.

        1. Delete a volume (normal case)
           Delete a volume and update quotas.

        2. Delete a migration source volume
           If deleting the source volume in a migration, we want to skip
           quotas. Also we want to skip other database updates for the source
           volume because these updates will be handled properly by
           migrate_volume_completion.

        3. Delete a migration destination volume
           If deleting the destination volume in a migration, we want to
           skip quotas, but we need database updates for the volume.
"""
context = context.elevated()
try:
volume_ref = self.db.volume_get(context, volume_id)
except exception.VolumeNotFound:
# NOTE(thingee): It could be possible for a volume to
# be deleted when resuming deletes from init_host().
LOG.info(_LI("Tried to delete volume %s, but it no longer exists, "
"moving on") % (volume_id))
return True
if context.project_id != volume_ref['project_id']:
project_id = volume_ref['project_id']
else:
project_id = context.project_id
LOG.info(_LI("volume %s: deleting"), volume_ref['id'])
if volume_ref['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_id)
if (vol_utils.extract_host(volume_ref['host']) != self.host):
raise exception.InvalidVolume(
reason=_("volume is not local to this node"))
is_migrating = volume_ref['migration_status'] is not None
is_migrating_dest = (is_migrating and
volume_ref['migration_status'].startswith(
'target:'))
self._notify_about_volume_usage(context, volume_ref, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.debug("volume %s: removing export", volume_ref['id'])
self.driver.remove_export(context, volume_ref)
LOG.debug("volume %s: deleting", volume_ref['id'])
if unmanage_only:
self.driver.unmanage(volume_ref)
else:
self.driver.delete_volume(volume_ref)
except exception.VolumeIsBusy:
LOG.error(_LE("Cannot delete volume %s: volume is busy"),
volume_ref['id'])
# If this is a destination volume, we have to clear the database
# record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume_ref,
'available')
return True
except Exception:
with excutils.save_and_reraise_exception():
# If this is a destination volume, we have to clear the
# database record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume_ref,
'error_deleting')
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
# Get reservations
try:
reserve_opts = {'volumes': -1,
'gigabytes': -volume_ref['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting volume"))
# If deleting the source volume in a migration, we should skip database
# update here. In other cases, continue to update database entries.
if not is_migrating or is_migrating_dest:
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
self.db.volume_destroy(context, volume_id)
LOG.info(_LI("volume %s: deleted successfully"), volume_ref['id'])
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
self._notify_about_volume_usage(context, volume_ref, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
pool = vol_utils.extract_host(volume_ref['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume_ref['host'], 'pool', True)
size = volume_ref['size']
try:
self.stats['pools'][pool]['allocated_capacity_gb'] -= size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=-size)
self.publish_service_capabilities(context)
return True
def _clear_db(self, context, is_migrating_dest, volume_ref, status):
# This method is called when driver.unmanage() or
# driver.delete_volume() fails in delete_volume(), so it is already
# in the exception handling part.
if is_migrating_dest:
self.db.volume_destroy(context, volume_ref['id'])
LOG.error(_LE("Unable to delete the destination volume %s "
"during volume migration, but the database "
"record needs to be deleted."),
volume_ref['id'])
else:
self.db.volume_update(context,
volume_ref['id'],
{'status': status})
def create_snapshot(self, context, volume_id, snapshot):
"""Creates and exports the snapshot."""
context = context.elevated()
LOG.info(_LI("snapshot %s: creating"), snapshot.id)
self._notify_about_snapshot_usage(
context, snapshot, "create.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
LOG.debug("snapshot %(snap_id)s: creating",
{'snap_id': snapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
model_update = self.driver.create_snapshot(snapshot)
if model_update:
snapshot.update(model_update)
snapshot.save(context)
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = 'error'
snapshot.save(context)
vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot.id, volume_id)
except exception.GlanceMetadataNotFound:
# If volume is not created from image, No glance metadata
# would be available for that volume in
# volume glance metadata table
pass
except exception.CinderException as ex:
LOG.exception(_LE("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata") %
{'volume_id': volume_id,
'snapshot_id': snapshot.id})
snapshot.status = 'error'
snapshot.save(context)
raise exception.MetadataCopyFailure(reason=ex)
snapshot.status = 'available'
snapshot.progress = '100%'
snapshot.save(context)
LOG.info(_("snapshot %s: created successfully"), snapshot.id)
self._notify_about_snapshot_usage(context, snapshot, "create.end")
return snapshot.id
@locked_snapshot_operation
def delete_snapshot(self, context, snapshot):
"""Deletes and unexports snapshot."""
context = context.elevated()
project_id = snapshot.project_id
LOG.info(_("snapshot %s: deleting"), snapshot.id)
self._notify_about_snapshot_usage(
context, snapshot, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
LOG.debug("snapshot %s: deleting", snapshot.id)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
snapshot.save()
self.driver.delete_snapshot(snapshot)
except exception.SnapshotIsBusy:
LOG.error(_LE("Cannot delete snapshot %s: snapshot is busy"),
snapshot.id)
snapshot.status = 'available'
snapshot.save()
return True
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = 'error_deleting'
snapshot.save()
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot.volume_size,
}
volume_ref = self.db.volume_get(context, snapshot.volume_id)
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting snapshot"))
self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id)
snapshot.destroy(context)
LOG.info(_LI("snapshot %s: deleted successfully"), snapshot.id)
self._notify_about_snapshot_usage(context, snapshot, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
return True
def attach_volume(self, context, volume_id, instance_uuid, host_name,
mountpoint, mode):
"""Updates db to show volume is attached."""
@utils.synchronized(volume_id, external=True)
def do_attach():
# check the volume status before attaching
volume = self.db.volume_get(context, volume_id)
volume_metadata = self.db.volume_admin_metadata_get(
context.elevated(), volume_id)
if volume['status'] == 'attaching':
if (volume_metadata.get('attached_mode') and
volume_metadata.get('attached_mode') != mode):
msg = _("being attached by different mode")
raise exception.InvalidVolume(reason=msg)
if (volume['status'] == 'in-use' and not volume['multiattach']
and not volume['migration_status']):
msg = _("volume is already attached")
raise exception.InvalidVolume(reason=msg)
attachment = None
host_name_sanitized = utils.sanitize_hostname(
host_name) if host_name else None
if instance_uuid:
attachment = \
self.db.volume_attachment_get_by_instance_uuid(
context, volume_id, instance_uuid)
else:
attachment = \
self.db.volume_attachment_get_by_host(context, volume_id,
host_name_sanitized)
if attachment is not None:
return
self._notify_about_volume_usage(context, volume,
"attach.start")
values = {'volume_id': volume_id,
'attach_status': 'attaching', }
attachment = self.db.volume_attach(context.elevated(), values)
volume_metadata = self.db.volume_admin_metadata_update(
context.elevated(), volume_id,
{"attached_mode": mode}, False)
attachment_id = attachment['id']
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
self.db.volume_attachment_update(context, attachment_id,
{'attach_status':
'error_attaching'})
raise exception.InvalidUUID(uuid=instance_uuid)
volume = self.db.volume_get(context, volume_id)
if volume_metadata.get('readonly') == 'True' and mode != 'ro':
self.db.volume_update(context, volume_id,
{'status': 'error_attaching'})
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
self.driver.attach_volume(context,
volume,
instance_uuid,
host_name_sanitized,
mountpoint)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment_id,
{'attach_status': 'error_attaching'})
volume = self.db.volume_attached(context.elevated(),
attachment_id,
instance_uuid,
host_name_sanitized,
mountpoint,
mode)
if volume['migration_status']:
self.db.volume_update(context, volume_id,
{'migration_status': None})
self._notify_about_volume_usage(context, volume, "attach.end")
return self.db.volume_attachment_get(context, attachment_id)
return do_attach()
@locked_detach_operation
def detach_volume(self, context, volume_id, attachment_id=None):
"""Updates db to show volume is detached."""
# TODO(vish): refactor this into a more general "unreserve"
attachment = None
if attachment_id:
try:
attachment = self.db.volume_attachment_get(context,
attachment_id)
except exception.VolumeAttachmentNotFound:
LOG.error(_LE("We couldn't find the volume attachment"
" for volume %(volume_id)s and"
" attachment id %(id)s"),
{"volume_id": volume_id,
"id": attachment_id})
raise
else:
            # We can try to degrade gracefully here by detaching
            # the volume without an attachment_id if the volume only has
            # one attachment. This is for backwards compatibility.
attachments = self.db.volume_attachment_get_used_by_volume_id(
context, volume_id)
if len(attachments) > 1:
                # There is more than one attachment for this volume,
                # so we have to have an attachment_id.
msg = _("Volume %(id)s is attached to more than one instance"
". A valid attachment_id must be passed to detach"
" this volume") % {'id': volume_id}
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
elif len(attachments) == 1:
attachment = attachments[0]
else:
# there aren't any attachments for this volume.
msg = _("Volume %(id)s doesn't have any attachments "
"to detach") % {'id': volume_id}
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
volume = self.db.volume_get(context, volume_id)
self._notify_about_volume_usage(context, volume, "detach.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
self.driver.detach_volume(context, volume, attachment)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment.get('id'),
{'attach_status': 'error_detaching'})
self.db.volume_detached(context.elevated(), volume_id,
attachment.get('id'))
self.db.volume_admin_metadata_delete(context.elevated(), volume_id,
'attached_mode')
# NOTE(jdg): We used to do an ensure export here to
# catch upgrades while volumes were attached (E->F)
# this was necessary to convert in-use volumes from
# int ID's to UUID's. Don't need this any longer
# We're going to remove the export here
# (delete the iscsi target)
volume = self.db.volume_get(context, volume_id)
try:
utils.require_driver_initialized(self.driver)
LOG.debug("volume %s: removing export", volume_id)
self.driver.remove_export(context.elevated(), volume)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Error detaching volume %(volume)s, "
"due to uninitialized driver."),
{"volume": volume_id})
except Exception as ex:
LOG.exception(_LE("Error detaching volume %(volume)s, "
"due to remove export failure."),
{"volume": volume_id})
raise exception.RemoveExportException(volume=volume_id, reason=ex)
self._notify_about_volume_usage(context, volume, "detach.end")
def copy_volume_to_image(self, context, volume_id, image_meta):
"""Uploads the specified volume to Glance.
image_meta is a dictionary containing the following keys:
'id', 'container_format', 'disk_format'
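
        For example (illustrative values only)::

            {'id': '<image uuid>',
             'container_format': 'bare',
             'disk_format': 'raw'}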
"""
payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
image_service = None
try:
volume = self.db.volume_get(context, volume_id)
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
image_service, image_id = \
glance.get_remote_image_service(context, image_meta['id'])
self.driver.copy_volume_to_image(context, volume, image_service,
image_meta)
LOG.debug("Uploaded volume %(volume_id)s to "
"image (%(image_id)s) successfully",
{'volume_id': volume_id, 'image_id': image_id})
except Exception as error:
LOG.error(_LE("Error occurred while uploading "
"volume %(volume_id)s "
"to image %(image_id)s."),
{'volume_id': volume_id, 'image_id': image_meta['id']})
if image_service is not None:
# Deletes the image if it is in queued or saving state
self._delete_image(context, image_meta['id'], image_service)
with excutils.save_and_reraise_exception():
payload['message'] = six.text_type(error)
finally:
if not volume['volume_attachment']:
self.db.volume_update(context, volume_id,
{'status': 'available'})
else:
self.db.volume_update(context, volume_id,
{'status': 'in-use'})
def _delete_image(self, context, image_id, image_service):
"""Deletes an image stuck in queued or saving state."""
try:
image_meta = image_service.show(context, image_id)
image_status = image_meta.get('status')
if image_status == 'queued' or image_status == 'saving':
LOG.warn(_LW("Deleting image %(image_id)s in %(image_status)s "
"state."),
{'image_id': image_id,
'image_status': image_status})
image_service.delete(context, image_id)
except Exception:
LOG.warn(_LW("Error occurred while deleting image %s."),
image_id, exc_info=True)
def _driver_data_namespace(self):
return self.driver.configuration.safe_get('driver_data_namespace') \
or self.driver.configuration.safe_get('volume_backend_name') \
or self.driver.__class__.__name__
def _get_driver_initiator_data(self, context, connector):
data = None
initiator = connector.get('initiator', False)
if initiator:
namespace = self._driver_data_namespace()
try:
data = self.db.driver_initiator_data_get(
context,
initiator,
namespace
)
except exception.CinderException:
LOG.exception(_LE("Failed to get driver initiator data for"
" initiator %(initiator)s and namespace"
" %(namespace)s"),
{'initiator': initiator,
'namespace': namespace})
raise
return data
def _save_driver_initiator_data(self, context, connector, model_update):
if connector.get('initiator', False) and model_update:
namespace = self._driver_data_namespace()
try:
self.db.driver_initiator_data_update(context,
connector['initiator'],
namespace,
model_update)
except exception.CinderException:
LOG.exception(_LE("Failed to update initiator data for"
" initiator %(initiator)s and backend"
" %(backend)s"),
{'initiator': connector['initiator'],
'backend': namespace})
raise
def initialize_connection(self, context, volume_id, connector):
"""Prepare volume for connection from host represented by connector.
This method calls the driver initialize_connection and returns
it to the caller. The connector parameter is a dictionary with
information about the host that will connect to the volume in the
following format::
{
'ip': ip,
'initiator': initiator,
}
ip: the ip address of the connecting machine
initiator: the iscsi initiator name of the connecting machine.
This can be None if the connecting machine does not support iscsi
connections.
driver is responsible for doing any necessary security setup and
returning a connection_info dictionary in the following format::
{
'driver_volume_type': driver_volume_type,
'data': data,
}
driver_volume_type: a string to identify the type of volume. This
can be used by the calling code to determine the
strategy for connecting to the volume. This could
be 'iscsi', 'rbd', 'sheepdog', etc.
data: this is the data that the calling code will use to connect
to the volume. Keep in mind that this will be serialized to
json in various places, so it should not contain any non-json
data types.
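
        An illustrative iSCSI-style return value (the field values below are
        examples only, not produced by this method) might look like::

            {
                'driver_volume_type': 'iscsi',
                'data': {
                    'target_discovered': False,
                    'target_iqn': 'iqn.2010-10.org.openstack:volume-<id>',
                    'target_portal': '10.0.0.1:3260',
                    'target_lun': 1,
                    'access_mode': 'rw',
                }
            }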
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
try:
self.driver.validate_connector(connector)
except exception.InvalidConnectorException as err:
raise exception.InvalidInput(reason=err)
except Exception as err:
err_msg = (_('Unable to validate connector information in '
'backend: %(err)s') % {'err': err})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
volume = self.db.volume_get(context, volume_id)
model_update = None
try:
LOG.debug("Volume %s: creating export", volume_id)
model_update = self.driver.create_export(context.elevated(),
volume)
except exception.CinderException:
err_msg = (_('Unable to create export for volume %(volume_id)s') %
{'volume_id': volume_id})
LOG.exception(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
if model_update:
volume = self.db.volume_update(context,
volume_id,
model_update)
except exception.CinderException as ex:
LOG.exception(_LE("Failed updating model of volume %(volume_id)s"
" with driver provided model %(model)s") %
{'volume_id': volume_id, 'model': model_update})
raise exception.ExportFailure(reason=ex)
initiator_data = self._get_driver_initiator_data(context, connector)
try:
if initiator_data:
conn_info = self.driver.initialize_connection(volume,
connector,
initiator_data)
else:
conn_info = self.driver.initialize_connection(volume,
connector)
except Exception as err:
err_msg = (_('Unable to fetch connection information from '
'backend: %(err)s') % {'err': err})
LOG.error(err_msg)
self.driver.remove_export(context.elevated(), volume)
raise exception.VolumeBackendAPIException(data=err_msg)
initiator_update = conn_info.get('initiator_update', None)
if initiator_update:
self._save_driver_initiator_data(context, connector,
initiator_update)
del conn_info['initiator_update']
# Add qos_specs to connection info
typeid = volume['volume_type_id']
specs = None
if typeid:
res = volume_types.get_volume_type_qos_specs(typeid)
qos = res['qos_specs']
# only pass qos_specs that is designated to be consumed by
# front-end, or both front-end and back-end.
if qos and qos.get('consumer') in ['front-end', 'both']:
specs = qos.get('specs')
qos_spec = dict(qos_specs=specs)
conn_info['data'].update(qos_spec)
# Add access_mode to connection info
volume_metadata = self.db.volume_admin_metadata_get(context.elevated(),
volume_id)
if conn_info['data'].get('access_mode') is None:
access_mode = volume_metadata.get('attached_mode')
if access_mode is None:
# NOTE(zhiyan): client didn't call 'os-attach' before
access_mode = ('ro'
if volume_metadata.get('readonly') == 'True'
else 'rw')
conn_info['data']['access_mode'] = access_mode
return conn_info
def terminate_connection(self, context, volume_id, connector, force=False):
"""Cleanup connection from host represented by connector.
The format of connector is the same as for initialize_connection.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.terminate_connection(volume_ref, connector,
force=force)
except Exception as err:
err_msg = (_('Unable to terminate volume connection: %(err)s')
% {'err': err})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
def accept_transfer(self, context, volume_id, new_user, new_project):
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
# NOTE(jdg): need elevated context as we haven't "given" the vol
# yet
volume_ref = self.db.volume_get(context.elevated(), volume_id)
        # NOTE(jdg): Some drivers tie provider info (CHAP) to the tenant;
        # for those that do, allow them to return updated model info
model_update = self.driver.accept_transfer(context,
volume_ref,
new_user,
new_project)
if model_update:
try:
self.db.volume_update(context.elevated(),
volume_id,
model_update)
except exception.CinderException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed updating model of "
"volume %(volume_id)s "
"with drivers update %(model)s "
"during xfr.") %
{'volume_id': volume_id,
'model': model_update})
self.db.volume_update(context.elevated(),
volume_id,
{'status': 'error'})
return model_update
def _migrate_volume_generic(self, ctxt, volume, host, new_type_id):
rpcapi = volume_rpcapi.VolumeAPI()
# Create new volume on remote host
new_vol_values = {}
for k, v in volume.iteritems():
new_vol_values[k] = v
del new_vol_values['id']
del new_vol_values['_name_id']
# We don't copy volume_type because the db sets that according to
# volume_type_id, which we do copy
del new_vol_values['volume_type']
if new_type_id:
new_vol_values['volume_type_id'] = new_type_id
new_vol_values['host'] = host['host']
new_vol_values['status'] = 'creating'
        # FIXME(jdg): using a ':' delimiter is confusing to
        # me below here. We're adding a string member to a dict
        # using a ':', which is kind of a poor choice in this case,
        # I think.
new_vol_values['migration_status'] = 'target:%s' % volume['id']
new_vol_values['attach_status'] = 'detached'
new_vol_values['volume_attachment'] = []
new_volume = self.db.volume_create(ctxt, new_vol_values)
rpcapi.create_volume(ctxt, new_volume, host['host'],
None, None, allow_reschedule=False)
# Wait for new_volume to become ready
starttime = time.time()
deadline = starttime + CONF.migration_create_volume_timeout_secs
new_volume = self.db.volume_get(ctxt, new_volume['id'])
tries = 0
while new_volume['status'] != 'available':
tries += 1
now = time.time()
if new_volume['status'] == 'error':
msg = _("failed to create new_volume on destination host")
self._clean_temporary_volume(ctxt, volume['id'],
new_volume['id'],
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
elif now > deadline:
msg = _("timeout creating new_volume on destination host")
self._clean_temporary_volume(ctxt, volume['id'],
new_volume['id'],
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
else:
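                # Quadratic backoff between status polls; the deadline check
                # above (derived from migration_create_volume_timeout_secs)
                # still caps the overall wait.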
time.sleep(tries ** 2)
new_volume = self.db.volume_get(ctxt, new_volume['id'])
# Copy the source volume to the destination volume
try:
attachments = volume['volume_attachment']
if not attachments:
self.driver.copy_volume_data(ctxt, volume, new_volume,
remote='dest')
# The above call is synchronous so we complete the migration
self.migrate_volume_completion(ctxt, volume['id'],
new_volume['id'],
error=False)
else:
nova_api = compute.API()
# This is an async call to Nova, which will call the completion
# when it's done
for attachment in attachments:
instance_uuid = attachment['instance_uuid']
nova_api.update_server_volume(ctxt, instance_uuid,
volume['id'],
new_volume['id'])
except Exception:
with excutils.save_and_reraise_exception():
msg = _LE("Failed to copy volume %(vol1)s to %(vol2)s")
LOG.error(msg, {'vol1': volume['id'],
'vol2': new_volume['id']})
self._clean_temporary_volume(ctxt, volume['id'],
new_volume['id'])
def _get_original_status(self, volume):
attachments = volume['volume_attachment']
if not attachments:
return 'available'
else:
return 'in-use'
def _clean_temporary_volume(self, ctxt, volume_id, new_volume_id,
clean_db_only=False):
volume = self.db.volume_get(ctxt, volume_id)
# If we're in the migrating phase, we need to cleanup
# destination volume because source volume is remaining
if volume['migration_status'] == 'migrating':
try:
if clean_db_only:
# The temporary volume is not created, only DB data
# is created
self.db.volume_destroy(ctxt, new_volume_id)
else:
# The temporary volume is already created
rpcapi = volume_rpcapi.VolumeAPI()
volume = self.db.volume_get(ctxt, new_volume_id)
rpcapi.delete_volume(ctxt, volume)
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find the temporary volume "
"%(vol)s in the database. There is no need "
"to clean up this volume."),
{'vol': new_volume_id})
else:
# If we're in the completing phase don't delete the
# destination because we may have already deleted the
# source! But the migration_status in database should
# be cleared to handle volume after migration failure
try:
updates = {'migration_status': None}
self.db.volume_update(ctxt, new_volume_id, updates)
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find destination volume "
"%(vol)s in the database. The entry might be "
"successfully deleted during migration "
"completion phase."),
{'vol': new_volume_id})
LOG.warning(_LW("Failed to migrate volume. The destination "
"volume %(vol)s is not deleted since the "
"source volume may have been deleted."),
{'vol': new_volume_id})
def migrate_volume_completion(self, ctxt, volume_id, new_volume_id,
error=False):
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
self.db.volume_update(ctxt, volume_id,
{'migration_status': 'error'})
msg = _("migrate_volume_completion: completing migration for "
"volume %(vol1)s (temporary volume %(vol2)s")
LOG.debug(msg % {'vol1': volume_id, 'vol2': new_volume_id})
volume = self.db.volume_get(ctxt, volume_id)
new_volume = self.db.volume_get(ctxt, new_volume_id)
rpcapi = volume_rpcapi.VolumeAPI()
orig_volume_status = self._get_original_status(volume)
if error:
msg = _("migrate_volume_completion is cleaning up an error "
"for volume %(vol1)s (temporary volume %(vol2)s")
LOG.info(msg % {'vol1': volume['id'],
'vol2': new_volume['id']})
rpcapi.delete_volume(ctxt, new_volume)
updates = {'migration_status': None, 'status': orig_volume_status}
self.db.volume_update(ctxt, volume_id, updates)
return volume_id
self.db.volume_update(ctxt, volume_id,
{'migration_status': 'completing'})
# Delete the source volume (if it fails, don't fail the migration)
try:
if orig_volume_status == 'in-use':
attachments = volume['volume_attachment']
for attachment in attachments:
self.detach_volume(ctxt, volume_id, attachment['id'])
self.delete_volume(ctxt, volume_id)
except Exception as ex:
msg = _("Failed to delete migration source vol %(vol)s: %(err)s")
LOG.error(msg % {'vol': volume_id, 'err': ex})
        # Give the driver (new_volume) a chance to update things as needed.
        # Note this needs to go through rpc to the host of the new volume;
        # the current host and driver object are for the "existing" volume.
rpcapi.update_migrated_volume(ctxt,
volume,
new_volume)
self.db.finish_volume_migration(ctxt, volume_id, new_volume_id)
self.db.volume_destroy(ctxt, new_volume_id)
if orig_volume_status == 'in-use':
updates = {'migration_status': 'completing',
'status': orig_volume_status}
else:
updates = {'migration_status': None}
self.db.volume_update(ctxt, volume_id, updates)
if orig_volume_status == 'in-use':
attachments = volume['volume_attachment']
for attachment in attachments:
rpcapi.attach_volume(ctxt, volume,
attachment['instance_uuid'],
attachment['attached_host'],
attachment['mountpoint'],
'rw')
return volume['id']
def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False,
new_type_id=None):
"""Migrate the volume to the specified host (called on source host)."""
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
self.db.volume_update(ctxt, volume_id,
{'migration_status': 'error'})
volume_ref = self.db.volume_get(ctxt, volume_id)
model_update = None
moved = False
status_update = None
if volume_ref['status'] == 'retyping':
status_update = {'status': self._get_original_status(volume_ref)}
self.db.volume_update(ctxt, volume_ref['id'],
{'migration_status': 'migrating'})
if not force_host_copy and new_type_id is None:
try:
LOG.debug("volume %s: calling driver migrate_volume",
volume_ref['id'])
moved, model_update = self.driver.migrate_volume(ctxt,
volume_ref,
host)
if moved:
updates = {'host': host['host'],
'migration_status': None}
if status_update:
updates.update(status_update)
if model_update:
updates.update(model_update)
volume_ref = self.db.volume_update(ctxt,
volume_ref['id'],
updates)
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': None}
if status_update:
updates.update(status_update)
try:
model_update = self.driver.create_export(ctxt,
volume_ref)
if model_update:
updates.update(model_update)
except Exception:
LOG.exception(_LE("Failed to create export for "
"volume: %s"), volume_ref['id'])
finally:
self.db.volume_update(ctxt, volume_ref['id'], updates)
if not moved:
try:
self._migrate_volume_generic(ctxt, volume_ref, host,
new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': None}
if status_update:
updates.update(status_update)
try:
model_update = self.driver.create_export(ctxt,
volume_ref)
if model_update:
updates.update(model_update)
except Exception:
LOG.exception(_LE("Failed to create export for "
"volume: %s"), volume_ref['id'])
finally:
self.db.volume_update(ctxt, volume_ref['id'], updates)
@periodic_task.periodic_task
def _report_driver_status(self, context):
LOG.info(_LI("Updating volume status"))
if not self.driver.initialized:
if self.driver.configuration.config_group is None:
config_group = ''
else:
config_group = ('(config name %s)' %
self.driver.configuration.config_group)
LOG.warning(_LW('Unable to update stats, %(driver_name)s '
'-%(driver_version)s '
'%(config_group)s driver is uninitialized.') %
{'driver_name': self.driver.__class__.__name__,
'driver_version': self.driver.get_version(),
'config_group': config_group})
else:
volume_stats = self.driver.get_volume_stats(refresh=True)
if self.extra_capabilities:
volume_stats.update(self.extra_capabilities)
if volume_stats:
# Append volume stats with 'allocated_capacity_gb'
self._append_volume_stats(volume_stats)
# queue it to be sent to the Schedulers.
self.update_service_capabilities(volume_stats)
def _append_volume_stats(self, vol_stats):
pools = vol_stats.get('pools', None)
if pools and isinstance(pools, list):
for pool in pools:
pool_name = pool['pool_name']
try:
pool_stats = self.stats['pools'][pool_name]
except KeyError:
# Pool not found in volume manager
pool_stats = dict(allocated_capacity_gb=0)
pool.update(pool_stats)
def publish_service_capabilities(self, context):
"""Collect driver status and then publish."""
self._report_driver_status(context)
self._publish_service_capabilities(context)
def notification(self, context, event):
LOG.info(_LI("Notification {%s} received"), event)
def _notify_about_volume_usage(self,
context,
volume,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_snapshot_usage(self,
context,
snapshot,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_consistencygroup_usage(self,
context,
group,
event_suffix,
volumes=None,
extra_usage_info=None):
vol_utils.notify_about_consistencygroup_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not volumes:
volumes = self.db.volume_get_all_by_group(context, group['id'])
if volumes:
for volume in volumes:
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_cgsnapshot_usage(self,
context,
cgsnapshot,
event_suffix,
snapshots=None,
extra_usage_info=None):
vol_utils.notify_about_cgsnapshot_usage(
context, cgsnapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
snapshots = self.db.snapshot_get_all_for_cgsnapshot(
context, cgsnapshot['id'])
if snapshots:
for snapshot in snapshots:
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def extend_volume(self, context, volume_id, new_size, reservations):
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
self.db.volume_update(context, volume_id,
{'status': 'error_extending'})
volume = self.db.volume_get(context, volume_id)
size_increase = (int(new_size)) - volume['size']
self._notify_about_volume_usage(context, volume, "resize.start")
try:
LOG.info(_LI("volume %s: extending"), volume['id'])
self.driver.extend_volume(volume, new_size)
LOG.info(_LI("volume %s: extended successfully"), volume['id'])
except Exception:
LOG.exception(_LE("volume %s: Error trying to extend volume"),
volume_id)
try:
self.db.volume_update(context, volume['id'],
{'status': 'error_extending'})
raise exception.CinderException(_("Volume %s: Error trying "
"to extend volume") %
volume_id)
finally:
QUOTAS.rollback(context, reservations)
return
QUOTAS.commit(context, reservations)
volume = self.db.volume_update(context,
volume['id'],
{'size': int(new_size),
'status': 'available'})
pool = vol_utils.extract_host(volume['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += size_increase
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=size_increase)
self._notify_about_volume_usage(
context, volume, "resize.end",
extra_usage_info={'size': int(new_size)})
def retype(self, ctxt, volume_id, new_type_id, host,
migration_policy='never', reservations=None):
def _retype_error(context, volume_id, old_reservations,
new_reservations, status_update):
try:
self.db.volume_update(context, volume_id, status_update)
finally:
QUOTAS.rollback(context, old_reservations)
QUOTAS.rollback(context, new_reservations)
context = ctxt.elevated()
volume_ref = self.db.volume_get(ctxt, volume_id)
status_update = {'status': self._get_original_status(volume_ref)}
if context.project_id != volume_ref['project_id']:
project_id = volume_ref['project_id']
else:
project_id = context.project_id
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
# NOTE(flaper87): Other exceptions in this method don't
# set the volume status to error. Should that be done
                # here? Setting the volume back to its original status
# for now.
self.db.volume_update(context, volume_id, status_update)
# Get old reservations
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume_ref['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
old_reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
self.db.volume_update(context, volume_id, status_update)
LOG.exception(_LE("Failed to update usages "
"while retyping volume."))
raise exception.CinderException(_("Failed to get old volume type"
" quota reservations"))
# We already got the new reservations
new_reservations = reservations
# If volume types have the same contents, no need to do anything
retyped = False
diff, all_equal = volume_types.volume_types_diff(
context, volume_ref.get('volume_type_id'), new_type_id)
if all_equal:
retyped = True
# Call driver to try and change the type
retype_model_update = None
if not retyped:
try:
new_type = volume_types.get_volume_type(context, new_type_id)
ret = self.driver.retype(context,
volume_ref,
new_type,
diff,
host)
# Check if the driver retype provided a model update or
# just a retype indication
                if isinstance(ret, tuple):
retyped, retype_model_update = ret
else:
retyped = ret
if retyped:
LOG.info(_LI("Volume %s: retyped successfully"), volume_id)
except Exception as ex:
retyped = False
LOG.error(_LE("Volume %s: driver error when trying to retype, "
"falling back to generic mechanism."),
volume_ref['id'])
LOG.exception(ex)
# We could not change the type, so we need to migrate the volume, where
# the destination volume will be of the new type
if not retyped:
if migration_policy == 'never':
_retype_error(context, volume_id, old_reservations,
new_reservations, status_update)
msg = _("Retype requires migration but is not allowed.")
raise exception.VolumeMigrationFailed(reason=msg)
snaps = self.db.snapshot_get_all_for_volume(context,
volume_ref['id'])
if snaps:
_retype_error(context, volume_id, old_reservations,
new_reservations, status_update)
msg = _("Volume must not have snapshots.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Don't allow volume with replicas to be migrated
rep_status = volume_ref['replication_status']
if rep_status is not None and rep_status != 'disabled':
_retype_error(context, volume_id, old_reservations,
new_reservations, status_update)
msg = _("Volume must not be replicated.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
self.db.volume_update(context, volume_ref['id'],
{'migration_status': 'starting'})
try:
self.migrate_volume(context, volume_id, host,
new_type_id=new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
_retype_error(context, volume_id, old_reservations,
new_reservations, status_update)
else:
model_update = {'volume_type_id': new_type_id,
'host': host['host'],
'status': status_update['status']}
if retype_model_update:
model_update.update(retype_model_update)
self.db.volume_update(context, volume_id, model_update)
if old_reservations:
QUOTAS.commit(context, old_reservations, project_id=project_id)
if new_reservations:
QUOTAS.commit(context, new_reservations, project_id=project_id)
self.publish_service_capabilities(context)
def manage_existing(self, ctxt, volume_id, ref=None):
LOG.debug('manage_existing: managing %s.' % ref)
try:
flow_engine = manage_existing.get_flow(
ctxt,
self.db,
self.driver,
self.host,
volume_id,
ref)
except Exception:
LOG.exception(_LE("Failed to create manage_existing flow."))
raise exception.CinderException(
_("Failed to create manage existing flow."))
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
# Fetch created volume from storage
vol_ref = flow_engine.storage.fetch('volume')
# Update volume stats
pool = vol_utils.extract_host(vol_ref['host'], 'pool')
if pool is None:
            # Legacy volume, put it into the default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
vol_ref['host'], 'pool', True)
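        # The pool may not have been reported before; create its stats entry
        # lazily when the KeyError fires.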
try:
self.stats['pools'][pool]['allocated_capacity_gb'] \
+= vol_ref['size']
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=vol_ref['size'])
return vol_ref['id']
def promote_replica(self, ctxt, volume_id):
"""Promote volume replica secondary to be the primary volume."""
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to promote replica "
"for volume %(id)s.")
% {'id': volume_id})
volume = self.db.volume_get(ctxt, volume_id)
model_update = None
try:
LOG.debug("Volume %s: promote replica.", volume_id)
model_update = self.driver.promote_replica(ctxt, volume)
except exception.CinderException:
err_msg = (_('Error promoting secondary volume to primary'))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
try:
if model_update:
volume = self.db.volume_update(ctxt,
volume_id,
model_update)
except exception.CinderException:
err_msg = (_("Failed updating model"
" with driver provided model %(model)s") %
{'model': model_update})
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
def reenable_replication(self, ctxt, volume_id):
"""Re-enable replication of secondary volume with primary volumes."""
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to sync replica for volume %(id)s.")
% {'id': volume_id})
volume = self.db.volume_get(ctxt, volume_id)
model_update = None
try:
LOG.debug("Volume %s: sync replica.", volume_id)
model_update = self.driver.reenable_replication(ctxt, volume)
except exception.CinderException:
err_msg = (_('Error synchronizing secondary volume to primary'))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
try:
if model_update:
volume = self.db.volume_update(ctxt,
volume_id,
model_update)
except exception.CinderException:
err_msg = (_("Failed updating model"
" with driver provided model %(model)s") %
{'model': model_update})
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
def _update_replication_relationship_status(self, ctxt):
LOG.info(_LI('Updating volume replication status.'))
# Only want volumes that do not have a 'disabled' replication status
filters = {'replication_status': ['active', 'copying', 'error',
'active-stopped', 'inactive']}
volumes = self.db.volume_get_all_by_host(ctxt, self.host,
filters=filters)
for vol in volumes:
model_update = None
try:
model_update = self.driver.get_replication_status(
ctxt, vol)
if model_update:
self.db.volume_update(ctxt, vol['id'], model_update)
except Exception:
LOG.exception(_LE("Error checking replication status for "
"volume %s") % vol['id'])
def create_consistencygroup(self, context, group_id):
"""Creates the consistency group."""
context = context.elevated()
group_ref = self.db.consistencygroup_get(context, group_id)
group_ref['host'] = self.host
status = 'available'
model_update = False
self._notify_about_consistencygroup_usage(
context, group_ref, "create.start")
try:
utils.require_driver_initialized(self.driver)
LOG.info(_LI("Consistency group %s: creating"), group_ref['name'])
model_update = self.driver.create_consistencygroup(context,
group_ref)
if model_update:
group_ref = self.db.consistencygroup_update(
context, group_ref['id'], model_update)
except Exception:
with excutils.save_and_reraise_exception():
self.db.consistencygroup_update(
context,
group_ref['id'],
{'status': 'error'})
LOG.error(_LE("Consistency group %s: create failed"),
group_ref['name'])
now = timeutils.utcnow()
self.db.consistencygroup_update(context,
group_ref['id'],
{'status': status,
'created_at': now})
LOG.info(_LI("Consistency group %s: created successfully"),
group_ref['name'])
self._notify_about_consistencygroup_usage(
context, group_ref, "create.end")
return group_ref['id']
def create_consistencygroup_from_src(self, context, group_id,
cgsnapshot_id=None):
"""Creates the consistency group from source.
Currently the source can only be a cgsnapshot.
"""
group_ref = self.db.consistencygroup_get(context, group_id)
try:
volumes = self.db.volume_get_all_by_group(
context, group_id)
cgsnapshot = None
snapshots = None
if cgsnapshot_id:
try:
cgsnapshot = self.db.cgsnapshot_get(context, cgsnapshot_id)
except exception.CgSnapshotNotFound:
LOG.error(_LE("Cannot create consistency group %(group)s "
"because cgsnapshot %(snap)s cannot be "
"found."),
{'group': group_id,
'snap': cgsnapshot_id})
raise
if cgsnapshot:
snapshots = self.db.snapshot_get_all_for_cgsnapshot(
context, cgsnapshot_id)
for snap in snapshots:
if (snap['status'] not in
VALID_CREATE_CG_SRC_SNAP_STATUS):
msg = (_("Cannot create consistency group "
"%(group)s because snapshot %(snap)s is "
"not in a valid state. Valid states are: "
"%(valid)s.") %
{'group': group_id,
'snap': snap['id'],
'valid': VALID_CREATE_CG_SRC_SNAP_STATUS})
raise exception.InvalidConsistencyGroup(reason=msg)
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes.
sorted_snapshots = self._sort_snapshots(volumes, snapshots)
self._notify_about_consistencygroup_usage(
context, group_ref, "create.start")
utils.require_driver_initialized(self.driver)
LOG.info(_LI("Consistency group %(group)s: creating from source "
"cgsnapshot %(snap)s."),
{'group': group_id,
'snap': cgsnapshot_id})
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
context, group_ref, volumes, cgsnapshot,
sorted_snapshots))
if volumes_model_update:
for update in volumes_model_update:
self.db.volume_update(context, update['id'], update)
if model_update:
group_ref = self.db.consistencygroup_update(
context, group_id, model_update)
except Exception:
with excutils.save_and_reraise_exception():
self.db.consistencygroup_update(
context,
group_id,
{'status': 'error'})
LOG.error(_LE("Consistency group %(group)s: create from "
"source cgsnapshot %(snap)s failed."),
{'group': group_id,
'snap': cgsnapshot_id})
# Update volume status to 'error' as well.
for vol in volumes:
self.db.volume_update(
context, vol['id'], {'status': 'error'})
now = timeutils.utcnow()
status = 'available'
for vol in volumes:
update = {'status': status, 'created_at': now}
self._update_volume_from_src(context, vol, update,
group_id=group_id)
self._update_allocated_capacity(vol)
self.db.consistencygroup_update(context,
group_id,
{'status': status,
'created_at': now})
LOG.info(_LI("Consistency group %(group)s: created successfully "
"from source cgsnapshot %(snap)s."),
{'group': group_id,
'snap': cgsnapshot_id})
self._notify_about_consistencygroup_usage(
context, group_ref, "create.end")
return group_ref['id']
def _sort_snapshots(self, volumes, snapshots):
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes. Each source snapshot in the snapshots
# list should have a corresponding target volume in the volumes list.
if not volumes or not snapshots or len(volumes) != len(snapshots):
msg = _("Input volumes or snapshots are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_snapshots = []
for vol in volumes:
found_snaps = filter(
lambda snap: snap['id'] == vol['snapshot_id'], snapshots)
if not found_snaps:
LOG.error(_LE("Source snapshot cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.SnapshotNotFound(
snapshot_id=vol['snapshot_id'])
sorted_snapshots.extend(found_snaps)
return sorted_snapshots
def _update_volume_from_src(self, context, vol, update, group_id=None):
try:
snapshot_ref = self.db.snapshot_get(context,
vol['snapshot_id'])
orig_vref = self.db.volume_get(context,
snapshot_ref['volume_id'])
if orig_vref.bootable:
update['bootable'] = True
self.db.volume_glance_metadata_copy_to_volume(
context, vol['id'], vol['snapshot_id'])
except exception.SnapshotNotFound:
LOG.error(_LE("Source snapshot %(snapshot_id)s cannot be found."),
{'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group_id:
self.db.consistencygroup_update(
context, group_id, {'status': 'error'})
raise
except exception.VolumeNotFound:
LOG.error(_LE("The source volume %(volume_id)s "
"cannot be found."),
{'volume_id': snapshot_ref['volume_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group_id:
self.db.consistencygroup_update(
context, group_id, {'status': 'error'})
raise
except exception.CinderException as ex:
LOG.error(_LE("Failed to update %(volume_id)s"
" metadata using the provided snapshot"
" %(snapshot_id)s metadata.") %
{'volume_id': vol['id'],
'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group_id:
self.db.consistencygroup_update(
context, group_id, {'status': 'error'})
raise exception.MetadataCopyFailure(reason=ex)
self.db.volume_update(context, vol['id'], update)
def _update_allocated_capacity(self, vol):
# Update allocated capacity in volume stats
pool = vol_utils.extract_host(vol['host'], 'pool')
if pool is None:
            # Legacy volume, put it into the default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
vol['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += (
vol['size'])
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=vol['size'])
def delete_consistencygroup(self, context, group_id):
"""Deletes consistency group and the volumes in the group."""
context = context.elevated()
group_ref = self.db.consistencygroup_get(context, group_id)
        project_id = group_ref['project_id']
LOG.info(_LI("Consistency group %s: deleting"), group_ref['id'])
volumes = self.db.volume_get_all_by_group(context, group_id)
for volume_ref in volumes:
if volume_ref['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_ref['id'])
# self.host is 'host@backend'
# volume_ref['host'] is 'host@backend#pool'
# Extract host before doing comparison
new_host = vol_utils.extract_host(volume_ref['host'])
if new_host != self.host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node"))
self._notify_about_consistencygroup_usage(
context, group_ref, "delete.start")
try:
utils.require_driver_initialized(self.driver)
LOG.debug("Consistency group %(group_id)s: deleting",
{'group_id': group_id})
model_update, volumes = self.driver.delete_consistencygroup(
context, group_ref)
if volumes:
for volume in volumes:
update = {'status': volume['status']}
self.db.volume_update(context, volume['id'],
update)
# If we failed to delete a volume, make sure the status
# for the cg is set to error as well
if (volume['status'] in ['error_deleting', 'error'] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = volume['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Error occurred when deleting consistency group '
'%s.') % group_ref['id'])
LOG.exception(msg)
raise exception.VolumeDriverException(message=msg)
else:
self.db.consistencygroup_update(context, group_ref['id'],
model_update)
except Exception:
with excutils.save_and_reraise_exception():
self.db.consistencygroup_update(
context,
group_ref['id'],
{'status': 'error_deleting'})
# Get reservations for group
try:
reserve_opts = {'consistencygroups': -1}
cgreservations = CGQUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
cgreservations = None
LOG.exception(_LE("Failed to update usages deleting "
"consistency groups."))
for volume_ref in volumes:
# Get reservations for volume
try:
volume_id = volume_ref['id']
reserve_opts = {'volumes': -1,
'gigabytes': -volume_ref['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting volume."))
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
self.db.volume_destroy(context, volume_id)
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.stats['allocated_capacity_gb'] -= volume_ref['size']
if cgreservations:
CGQUOTAS.commit(context, cgreservations,
project_id=project_id)
self.db.consistencygroup_destroy(context, group_id)
LOG.info(_LI("Consistency group %s: deleted successfully."),
group_id)
self._notify_about_consistencygroup_usage(
context, group_ref, "delete.end", volumes)
self.publish_service_capabilities(context)
return True
def update_consistencygroup(self, context, group_id,
add_volumes=None, remove_volumes=None):
"""Updates consistency group.
Update consistency group by adding volumes to the group,
or removing volumes from the group.
"""
LOG.info(_LI("Consistency group %s: updating"), group_id)
group = self.db.consistencygroup_get(context, group_id)
add_volumes_ref = []
remove_volumes_ref = []
add_volumes_list = []
remove_volumes_list = []
if add_volumes:
add_volumes_list = add_volumes.split(',')
if remove_volumes:
remove_volumes_list = remove_volumes.split(',')
for add_vol in add_volumes_list:
try:
add_vol_ref = self.db.volume_get(context, add_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because volume cannot be "
"found."),
{'volume_id': add_vol_ref['id'],
'group_id': group_id})
raise
if add_vol_ref['status'] not in ['in-use', 'available']:
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': add_vol_ref['id'],
'group_id': group_id,
'status': add_vol_ref['status'],
'valid': VALID_REMOVE_VOL_FROM_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
# self.host is 'host@backend'
# volume_ref['host'] is 'host@backend#pool'
# Extract host before doing comparison
new_host = vol_utils.extract_host(add_vol_ref['host'])
if new_host != self.host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node."))
add_volumes_ref.append(add_vol_ref)
for remove_vol in remove_volumes_list:
try:
remove_vol_ref = self.db.volume_get(context, remove_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Cannot remove volume %(volume_id)s from "
"consistency group %(group_id)s because volume "
"cannot be found."),
{'volume_id': remove_vol_ref['id'],
'group_id': group_id})
raise
remove_volumes_ref.append(remove_vol_ref)
self._notify_about_consistencygroup_usage(
context, group, "update.start")
try:
utils.require_driver_initialized(self.driver)
LOG.debug("Consistency group %(group_id)s: updating",
{'group_id': group['id']})
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_consistencygroup(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
if add_volumes_update:
for update in add_volumes_update:
self.db.volume_update(context, update['id'], update)
if remove_volumes_update:
for update in remove_volumes_update:
self.db.volume_update(context, update['id'], update)
if model_update:
if model_update['status'] in ['error']:
msg = (_('Error occurred when updating consistency group '
'%s.') % group_id)
LOG.exception(msg)
raise exception.VolumeDriverException(message=msg)
self.db.consistencygroup_update(context, group_id,
model_update)
except exception.VolumeDriverException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred in the volume driver when "
"updating consistency group %(group_id)s."),
{'group_id': group_id})
self.db.consistencygroup_update(context, group_id,
{'status': 'error'})
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when updating consistency "
"group %(group_id)s."),
{'group_id': group['id']})
self.db.consistencygroup_update(context, group_id,
{'status': 'error'})
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
now = timeutils.utcnow()
self.db.consistencygroup_update(context, group_id,
{'status': 'available',
'updated_at': now})
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'consistencygroup_id': group_id,
'updated_at': now})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'consistencygroup_id': None,
'updated_at': now})
LOG.info(_LI("Consistency group %s: updated successfully."),
group_id)
self._notify_about_consistencygroup_usage(
context, group, "update.end")
return True
def create_cgsnapshot(self, context, group_id, cgsnapshot_id):
"""Creates the cgsnapshot."""
caller_context = context
context = context.elevated()
cgsnapshot_ref = self.db.cgsnapshot_get(context, cgsnapshot_id)
LOG.info(_LI("Cgsnapshot %s: creating."), cgsnapshot_ref['id'])
snapshots = self.db.snapshot_get_all_for_cgsnapshot(context,
cgsnapshot_id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot_ref, "create.start")
try:
utils.require_driver_initialized(self.driver)
LOG.debug("Cgsnapshot %(cgsnap_id)s: creating.",
{'cgsnap_id': cgsnapshot_id})
            # Pass the context so that drivers that want to use it can;
            # it is not a requirement for all drivers.
cgsnapshot_ref['context'] = caller_context
for snapshot in snapshots:
snapshot['context'] = caller_context
model_update, snapshots = \
self.driver.create_cgsnapshot(context, cgsnapshot_ref)
if snapshots:
for snapshot in snapshots:
# Update db if status is error
if snapshot['status'] == 'error':
update = {'status': snapshot['status']}
self.db.snapshot_update(context, snapshot['id'],
update)
# If status for one snapshot is error, make sure
# the status for the cgsnapshot is also error
if model_update['status'] != 'error':
model_update['status'] = snapshot['status']
if model_update:
if model_update['status'] == 'error':
msg = (_('Error occurred when creating cgsnapshot '
'%s.') % cgsnapshot_ref['id'])
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
except Exception:
with excutils.save_and_reraise_exception():
self.db.cgsnapshot_update(context,
cgsnapshot_ref['id'],
{'status': 'error'})
for snapshot in snapshots:
volume_id = snapshot['volume_id']
snapshot_id = snapshot['id']
vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot['id'], volume_id)
except exception.CinderException as ex:
LOG.error(_LE("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata") %
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
self.db.snapshot_update(context,
snapshot['id'],
{'status': 'error'})
raise exception.MetadataCopyFailure(reason=ex)
self.db.snapshot_update(context,
snapshot['id'], {'status': 'available',
'progress': '100%'})
self.db.cgsnapshot_update(context,
cgsnapshot_ref['id'],
{'status': 'available'})
LOG.info(_LI("cgsnapshot %s: created successfully"),
cgsnapshot_ref['id'])
self._notify_about_cgsnapshot_usage(
context, cgsnapshot_ref, "create.end")
return cgsnapshot_id
def delete_cgsnapshot(self, context, cgsnapshot_id):
"""Deletes cgsnapshot."""
caller_context = context
context = context.elevated()
cgsnapshot_ref = self.db.cgsnapshot_get(context, cgsnapshot_id)
project_id = cgsnapshot_ref['project_id']
LOG.info(_LI("cgsnapshot %s: deleting"), cgsnapshot_ref['id'])
snapshots = self.db.snapshot_get_all_for_cgsnapshot(context,
cgsnapshot_id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot_ref, "delete.start")
try:
utils.require_driver_initialized(self.driver)
LOG.debug("cgsnapshot %(cgsnap_id)s: deleting",
{'cgsnap_id': cgsnapshot_id})
            # Pass the context so that drivers that want to use it can;
            # it is not a requirement for all drivers.
cgsnapshot_ref['context'] = caller_context
for snapshot in snapshots:
snapshot['context'] = caller_context
model_update, snapshots = \
self.driver.delete_cgsnapshot(context, cgsnapshot_ref)
if snapshots:
for snapshot in snapshots:
update = {'status': snapshot['status']}
self.db.snapshot_update(context, snapshot['id'],
update)
if snapshot['status'] in ['error_deleting', 'error'] and \
model_update['status'] not in \
['error_deleting', 'error']:
model_update['status'] = snapshot['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Error occurred when deleting cgsnapshot '
'%s.') % cgsnapshot_ref['id'])
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
self.db.cgsnapshot_update(context, cgsnapshot_ref['id'],
model_update)
except Exception:
with excutils.save_and_reraise_exception():
self.db.cgsnapshot_update(context,
cgsnapshot_ref['id'],
{'status': 'error_deleting'})
for snapshot in snapshots:
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot['volume_size'],
}
volume_ref = self.db.volume_get(context, snapshot['volume_id'])
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting snapshot"))
self.db.volume_glance_metadata_delete_by_snapshot(context,
snapshot['id'])
self.db.snapshot_destroy(context, snapshot['id'])
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.db.cgsnapshot_destroy(context, cgsnapshot_id)
LOG.info(_LI("cgsnapshot %s: deleted successfully"),
cgsnapshot_ref['id'])
self._notify_about_cgsnapshot_usage(
context, cgsnapshot_ref, "delete.end", snapshots)
return True
def update_migrated_volume(self, ctxt, volume, new_volume):
"""Finalize migration process on backend device."""
model_update = None
model_update = self.driver.update_migrated_volume(ctxt,
volume,
new_volume)
if model_update:
self.db.volume_update(ctxt.elevated(),
volume['id'],
model_update)
| tmenjo/cinder-2015.1.0 | cinder/volume/manager.py | Python | apache-2.0 | 115,641 | 0 |
# Joey Velez-Ginorio
# Gridworld Implementation
# ---------------------------------
from mdp import MDP
from grid import Grid
from scipy.stats import uniform
from scipy.stats import beta
from scipy.stats import expon
import numpy as np
import random
import pyprind
import matplotlib.pyplot as plt
class GridWorld(MDP):
"""
Defines a gridworld environment to be solved by an MDP!
"""
def __init__(self, grid, goalVals, discount=.99, tau=.01, epsilon=.001):
MDP.__init__(self, discount=discount, tau=tau, epsilon=epsilon)
self.goalVals = goalVals
self.grid = grid
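        # Build the MDP (states, rewards, transitions) and solve it up front,
        # so self.policy is ready when simulate() is called.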
self.setGridWorld()
self.valueIteration()
self.extractPolicy()
def isTerminal(self, state):
"""
Specifies terminal conditions for gridworld.
"""
        return tuple(self.scalarToCoord(state)) in self.grid.objects.values()
def isObstacle(self, sCoord):
"""
Checks if a state is a wall or obstacle.
"""
if tuple(sCoord) in self.grid.walls:
return True
if sCoord[0] > (self.grid.row - 1) or sCoord[0] < 0:
return True
if sCoord[1] > (self.grid.col - 1) or sCoord[1] < 0:
return True
return False
def takeAction(self, sCoord, action):
"""
Receives an action value, performs associated movement.
"""
        # Compare by value rather than identity; 'is' on small ints only
        # works by accident of CPython's int caching and breaks for numpy
        # integer types.
        if action == 0:
            return self.up(sCoord)
        if action == 1:
            return self.down(sCoord)
        if action == 2:
            return self.left(sCoord)
        if action == 3:
            return self.right(sCoord)
        if action == 4:
            return sCoord
        if action == 5:
            return self.upleft(sCoord)
        if action == 6:
            return self.upright(sCoord)
        if action == 7:
            return self.downleft(sCoord)
        if action == 8:
            return self.downright(sCoord)
def up(self, sCoord):
"""
Move agent up, uses state coordinate.
"""
newCoord = np.copy(sCoord)
newCoord[0] -= 1
# Check if action takes you to a wall/obstacle
if not self.isObstacle(newCoord):
return newCoord
# You hit a wall, return original coord
else:
return sCoord
def upright(self, sCoord):
"""
Move agent up and right, uses state coordinate.
"""
newCoord = np.copy(sCoord)
newCoord[0] -= 1
newCoord[1] += 1
# Check if action takes you to a wall/obstacle
if not self.isObstacle(newCoord):
return newCoord
# You hit a wall, return original coord
else:
return sCoord
def upleft(self, sCoord):
"""
Move agent up and left, uses state coordinate.
"""
newCoord = np.copy(sCoord)
newCoord[0] -= 1
newCoord[1] -= 1
# Check if action takes you to a wall/obstacle
if not self.isObstacle(newCoord):
return newCoord
# You hit a wall, return original coord
else:
return sCoord
def down(self, sCoord):
"""
Move agent down, uses state coordinate.
"""
newCoord = np.copy(sCoord)
newCoord[0] += 1
# Check if action takes you to a wall/obstacle
if not self.isObstacle(newCoord):
return newCoord
# You hit a wall, return original coord
else:
return sCoord
def downleft(self, sCoord):
"""
        Move agent down and left, uses state coordinate.
"""
newCoord = np.copy(sCoord)
newCoord[0] += 1
newCoord[1] -= 1
# Check if action takes you to a wall/obstacle
if not self.isObstacle(newCoord):
return newCoord
# You hit a wall, return original coord
else:
return sCoord
def downright(self, sCoord):
"""
        Move agent down and right, uses state coordinate.
"""
newCoord = np.copy(sCoord)
newCoord[0] += 1
newCoord[1] += 1
# Check if action takes you to a wall/obstacle
if not self.isObstacle(newCoord):
return newCoord
# You hit a wall, return original coord
else:
return sCoord
def left(self, sCoord):
"""
Move agent left, uses state coordinate.
"""
newCoord = np.copy(sCoord)
newCoord[1] -= 1
# Check if action takes you to a wall/obstacle
if not self.isObstacle(newCoord):
return newCoord
# You hit a wall, return original coord
else:
return sCoord
def right(self, sCoord):
"""
Move agent right, uses state coordinate.
"""
newCoord = np.copy(sCoord)
newCoord[1] += 1
# Check if action takes you to a wall/obstacle
if not self.isObstacle(newCoord):
return newCoord
# You hit a wall, return original coord
else:
return sCoord
def coordToScalar(self, sCoord):
"""
Convert state coordinates to corresponding scalar state value.
"""
return sCoord[0]*(self.grid.col) + sCoord[1]
def scalarToCoord(self, scalar):
"""
Convert scalar state value into coordinates.
"""
        return np.array([scalar // self.grid.col, scalar % self.grid.col])
def getPossibleActions(self, sCoord):
"""
Will return a list of all possible actions from a current state.
"""
possibleActions = list()
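        # The movement helpers return the *same* sCoord object when a move is
        # blocked, so an identity check ('is not') detects whether the move
        # actually succeeded.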
if self.up(sCoord) is not sCoord:
possibleActions.append(0)
if self.down(sCoord) is not sCoord:
possibleActions.append(1)
if self.left(sCoord) is not sCoord:
possibleActions.append(2)
if self.right(sCoord) is not sCoord:
possibleActions.append(3)
if self.upleft(sCoord) is not sCoord:
possibleActions.append(5)
if self.upright(sCoord) is not sCoord:
possibleActions.append(6)
if self.downleft(sCoord) is not sCoord:
possibleActions.append(7)
if self.downright(sCoord) is not sCoord:
possibleActions.append(8)
return possibleActions
def setGridWorld(self):
"""
Initializes states, actions, rewards, transition matrix.
"""
# Possible coordinate positions + Death State
self.s = np.arange(self.grid.row*self.grid.col + 1)
        # 9 Actions {Up, Down, Left, Right, Stay, Up-Left, Up-Right, Down-Left, Down-Right}
self.a = np.arange(9)
# Reward Zones
self.r = np.zeros(len(self.s))
for i in range(len(self.grid.objects)):
self.r[self.coordToScalar(self.grid.objects.values()[i])] = self.goalVals[i]
self.r_sa = np.zeros([len(self.s),len(self.a)])
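        # Per (state, action) reward: goal value of the landing cell minus a
        # step cost of 1.0 for cardinal moves and 'stay' (actions 0-4), or
        # sqrt(2) for diagonal moves (actions 5-8).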
for i in range(len(self.s)):
for j in range(len(self.a)):
if j <= 4:
self.r_sa[i][j] = self.r[self.coordToScalar(self.takeAction(self.scalarToCoord(i),j))]-1.0
else:
self.r_sa[i][j] = self.r[self.coordToScalar(self.takeAction(self.scalarToCoord(i),j))]-np.sqrt(2)
self.r = self.r_sa
# Transition Matrix
self.t = np.zeros([len(self.s),len(self.a),len(self.s)])
for state in range(len(self.s)):
possibleActions = self.getPossibleActions(self.scalarToCoord(state))
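            # Goal states absorb on 'stay'; every other action sends the agent
            # to the dedicated death state at index len(self.s) - 1.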
if self.isTerminal(state):
for i in range(len(self.a)):
if i == 4:
self.t[state][4][state]=1.0
else:
self.t[state][i][len(self.s)-1] = 1.0
continue
            for action in self.a:
                # Deterministic dynamics: each action sends all probability
                # mass to the state reached by takeAction(); the 'stay'
                # action keeps the agent in place.
                currentState = self.scalarToCoord(state)
                nextState = self.takeAction(currentState, int(action))
                self.t[state][action][self.coordToScalar(nextState)] = 1.0
def simulate(self, state):
"""
Runs the solver for the MDP, conducts value iteration, extracts policy,
then runs simulation of problem.
NOTE: Be sure to run value iteration (solve values for states) and to
extract some policy (fill in policy vector) before running simulation
"""
# Run simulation using policy until terminal condition met
actions = ['up', 'down', 'left', 'right']
count = 0
while not self.isTerminal(state):
# Determine which policy to use (non-deterministic)
policy = self.policy[np.where(self.s == state)[0][0]]
p_policy = self.policy[np.where(self.s == state)[0][0]] / \
self.policy[np.where(self.s == state)[0][0]].sum()
# Get the parameters to perform one move
stateIndex = np.where(self.s == state)[0][0]
policyChoice = np.random.choice(policy, p=p_policy)
actionIndex = np.random.choice(np.array(np.where(self.policy[state][:] == policyChoice)).ravel())
# print actionIndex
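            # Accumulate path length: cardinal moves cost 1, diagonals sqrt(2).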
if actionIndex <= 3:
count += 1
else:
count += np.sqrt(2)
# Take an action, move to next state
nextState = self.takeAction(self.scalarToCoord(int(stateIndex)), int(actionIndex))
nextState = self.coordToScalar(nextState)
# print "In state: {}, taking action: {}, moving to state: {}".format(
# self.scalarToCoord(state), actions[actionIndex], self.scalarToCoord(nextState))
# End game if terminal state reached
state = int(nextState)
# if self.isTerminal(state):
# print "Terminal state: {} has been reached. Simulation over.".format(self.scalarToCoord(state))
return count
| joeyginorio/Action-Understanding-with-Rational-Rules | model_src/grid_world.py | Python | mit | 9,591 | 0.033886 |
import pylab
import string
import matplotlib
matplotlib.rcParams['figure.subplot.hspace']=.45
matplotlib.rcParams['figure.subplot.wspace']=.3
labels=('Step=1','Step=.5','Step=.25','Step=.01')
steps=(1,.5,.25,.01)
pylab.figure(figsize=(8.5,11))
for i,intxt in enumerate(('O_RK1.txt','O_RK_5.txt','O_RK_25.txt','O_RK_1.txt')):
infile=open(intxt,'r')
t=[]
xs=[]
ys=[]
Es=[]
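    # Each line of the O_*.txt files is assumed to be whitespace-separated,
    # with time in column 0, x/y position in columns 1-2 and total energy in
    # column 5 (columns 3-4, presumably velocities, are unused here).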
for line in infile.readlines():
        line=line.split()
t.append(float(line[0]))
xs.append(float(line[1]))
ys.append(float(line[2]))
Es.append(float(line[5]))
pylab.subplot(4,2,2*i+1)
pylab.plot(xs,ys,'-',lw=2)
pylab.ylim(-1,1)
pylab.xlim(-1,1)
pylab.xlabel('X')
pylab.ylabel('Y')
pylab.title('Step=%f'%(steps[i]))
pylab.subplot(4,2,2*i+2)
pylab.plot(t,Es,'-',lw=1)
pylab.xlim(0,100)
pylab.xlabel('Time')
pylab.ylabel('Energy')
pylab.suptitle('RK4 Orbit Integration')
pylab.savefig('RK4_orbit_int.pdf')
pylab.close()
pylab.figure(figsize=(8.5,11))
for i,intxt in enumerate(('O_LF1.txt','O_LF_5.txt','O_LF_25.txt','O_LF_1.txt')):
infile=open(intxt,'r')
t=[]
xs=[]
ys=[]
Es=[]
for line in infile.readlines():
        line=line.split()
t.append(float(line[0]))
xs.append(float(line[1]))
ys.append(float(line[2]))
Es.append(float(line[5]))
pylab.subplot(4,2,2*i+1)
pylab.plot(xs,ys,'-',lw=2)
pylab.ylim(-1,1)
pylab.xlim(-1,1)
pylab.xlabel('X')
pylab.ylabel('Y')
pylab.title('Step=%f'%(steps[i]))
pylab.subplot(4,2,2*i+2)
pylab.plot(t,Es,'-',lw=1)
pylab.xlim(0,100)
pylab.xlabel('Time')
pylab.ylabel('Energy')
pylab.suptitle('Leapfrog Orbit integration')
pylab.savefig('Leapfrog_orbit_int.pdf')
pylab.close()
| justincely/classwork | UMD/AST615/HW6_2/plot_orbit.py | Python | bsd-3-clause | 1,806 | 0.035991 |
import os
import sys
import json
import pytest
import subprocess
import time
from kat.harness import Query, is_ingress_class_compatible
from abstract_tests import AmbassadorTest, HTTP, ServiceType
from kat.utils import namespace_manifest
from tests.utils import KUBESTATUS_PATH
from ambassador.utils import parse_bool
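# Each test below uses kubestatus to push a fake LoadBalancer status onto the
# Ambassador Service and then asserts that the same status is copied onto the
# Ingress resource, i.e. that Ambassador propagates Ingress status updates.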
class IngressStatusTest1(AmbassadorTest):
status_update = {
"loadBalancer": {
"ingress": [{
"ip": "42.42.42.42"
}]
}
}
def init(self):
self.target = HTTP()
def manifests(self) -> str:
return """
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
annotations:
kubernetes.io/ingress.class: ambassador
getambassador.io/ambassador-id: {self.ambassador_id}
name: {self.name.k8s}
spec:
rules:
- http:
paths:
- backend:
serviceName: {self.target.path.k8s}
servicePort: 80
path: /{self.name}/
""" + super().manifests()
def queries(self):
if sys.platform != 'darwin':
text = json.dumps(self.status_update)
update_cmd = [KUBESTATUS_PATH, 'Service', '-n', 'default', '-f', f'metadata.name={self.name.k8s}', '-u', '/dev/fd/0']
subprocess.run(update_cmd, input=text.encode('utf-8'), timeout=10)
# If you run these tests individually, the time between running kubestatus
# and the ingress resource actually getting updated is longer than the
# time spent waiting for resources to be ready, so this test will fail (most of the time)
time.sleep(1)
yield Query(self.url(self.name + "/"))
yield Query(self.url(f'need-normalization/../{self.name}/'))
def check(self):
if not parse_bool(os.environ.get("AMBASSADOR_PYTEST_INGRESS_TEST", "false")):
pytest.xfail('AMBASSADOR_PYTEST_INGRESS_TEST not set, xfailing...')
if sys.platform == 'darwin':
pytest.xfail('not supported on Darwin')
for r in self.results:
if r.backend:
assert r.backend.name == self.target.path.k8s, (r.backend.name, self.target.path.k8s)
assert r.backend.request.headers['x-envoy-original-path'][0] == f'/{self.name}/'
# check for Ingress IP here
ingress_cmd = ["kubectl", "get", "-n", "default", "-o", "json", "ingress", self.path.k8s]
ingress_run = subprocess.Popen(ingress_cmd, stdout=subprocess.PIPE)
ingress_out, _ = ingress_run.communicate()
ingress_json = json.loads(ingress_out)
assert ingress_json['status'] == self.status_update, f"Expected Ingress status to be {self.status_update}, got {ingress_json['status']} instead"
class IngressStatusTest2(AmbassadorTest):
status_update = {
"loadBalancer": {
"ingress": [{
"ip": "84.84.84.84"
}]
}
}
def init(self):
self.target = HTTP()
def manifests(self) -> str:
return """
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
annotations:
kubernetes.io/ingress.class: ambassador
getambassador.io/ambassador-id: {self.ambassador_id}
name: {self.name.k8s}
spec:
rules:
- http:
paths:
- backend:
serviceName: {self.target.path.k8s}
servicePort: 80
path: /{self.name}/
""" + super().manifests()
def queries(self):
if sys.platform != 'darwin':
text = json.dumps(self.status_update)
update_cmd = [KUBESTATUS_PATH, 'Service', '-n', 'default', '-f', f'metadata.name={self.name.k8s}', '-u', '/dev/fd/0']
subprocess.run(update_cmd, input=text.encode('utf-8'), timeout=10)
# If you run these tests individually, the time between running kubestatus
# and the ingress resource actually getting updated is longer than the
# time spent waiting for resources to be ready, so this test will fail (most of the time)
time.sleep(1)
yield Query(self.url(self.name + "/"))
yield Query(self.url(f'need-normalization/../{self.name}/'))
def check(self):
if not parse_bool(os.environ.get("AMBASSADOR_PYTEST_INGRESS_TEST", "false")):
pytest.xfail('AMBASSADOR_PYTEST_INGRESS_TEST not set, xfailing...')
if sys.platform == 'darwin':
pytest.xfail('not supported on Darwin')
for r in self.results:
if r.backend:
assert r.backend.name == self.target.path.k8s, (r.backend.name, self.target.path.k8s)
assert r.backend.request.headers['x-envoy-original-path'][0] == f'/{self.name}/'
# check for Ingress IP here
ingress_cmd = ["kubectl", "get", "-n", "default", "-o", "json", "ingress", self.path.k8s]
ingress_run = subprocess.Popen(ingress_cmd, stdout=subprocess.PIPE)
ingress_out, _ = ingress_run.communicate()
ingress_json = json.loads(ingress_out)
assert ingress_json['status'] == self.status_update, f"Expected Ingress status to be {self.status_update}, got {ingress_json['status']} instead"
class IngressStatusTestAcrossNamespaces(AmbassadorTest):
status_update = {
"loadBalancer": {
"ingress": [{
"ip": "168.168.168.168"
}]
}
}
def init(self):
self.target = HTTP(namespace="alt-namespace")
def manifests(self) -> str:
return namespace_manifest("alt-namespace") + """
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
annotations:
kubernetes.io/ingress.class: ambassador
getambassador.io/ambassador-id: {self.ambassador_id}
name: {self.name.k8s}
namespace: alt-namespace
spec:
rules:
- http:
paths:
- backend:
serviceName: {self.target.path.k8s}
servicePort: 80
path: /{self.name}/
""" + super().manifests()
def queries(self):
if sys.platform != 'darwin':
text = json.dumps(self.status_update)
update_cmd = [KUBESTATUS_PATH, 'Service', '-n', 'default', '-f', f'metadata.name={self.name.k8s}', '-u', '/dev/fd/0']
subprocess.run(update_cmd, input=text.encode('utf-8'), timeout=10)
# If you run these tests individually, the time between running kubestatus
# and the ingress resource actually getting updated is longer than the
# time spent waiting for resources to be ready, so this test will fail (most of the time)
time.sleep(1)
yield Query(self.url(self.name + "/"))
yield Query(self.url(f'need-normalization/../{self.name}/'))
def check(self):
if not parse_bool(os.environ.get("AMBASSADOR_PYTEST_INGRESS_TEST", "false")):
pytest.xfail('AMBASSADOR_PYTEST_INGRESS_TEST not set, xfailing...')
if sys.platform == 'darwin':
pytest.xfail('not supported on Darwin')
for r in self.results:
if r.backend:
assert r.backend.name == self.target.path.k8s, (r.backend.name, self.target.path.k8s)
assert r.backend.request.headers['x-envoy-original-path'][0] == f'/{self.name}/'
# check for Ingress IP here
ingress_cmd = ["kubectl", "get", "-o", "json", "ingress", self.path.k8s, "-n", "alt-namespace"]
ingress_run = subprocess.Popen(ingress_cmd, stdout=subprocess.PIPE)
ingress_out, _ = ingress_run.communicate()
ingress_json = json.loads(ingress_out)
assert ingress_json['status'] == self.status_update, f"Expected Ingress status to be {self.status_update}, got {ingress_json['status']} instead"
class IngressStatusTestWithAnnotations(AmbassadorTest):
status_update = {
"loadBalancer": {
"ingress": [{
"ip": "200.200.200.200"
}]
}
}
def init(self):
self.target = HTTP()
def manifests(self) -> str:
return """
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
annotations:
getambassador.io/config: |
---
apiVersion: ambassador/v1
kind: Mapping
name: {self.name}-nested
prefix: /{self.name}-nested/
service: http://{self.target.path.fqdn}
ambassador_id: {self.ambassador_id}
kubernetes.io/ingress.class: ambassador
getambassador.io/ambassador-id: {self.ambassador_id}
name: {self.name.k8s}
spec:
rules:
- http:
paths:
- backend:
serviceName: {self.target.path.k8s}
servicePort: 80
path: /{self.name}/
""" + super().manifests()
def queries(self):
text = json.dumps(self.status_update)
update_cmd = [KUBESTATUS_PATH, 'Service', '-n', 'default', '-f', f'metadata.name={self.name.k8s}', '-u', '/dev/fd/0']
subprocess.run(update_cmd, input=text.encode('utf-8'), timeout=10)
# If you run these tests individually, the time between running kubestatus
# and the ingress resource actually getting updated is longer than the
# time spent waiting for resources to be ready, so this test will fail (most of the time)
time.sleep(1)
yield Query(self.url(self.name + "/"))
yield Query(self.url(self.name + "-nested/"))
yield Query(self.url(f'need-normalization/../{self.name}/'))
def check(self):
if not parse_bool(os.environ.get("AMBASSADOR_PYTEST_INGRESS_TEST", "false")):
pytest.xfail('AMBASSADOR_PYTEST_INGRESS_TEST not set, xfailing...')
# check for Ingress IP here
ingress_cmd = ["kubectl", "get", "-n", "default", "-o", "json", "ingress", self.path.k8s]
ingress_run = subprocess.Popen(ingress_cmd, stdout=subprocess.PIPE)
ingress_out, _ = ingress_run.communicate()
ingress_json = json.loads(ingress_out)
assert ingress_json['status'] == self.status_update, f"Expected Ingress status to be {self.status_update}, got {ingress_json['status']} instead"
class SameIngressMultipleNamespaces(AmbassadorTest):
status_update = {
"loadBalancer": {
"ingress": [{
"ip": "210.210.210.210"
}]
}
}
def init(self):
self.target = HTTP()
self.target1 = HTTP(name="target1", namespace="same-ingress-1")
self.target2 = HTTP(name="target2", namespace="same-ingress-2")
def manifests(self) -> str:
return namespace_manifest("same-ingress-1") + """
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
annotations:
kubernetes.io/ingress.class: ambassador
getambassador.io/ambassador-id: {self.ambassador_id}
name: {self.name.k8s}
namespace: same-ingress-1
spec:
rules:
- http:
paths:
- backend:
serviceName: {self.target.path.k8s}-target1
servicePort: 80
path: /{self.name}-target1/
""" + namespace_manifest("same-ingress-2") + """
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
annotations:
kubernetes.io/ingress.class: ambassador
getambassador.io/ambassador-id: {self.ambassador_id}
name: {self.name.k8s}
namespace: same-ingress-2
spec:
rules:
- http:
paths:
- backend:
serviceName: {self.target.path.k8s}-target2
servicePort: 80
path: /{self.name}-target2/
""" + super().manifests()
def queries(self):
if sys.platform != 'darwin':
text = json.dumps(self.status_update)
update_cmd = [KUBESTATUS_PATH, 'Service', '-n', 'default', '-f', f'metadata.name={self.name.k8s}', '-u', '/dev/fd/0']
subprocess.run(update_cmd, input=text.encode('utf-8'), timeout=10)
# If you run these tests individually, the time between running kubestatus
# and the ingress resource actually getting updated is longer than the
# time spent waiting for resources to be ready, so this test will fail (most of the time)
time.sleep(1)
yield Query(self.url(self.name + "-target1/"))
yield Query(self.url(self.name + "-target2/"))
def check(self):
if not parse_bool(os.environ.get("AMBASSADOR_PYTEST_INGRESS_TEST", "false")):
pytest.xfail('AMBASSADOR_PYTEST_INGRESS_TEST not set, xfailing...')
if sys.platform == 'darwin':
pytest.xfail('not supported on Darwin')
for namespace in ['same-ingress-1', 'same-ingress-2']:
# check for Ingress IP here
            ingress_cmd = ["kubectl", "get", "-o", "json", "ingress", self.path.k8s, "-n", namespace]
ingress_run = subprocess.Popen(ingress_cmd, stdout=subprocess.PIPE)
ingress_out, _ = ingress_run.communicate()
ingress_json = json.loads(ingress_out)
assert ingress_json['status'] == self.status_update, f"Expected Ingress status to be {self.status_update}, got {ingress_json['status']} instead"
class IngressStatusTestWithIngressClass(AmbassadorTest):
status_update = {
"loadBalancer": {
"ingress": [{
"ip": "42.42.42.42"
}]
}
}
def init(self):
self.target = HTTP()
if not is_ingress_class_compatible():
self.xfail = 'IngressClass is not supported in this cluster'
def manifests(self) -> str:
return """
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: {self.name.k8s}-ext
rules:
- apiGroups: ["networking.k8s.io"]
resources: ["ingressclasses"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: {self.name.k8s}-ext
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {self.name.k8s}-ext
subjects:
- kind: ServiceAccount
name: {self.path.k8s}
namespace: {self.namespace}
---
apiVersion: networking.k8s.io/v1beta1
kind: IngressClass
metadata:
annotations:
getambassador.io/ambassador-id: {self.ambassador_id}
name: {self.name.k8s}
spec:
controller: getambassador.io/ingress-controller
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
annotations:
getambassador.io/ambassador-id: {self.ambassador_id}
name: {self.name.k8s}
spec:
ingressClassName: {self.name.k8s}
rules:
- http:
paths:
- backend:
serviceName: {self.target.path.k8s}
servicePort: 80
path: /{self.name}/
""" + super().manifests()
def queries(self):
if sys.platform != 'darwin':
text = json.dumps(self.status_update)
update_cmd = [KUBESTATUS_PATH, 'Service', '-n', 'default', '-f', f'metadata.name={self.name.k8s}', '-u', '/dev/fd/0']
subprocess.run(update_cmd, input=text.encode('utf-8'), timeout=10)
# If you run these tests individually, the time between running kubestatus
# and the ingress resource actually getting updated is longer than the
# time spent waiting for resources to be ready, so this test will fail (most of the time)
time.sleep(1)
yield Query(self.url(self.name + "/"))
yield Query(self.url(f'need-normalization/../{self.name}/'))
def check(self):
if not parse_bool(os.environ.get("AMBASSADOR_PYTEST_INGRESS_TEST", "false")):
pytest.xfail('AMBASSADOR_PYTEST_INGRESS_TEST not set, xfailing...')
if sys.platform == 'darwin':
pytest.xfail('not supported on Darwin')
for r in self.results:
if r.backend:
assert r.backend.name == self.target.path.k8s, (r.backend.name, self.target.path.k8s)
assert r.backend.request.headers['x-envoy-original-path'][0] == f'/{self.name}/'
# check for Ingress IP here
ingress_cmd = ["kubectl", "get", "-n", "default", "-o", "json", "ingress", self.path.k8s]
ingress_run = subprocess.Popen(ingress_cmd, stdout=subprocess.PIPE)
ingress_out, _ = ingress_run.communicate()
ingress_json = json.loads(ingress_out)
assert ingress_json['status'] == self.status_update, f"Expected Ingress status to be {self.status_update}, got {ingress_json['status']} instead"
| datawire/ambassador | python/tests/kat/t_ingress.py | Python | apache-2.0 | 16,179 | 0.00309 |
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2011,2012,2013,2014,2015,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the add/show alias command."""
import unittest
if __name__ == '__main__':
from broker import utils
utils.import_depends()
from broker.brokertest import TestBrokerCommand
from eventstest import EventsTestMixin
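# The numeric prefixes on the test methods matter: unittest runs test methods
# in alphabetical order, so the add steps (test_1xx) execute before the
# show/verify steps that depend on them.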
class TestAddAlias(EventsTestMixin, TestBrokerCommand):
def test_100_add_alias2host(self):
self.event_add_dns(
fqdn='alias2host.aqd-unittest.ms.com',
dns_environment='internal',
dns_records=[
{
'target': 'arecord13.aqd-unittest.ms.com',
'targetEnvironmentName': 'internal',
'rrtype': 'CNAME'
},
],
)
cmd = ['add', 'alias', '--fqdn', 'alias2host.aqd-unittest.ms.com',
'--target', 'arecord13.aqd-unittest.ms.com']
self.noouttest(cmd)
self.events_verify()
def test_105_add_aliasduplicate(self):
cmd = ['add', 'alias', '--fqdn', 'alias2host.aqd-unittest.ms.com',
'--target', 'arecord13.aqd-unittest.ms.com']
out = self.badrequesttest(cmd)
self.matchoutput(out, "Alias alias2host.aqd-unittest.ms.com "
"already exists.", cmd)
def test_110_mscom_alias(self):
self.event_add_dns(
fqdn='alias.ms.com',
dns_environment='internal',
dns_records=[
{
'target': 'arecord13.aqd-unittest.ms.com',
'targetEnvironmentName': 'internal',
'rrtype': 'CNAME'
},
],
)
cmd = ['add', 'alias', '--fqdn', 'alias.ms.com',
'--target', 'arecord13.aqd-unittest.ms.com',
'--comments', 'Some alias comments']
self.dsdb_expect("add_host_alias "
"-host_name arecord13.aqd-unittest.ms.com "
"-alias_name alias.ms.com "
"-comments Some alias comments")
self.noouttest(cmd)
self.dsdb_verify()
self.events_verify()
def test_120_conflict_a_record(self):
cmd = ['add', 'alias', '--fqdn', 'arecord14.aqd-unittest.ms.com',
'--target', 'arecord13.aqd-unittest.ms.com']
out = self.badrequesttest(cmd)
self.matchoutput(out, "DNS Record arecord14.aqd-unittest.ms.com "
"already exists.", cmd)
def test_130_conflict_reserver_name(self):
cmd = ['add', 'alias', '--fqdn', 'nyaqd1.ms.com',
'--target', 'arecord13.aqd-unittest.ms.com']
out = self.badrequesttest(cmd)
self.matchoutput(out, "Reserved Name nyaqd1.ms.com already exists.", cmd)
def test_140_restricted_domain(self):
cmd = ["add", "alias", "--fqdn", "foo.restrict.aqd-unittest.ms.com",
"--target", "arecord13.aqd-unittest.ms.com"]
out = self.badrequesttest(cmd)
self.matchoutput(out,
"DNS Domain restrict.aqd-unittest.ms.com is "
"restricted, aliases are not allowed.",
cmd)
def test_150_add_alias2diff_environment(self):
self.event_add_dns(
fqdn='alias2host.aqd-unittest-ut-env.ms.com',
dns_environment='ut-env',
dns_records=[
{
'target': 'arecord13.aqd-unittest.ms.com',
'targetEnvironmentName': 'internal',
'rrtype': 'CNAME'
},
],
)
cmd = ['add', 'alias', '--fqdn', 'alias2host.aqd-unittest-ut-env.ms.com',
'--dns_environment', 'ut-env',
'--target', 'arecord13.aqd-unittest.ms.com',
'--target_environment', 'internal']
self.noouttest(cmd)
self.events_verify()
def test_155_add_alias2explicit_target_environment(self):
cmd = ['add', 'alias', '--fqdn', 'alias2alias.aqd-unittest-ut-env.ms.com',
'--dns_environment', 'ut-env',
'--target', 'alias2host.aqd-unittest-ut-env.ms.com',
'--target_environment', 'ut-env']
self.noouttest(cmd)
def test_160_add_alias_with_fqdn_in_diff_environment(self):
cmd = ['add', 'alias', '--fqdn', 'alias13.aqd-unittest.ms.com',
'--dns_environment', 'ut-env',
'--target', 'arecord13.aqd-unittest.ms.com',
'--target_environment', 'internal']
self.noouttest(cmd)
def test_200_autocreate_target(self):
cmd = ["add", "alias", "--fqdn", "restrict1.aqd-unittest.ms.com",
"--target", "target.restrict.aqd-unittest.ms.com"]
out = self.statustest(cmd)
self.matchoutput(out,
"WARNING: Will create a reference to "
"target.restrict.aqd-unittest.ms.com, but ",
cmd)
def test_201_verify_autocreate(self):
cmd = ["search", "dns", "--fullinfo",
"--fqdn", "target.restrict.aqd-unittest.ms.com"]
out = self.commandtest(cmd)
self.matchoutput(out,
"Reserved Name: target.restrict.aqd-unittest.ms.com",
cmd)
def test_201_verify_noprimary(self):
cmd = ["search", "dns", "--noprimary_name",
"--record_type", "reserved_name"]
out = self.commandtest(cmd)
self.matchoutput(out, "target.restrict.aqd-unittest.ms.com", cmd)
def test_210_autocreate_second_alias(self):
cmd = ["add", "alias", "--fqdn", "restrict2.aqd-unittest.ms.com",
"--target", "target.restrict.aqd-unittest.ms.com"]
self.noouttest(cmd)
def test_220_restricted_alias_no_dsdb(self):
cmd = ["add", "alias", "--fqdn", "restrict.ms.com",
"--target", "no-dsdb.restrict.aqd-unittest.ms.com"]
out = self.statustest(cmd)
self.matchoutput(out,
"WARNING: Will create a reference to "
"no-dsdb.restrict.aqd-unittest.ms.com, but ",
cmd)
self.dsdb_verify(empty=True)
def test_400_verify_alias2host(self):
cmd = "show alias --fqdn alias2host.aqd-unittest.ms.com"
out = self.commandtest(cmd.split(" "))
self.matchoutput(out, "Alias: alias2host.aqd-unittest.ms.com", cmd)
self.matchoutput(out, "Target: arecord13.aqd-unittest.ms.com", cmd)
self.matchoutput(out, "DNS Environment: internal", cmd)
def test_405_verify_host_shows_alias(self):
cmd = "show address --fqdn arecord13.aqd-unittest.ms.com"
out = self.commandtest(cmd.split(" "))
self.matchoutput(out, "Aliases: alias.ms.com, "
"alias13.aqd-unittest.ms.com [environment: ut-env], "
"alias2alias.aqd-unittest-ut-env.ms.com [environment: ut-env], "
"alias2host.aqd-unittest-ut-env.ms.com [environment: ut-env], "
"alias2host.aqd-unittest.ms.com", cmd)
def test_410_verify_mscom_alias(self):
cmd = "show alias --fqdn alias.ms.com"
out = self.commandtest(cmd.split(" "))
self.matchoutput(out, "Alias: alias.ms.com", cmd)
self.matchoutput(out, "Target: arecord13.aqd-unittest.ms.com", cmd)
self.matchoutput(out, "DNS Environment: internal", cmd)
self.matchoutput(out, "Comments: Some alias comments", cmd)
def test_420_verify_alias2diff_environment(self):
cmd = "show alias --fqdn alias2host.aqd-unittest-ut-env.ms.com --dns_environment ut-env"
out = self.commandtest(cmd.split(" "))
self.matchoutput(out, "Alias: alias2host.aqd-unittest-ut-env.ms.com", cmd)
self.matchoutput(out, "Target: arecord13.aqd-unittest.ms.com [environment: internal]", cmd)
self.matchoutput(out, "DNS Environment: ut-env", cmd)
def test_425_verify_alias2alias_with_diff_environment(self):
cmd = "show alias --fqdn alias2alias.aqd-unittest-ut-env.ms.com --dns_environment ut-env"
out = self.commandtest(cmd.split(" "))
self.matchoutput(out, "Alias: alias2alias.aqd-unittest-ut-env.ms.com", cmd)
self.matchoutput(out, "Target: alias2host.aqd-unittest-ut-env.ms.com", cmd)
self.matchoutput(out, "DNS Environment: ut-env", cmd)
def test_500_add_alias2alias(self):
cmd = ['add', 'alias', '--fqdn', 'alias2alias.aqd-unittest.ms.com',
'--target', 'alias2host.aqd-unittest.ms.com', '--ttl', 60]
self.noouttest(cmd)
def test_510_add_alias3alias(self):
cmd = ['add', 'alias', '--fqdn', 'alias3alias.aqd-unittest.ms.com',
'--target', 'alias2alias.aqd-unittest.ms.com']
self.noouttest(cmd)
def test_520_add_alias4alias(self):
cmd = ['add', 'alias', '--fqdn', 'alias4alias.aqd-unittest.ms.com',
'--target', 'alias3alias.aqd-unittest.ms.com']
self.noouttest(cmd)
def test_530_add_alias5alias_fail(self):
cmd = ['add', 'alias', '--fqdn', 'alias5alias.aqd-unittest.ms.com',
'--target', 'alias4alias.aqd-unittest.ms.com']
out = self.badrequesttest(cmd)
self.matchoutput(out, "Maximum alias depth exceeded", cmd)
def test_600_verify_alias2alias(self):
cmd = 'show alias --fqdn alias2alias.aqd-unittest.ms.com'
out = self.commandtest(cmd.split(" "))
self.matchoutput(out, 'Alias: alias2alias.aqd-unittest.ms.com', cmd)
self.matchoutput(out, 'TTL: 60', cmd)
def test_601_verify_alias2alias_backwards(self):
cmd = 'show alias --fqdn alias2host.aqd-unittest.ms.com'
out = self.commandtest(cmd.split(" "))
self.matchoutput(out, "Aliases: alias2alias.aqd-unittest.ms.com", cmd)
def test_602_verify_alias2alias_recursive(self):
cmd = 'show address --fqdn arecord13.aqd-unittest.ms.com'
out = self.commandtest(cmd.split(" "))
self.matchoutput(out,
"Aliases: alias.ms.com, "
"alias13.aqd-unittest.ms.com [environment: ut-env], "
"alias2alias.aqd-unittest-ut-env.ms.com [environment: ut-env], "
"alias2alias.aqd-unittest.ms.com, "
"alias2host.aqd-unittest-ut-env.ms.com [environment: ut-env], "
"alias2host.aqd-unittest.ms.com, "
"alias3alias.aqd-unittest.ms.com, "
"alias4alias.aqd-unittest.ms.com",
cmd)
def test_700_show_alias_host(self):
ip = self.net["zebra_eth0"].usable[0]
command = ["add", "alias", "--fqdn", "alias0.aqd-unittest.ms.com",
"--target", "unittest20-e0.aqd-unittest.ms.com"]
out = self.commandtest(command)
command = ["add", "alias", "--fqdn", "alias01.aqd-unittest.ms.com",
"--target", "alias0.aqd-unittest.ms.com"]
out = self.commandtest(command)
command = ["show", "host", "--hostname", "unittest20.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.searchoutput(out,
r'Provides: unittest20-e0.aqd-unittest.ms.com \[%s\]\s*'
r'Aliases: alias0.aqd-unittest.ms.com, alias01.aqd-unittest.ms.com'
% ip,
command)
command = ["show", "host", "--hostname", "unittest20.aqd-unittest.ms.com",
"--format", "proto"]
host = self.protobuftest(command, expect=1)[0]
self.assertEqual(host.hostname, 'unittest20')
interfaces = {iface.device: iface for iface in host.machine.interfaces}
self.assertIn("eth0", interfaces)
self.assertEqual(interfaces["eth0"].aliases[0], 'alias0.aqd-unittest.ms.com')
self.assertEqual(interfaces["eth0"].aliases[1], 'alias01.aqd-unittest.ms.com')
self.assertEqual(interfaces["eth0"].ip, str(ip))
self.assertEqual(interfaces["eth0"].fqdn, 'unittest20-e0.aqd-unittest.ms.com')
command = ["del", "alias", "--fqdn", "alias01.aqd-unittest.ms.com"]
out = self.commandtest(command)
command = ["del", "alias", "--fqdn", "alias0.aqd-unittest.ms.com"]
out = self.commandtest(command)
def test_710_show_alias_host(self):
ip = self.net["zebra_eth1"].usable[3]
command = ["add", "alias", "--fqdn", "alias1.aqd-unittest.ms.com",
"--target", "unittest20-e1-1.aqd-unittest.ms.com"]
out = self.commandtest(command)
command = ["add", "alias", "--fqdn", "alias11.aqd-unittest.ms.com",
"--target", "alias1.aqd-unittest.ms.com"]
out = self.commandtest(command)
command = ["show", "host", "--hostname", "unittest20.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.searchoutput(out,
r'Provides: unittest20-e1-1.aqd-unittest.ms.com \[%s\] \(label: e1\)\s*'
r'Aliases: alias1.aqd-unittest.ms.com, alias11.aqd-unittest.ms.com'
% ip,
command)
command = ["show", "host", "--hostname", "unittest20.aqd-unittest.ms.com",
"--format", "proto"]
host = self.protobuftest(command, expect=1)[0]
self.assertEqual(host.hostname, 'unittest20')
interfaces = {iface.device: iface for iface in host.machine.interfaces}
self.assertIn("eth1:e1", interfaces)
self.assertEqual(interfaces["eth1:e1"].aliases[0], 'alias1.aqd-unittest.ms.com')
self.assertEqual(interfaces["eth1:e1"].aliases[1], 'alias11.aqd-unittest.ms.com')
self.assertEqual(interfaces["eth1:e1"].ip, str(ip))
self.assertEqual(interfaces["eth1:e1"].fqdn, 'unittest20-e1-1.aqd-unittest.ms.com')
command = ["del", "alias", "--fqdn", "alias11.aqd-unittest.ms.com"]
out = self.commandtest(command)
command = ["del", "alias", "--fqdn", "alias1.aqd-unittest.ms.com"]
out = self.commandtest(command)
def test_800_grn(self):
command = ["add", "alias",
"--fqdn", "alias2host-grn.aqd-unittest.ms.com",
"--target", "arecord50.aqd-unittest.ms.com",
"--grn", "grn:/ms/ei/aquilon/aqd"]
self.noouttest(command)
def test_805_verify_grn(self):
command = ["search", "dns", "--fullinfo",
"--fqdn", "alias2host-grn.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "Owned by GRN: grn:/ms/ei/aquilon/aqd",
command)
def test_810_eon_id(self):
command = ["add", "alias",
"--fqdn", "alias2host-eon-id.aqd-unittest.ms.com",
"--target", "arecord51.aqd-unittest.ms.com",
"--eon_id", "3"]
self.noouttest(command)
def test_815_verify_eon_id(self):
command = ["search", "dns", "--fullinfo",
"--fqdn", "alias2host-eon-id.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "Owned by GRN: grn:/ms/ei/aquilon/unittest",
command)
def test_850_grn_conflict_with_primary_name(self):
command = ["add", "alias",
"--fqdn", "alias2host-bad-target.aqd-unittest.ms.com",
"--target", "unittest00.one-nyp.ms.com",
"--grn", "grn:/ms/ei/aquilon/unittest"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Alias alias2host-bad-target.aqd-unittest.ms.com "
"depends on DNS Record unittest00.one-nyp.ms.com. "
"It conflicts with GRN grn:/ms/ei/aquilon/unittest: "
"DNS Record unittest00.one-nyp.ms.com is a primary "
"name. GRN should not be set but derived from the "
"device.",
command)
def test_860_grn_conflict_with_service_address(self):
command = ["add", "alias",
"--fqdn", "alias2host-bad-target.aqd-unittest.ms.com",
"--target", "zebra2.aqd-unittest.ms.com",
"--grn", "grn:/ms/ei/aquilon/unittest"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Alias alias2host-bad-target.aqd-unittest.ms.com "
"depends on DNS Record zebra2.aqd-unittest.ms.com. "
"It conflicts with GRN grn:/ms/ei/aquilon/unittest: "
"DNS Record zebra2.aqd-unittest.ms.com is a service "
"address. GRN should not be set but derived from the "
"device.",
command)
def test_870_grn_conflict_with_interface_address(self):
command = ["add", "alias",
"--fqdn", "alias2host-bad-target.aqd-unittest.ms.com",
"--target", "unittest20-e1.aqd-unittest.ms.com",
"--grn", "grn:/ms/ei/aquilon/unittest"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Alias alias2host-bad-target.aqd-unittest.ms.com "
"depends on DNS Record "
"unittest20-e1.aqd-unittest.ms.com. "
"It conflicts with GRN grn:/ms/ei/aquilon/unittest: "
"DNS Record unittest20-e1.aqd-unittest.ms.com is "
"already be used by the interfaces "
"unittest20.aqd-unittest.ms.com/eth1. GRN should not "
"be set but derived from the device.",
command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestAddAlias)
unittest.TextTestRunner(verbosity=2).run(suite)
| quattor/aquilon | tests/broker/test_add_alias.py | Python | apache-2.0 | 18,764 | 0.001439 |
from datetime import date
from mock import patch
from nose.tools import eq_
from kitsune.sumo import googleanalytics
from kitsune.sumo.tests import TestCase
from kitsune.wiki.tests import document, revision
class GoogleAnalyticsTests(TestCase):
"""Tests for the Google Analytics API helper."""
@patch.object(googleanalytics, '_build_request')
def test_visitors(self, _build_request):
"""Test googleanalytics.visitors()."""
execute = _build_request.return_value.get.return_value.execute
execute.return_value = VISITORS_RESPONSE
visits = googleanalytics.visitors(
date(2013, 01, 16), date(2013, 01, 16))
eq_(1, len(visits))
eq_(382719, visits['2013-01-16'])
@patch.object(googleanalytics, '_build_request')
def test_visitors_by_locale(self, _build_request):
"""Test googleanalytics.visits_by_locale()."""
execute = _build_request.return_value.get.return_value.execute
execute.return_value = VISITORS_BY_LOCALE_RESPONSE
visits = googleanalytics.visitors_by_locale(
date(2013, 01, 16), date(2013, 01, 16))
eq_(50, len(visits))
eq_(221447, visits['en-US'])
eq_(24432, visits['es'])
@patch.object(googleanalytics, '_build_request')
def test_pageviews_by_document(self, _build_request):
"""Test googleanalytics.pageviews_by_document()."""
execute = _build_request.return_value.get.return_value.execute
execute.return_value = PAGEVIEWS_BY_DOCUMENT_RESPONSE
# Add some documents that match the response data.
documents = []
for i in range(1, 6):
documents.append(revision(
document=document(slug='doc-%s' % i, save=True),
is_approved=True,
save=True).document)
pageviews = googleanalytics.pageviews_by_document(
date(2013, 01, 16), date(2013, 01, 16))
eq_(5, len(pageviews))
eq_(1, pageviews[documents[0].pk])
eq_(2, pageviews[documents[1].pk])
eq_(10, pageviews[documents[2].pk])
eq_(39, pageviews[documents[3].pk])
eq_(46, pageviews[documents[4].pk])
@patch.object(googleanalytics, '_build_request')
def test_pageviews_by_question(self, _build_request):
"""Test googleanalytics.pageviews_by_question()."""
execute = _build_request.return_value.get.return_value.execute
execute.return_value = PAGEVIEWS_BY_QUESTION_RESPONSE
pageviews = googleanalytics.pageviews_by_question(
date(2013, 01, 16), date(2013, 01, 16))
eq_(3, len(pageviews))
eq_(3, pageviews[1])
eq_(2, pageviews[2])
eq_(11, pageviews[3])
@patch.object(googleanalytics, '_build_request')
def test_search_ctr(self, _build_request):
"""Test googleanalytics.search_ctr()."""
execute = _build_request.return_value.get.return_value.execute
execute.return_value = SEARCH_CTR_RESPONSE
ctr = googleanalytics.search_ctr(
date(2013, 6, 6), date(2013, 6, 6))
eq_(1, len(ctr))
eq_(74.88925980111263, ctr['2013-06-06'])
VISITORS_RESPONSE = {
u'kind': u'analytics#gaData',
u'rows': [[u'382719']], # <~ The number we are looking for.
u'containsSampledData': False,
u'profileInfo': {
u'webPropertyId': u'UA-1234567890',
u'internalWebPropertyId': u'1234567890',
u'tableId': u'ga:1234567890',
u'profileId': u'1234567890',
u'profileName': u'support.mozilla.org - Production Only',
u'accountId': u'1234567890'},
u'itemsPerPage': 1000,
u'totalsForAllResults': {
u'ga:visitors': u'382719'},
u'columnHeaders': [
{u'dataType': u'INTEGER',
u'columnType': u'METRIC',
u'name': u'ga:visitors'}],
u'query': {
u'max-results': 1000,
u'dimensions': u'',
u'start-date': u'2013-01-16',
u'start-index': 1,
u'ids': u'ga:1234567890',
u'metrics': [u'ga:visitors'],
u'end-date': u'2013-01-16'
},
u'totalResults': 1,
u'id': ('https://www.googleapis.com/analytics/v3/data/ga'
'?ids=ga:1234567890&metrics=ga:visitors&start-date=2013-01-16'
'&end-date=2013-01-16'),
u'selfLink': ('https://www.googleapis.com/analytics/v3/data/ga'
'?ids=ga:1234567890&metrics=ga:visitors'
'&start-date=2013-01-16&end-date=2013-01-16')
}
VISITORS_BY_LOCALE_RESPONSE = {
u'kind': u'analytics#gaData',
u'rows': [
[u'/1/', u'16'],
[u'/ach/', u'24'],
[u'/ak/', u'32'],
[u'/ar/', u'3362'],
[u'/as/', u'10'],
[u'/ast/', u'6'],
[u'/be/', u'13'],
[u'/bg/', u'989'],
[u'/bn-BD/', u'21'],
[u'/bn-IN/', u'3'],
[u'/bs/', u'73'],
[u'/ca/', u'432'],
[u'/cs/', u'3308'],
[u'/da/', u'947'],
[u'/de/', u'37313'],
[u'/el/', u'1720'],
[u'/en-US/', u'221447'],
[u'/eo/', u'12'],
[u'/es/', u'24432'],
[u'/et/', u'226'],
[u'/eu/', u'122'],
[u'/fa/', u'356'],
[u'/favicon.ico', u'4'],
[u'/ff/', u'6'],
[u'/fi/', u'2318'],
[u'/fr/', u'24922'],
[u'/fur/', u'5'],
[u'/fy-NL/', u'2'],
[u'/ga-IE/', u'7'],
[u'/gd/', u'7'],
[u'/gl/', u'43'],
[u'/gu-IN/', u'3'],
[u'/he/', u'202'],
[u'/hi-IN/', u'21'],
[u'/hr/', u'677'],
[u'/hu/', u'2873'],
[u'/hy-AM/', u'14'],
[u'/id/', u'3390'],
[u'/ilo/', u'5'],
[u'/is/', u'39'],
[u'/it/', u'9986'],
[u'/ja/', u'15508'],
[u'/kk/', u'9'],
[u'/km/', u'8'],
[u'/kn/', u'7'],
[u'/ko/', u'858'],
[u'/lt/', u'536'],
[u'/mai/', u'12'],
[u'/mk/', u'58'],
[u'/ml/', u'10'],
[u'/mn/', u'42'],
[u'/mr/', u'10'],
[u'/ms/', u'14'],
[u'/my/', u'413'],
[u'/nb-NO/', u'714'],
[u'/ne-NP/', u'7'],
[u'/nl/', u'4970'],
[u'/no/', u'135'],
[u'/pa-IN/', u'10'],
[u'/pl/', u'9701'],
[u'/pt-BR/', u'12299'],
[u'/pt-PT/', u'1332'],
[u'/rm/', u'8'],
[u'/ro/', u'1221'],
[u'/ru/', u'26194'],
[u'/rw/', u'5'],
[u'/si/', u'21'],
[u'/sk/', u'875'],
[u'/sl/', u'530'],
[u'/son/', u'1'],
[u'/sq/', u'27'],
[u'/sr-Cyrl/', u'256'],
[u'/sv/', u'1488'],
[u'/ta-LK/', u'13'],
[u'/ta/', u'13'],
[u'/te/', u'6'],
[u'/th/', u'2936'],
[u'/tr/', u'3470'],
[u'/uk/', u'434'],
[u'/vi/', u'4880'],
[u'/zh-CN/', u'5640'],
[u'/zh-TW/', u'3508']
],
u'containsSampledData': False,
u'profileInfo': {
u'webPropertyId': u'UA-1234567890',
u'internalWebPropertyId': u'1234567890',
u'tableId': u'ga:1234567890',
u'profileId': u'1234567890',
u'profileName': u'support.mozilla.org - Production Only',
u'accountId': u'1234567890'
},
u'itemsPerPage': 1000,
u'totalsForAllResults': {
u'ga:visitors': u'437598'},
u'columnHeaders': [
{u'dataType': u'STRING',
u'columnType': u'DIMENSION',
u'name': u'ga:pagePathLevel1'},
{u'dataType': u'INTEGER',
u'columnType': u'METRIC',
u'name': u'ga:visitors'}
],
u'query': {
u'max-results': 1000,
u'dimensions': u'ga:pagePathLevel1',
u'start-date': u'2013-01-16',
u'start-index': 1,
u'ids': u'ga:1234567890',
u'metrics': [u'ga:visitors'],
u'end-date': u'2013-01-16'
},
u'totalResults': 83,
u'id': ('https://www.googleapis.com/analytics/v3/data/ga'
'?ids=ga:1234567890&dimensions=ga:pagePathLevel1'
'&metrics=ga:visitors&start-date=2013-01-16&end-date=2013-01-16'),
u'selfLink': ('https://www.googleapis.com/analytics/v3/data/ga'
'?ids=ga:1234567890&dimensions=ga:pagePathLevel1'
'&metrics=ga:visitors&start-date=2013-01-16'
'&end-date=2013-01-16'),
}
PAGEVIEWS_BY_DOCUMENT_RESPONSE = {
u'kind': u'analytics#gaData',
u'rows': [
[u'/en-US/kb/doc-1', u'1'], # Counts as a pageview.
[u'/en-US/kb/doc-1/edit', u'2'], # Doesn't count as a pageview
[u'/en-US/kb/doc-1/history', u'1'], # Doesn't count as a pageview
[u'/en-US/kb/doc-2', u'2'], # Counts as a pageview.
[u'/en-US/kb/doc-3', u'10'], # Counts as a pageview.
[u'/en-US/kb/doc-4', u'39'], # Counts as a pageview.
[u'/en-US/kb/doc-5', u'40'], # Counts as a pageview.
[u'/en-US/kb/doc-5/discuss', u'1'], # Doesn't count as a pageview
[u'/en-US/kb/doc-5?param=ab', u'2'], # Counts as a pageview.
[u'/en-US/kb/doc-5?param=cd', u'4']], # Counts as a pageview.
u'containsSampledData': False,
u'columnHeaders': [
{u'dataType': u'STRING',
u'columnType': u'DIMENSION',
u'name': u'ga:pagePath'},
{u'dataType': u'INTEGER',
u'columnType': u'METRIC',
u'name': u'ga:pageviews'}
],
u'profileInfo': {
u'webPropertyId': u'UA-1234567890',
u'internalWebPropertyId': u'1234567890',
u'tableId': u'ga:1234567890',
u'profileId': u'1234567890',
u'profileName': u'support.mozilla.org - Production Only',
u'accountId': u'1234567890'},
u'itemsPerPage': 10,
u'totalsForAllResults': {
u'ga:pageviews': u'164293'},
u'nextLink': ('https://www.googleapis.com/analytics/v3/data/ga'
'?ids=ga:1234567890&dimensions=ga:pagePath'
'&metrics=ga:pageviews&filters=ga:pagePathLevel2%3D%3D/kb/'
';ga:pagePathLevel1%3D%3D/en-US/&start-date=2013-01-17'
'&end-date=2013-01-17&start-index=11&max-results=10'),
u'query': {
u'max-results': 10,
u'dimensions': u'ga:pagePath',
u'start-date': u'2013-01-17',
u'start-index': 1,
u'ids': u'ga:1234567890',
u'metrics': [u'ga:pageviews'],
u'filters': u'ga:pagePathLevel2==/kb/;ga:pagePathLevel1==/en-US/',
u'end-date': u'2013-01-17'},
u'totalResults': 10,
u'id': ('https://www.googleapis.com/analytics/v3/data/ga?ids=ga:1234567890'
'&dimensions=ga:pagePath&metrics=ga:pageviews'
'&filters=ga:pagePathLevel2%3D%3D/kb/;'
'ga:pagePathLevel1%3D%3D/en-US/&start-date=2013-01-17'
'&end-date=2013-01-17&start-index=1&max-results=10'),
u'selfLink': ('https://www.googleapis.com/analytics/v3/data/ga'
'?ids=ga:1234567890&dimensions=ga:pagePath&'
'metrics=ga:pageviews&filters=ga:pagePathLevel2%3D%3D/kb/;'
'ga:pagePathLevel1%3D%3D/en-US/&start-date=2013-01-17'
'&end-date=2013-01-17&start-index=1&max-results=10')
}
PAGEVIEWS_BY_QUESTION_RESPONSE = {
u'columnHeaders': [
{u'columnType': u'DIMENSION',
u'dataType': u'STRING',
u'name': u'ga:pagePath'},
{u'columnType': u'METRIC',
u'dataType': u'INTEGER',
u'name': u'ga:pageviews'}],
u'containsSampledData': False,
u'id': ('https://www.googleapis.com/analytics/v3/data/ga?ids=ga:65912487'
'&dimensions=ga:pagePath&metrics=ga:pageviews'
'&filters=ga:pagePathLevel2%3D%3D/questions/&start-date=2013-01-01'
'&end-date=2013-01-02&start-index=1&max-results=10'),
u'itemsPerPage': 10,
u'kind': u'analytics#gaData',
u'nextLink': ('https://www.googleapis.com/analytics/v3/data/ga'
'?ids=ga:65912487&dimensions=ga:pagePath'
'&metrics=ga:pageviews'
'&filters=ga:pagePathLevel2%3D%3D/questions/'
'&start-date=2013-01-01&end-date=2013-01-02'
'&start-index=11&max-results=10'),
u'profileInfo': {
u'accountId': u'36116321',
u'internalWebPropertyId': u'64136921',
u'profileId': u'65912487',
u'profileName': u'support.mozilla.org - Production Only',
u'tableId': u'ga:65912487',
u'webPropertyId': u'UA-36116321-2'},
u'query': {
u'dimensions': u'ga:pagePath',
u'end-date': u'2013-01-02',
u'filters': u'ga:pagePathLevel2==/questions/',
u'ids': u'ga:65912487',
u'max-results': 10,
u'metrics': [u'ga:pageviews'],
u'start-date': u'2013-01-01',
u'start-index': 1},
u'rows': [
[u'/en-US/questions/1', u'2'], # Counts as a pageview.
[u'/es/questions/1', u'1'], # Counts as a pageview.
[u'/en-US/questions/1/edit', u'3'], # Doesn't count as a pageview
[u'/en-US/questions/stats', u'1'], # Doesn't count as a pageview
[u'/en-US/questions/2', u'1'], # Counts as a pageview.
[u'/en-US/questions/2?mobile=1', u'1'], # Counts as a pageview.
[u'/en-US/questions/2/foo', u'2'], # Doesn't count as a pageview
[u'/en-US/questions/bar', u'1'], # Doesn't count as a pageview
[u'/es/questions/3?mobile=0', u'10'], # Counts as a pageview.
[u'/es/questions/3?lang=en-US', u'1']], # Counts as a pageview.
u'selfLink': ('https://www.googleapis.com/analytics/v3/data/ga'
'?ids=ga:65912487&dimensions=ga:pagePath'
'&metrics=ga:pageviews'
'&filters=ga:pagePathLevel2%3D%3D/questions/'
'&start-date=2013-01-01&end-date=2013-01-02'
'&start-index=1&max-results=10'),
u'totalResults': 10,
u'totalsForAllResults': {u'ga:pageviews': u'242403'}}
SEARCH_CTR_RESPONSE = {
u'kind': u'analytics#gaData',
u'rows': [[u'74.88925980111263']], # <~ The number we are looking for.
u'containsSampledData': False,
u'profileInfo': {
u'webPropertyId': u'UA-36116321-2',
u'internalWebPropertyId': u'64136921',
u'tableId': u'ga:65912487',
u'profileId': u'65912487',
u'profileName': u'support.mozilla.org - Production Only',
u'accountId': u'36116321'},
u'itemsPerPage': 1000,
u'totalsForAllResults': {
u'ga:goal11ConversionRate': u'74.88925980111263'},
u'columnHeaders': [
{u'dataType': u'PERCENT',
u'columnType': u'METRIC',
u'name': u'ga:goal11ConversionRate'}],
u'query': {
u'max-results': 1000,
u'start-date': u'2013-06-06',
u'start-index': 1,
u'ids': u'ga:65912487',
u'metrics': [u'ga:goal11ConversionRate'],
u'end-date': u'2013-06-06'},
u'totalResults': 1,
u'id': ('https://www.googleapis.com/analytics/v3/data/ga?ids=ga:65912487'
'&metrics=ga:goal11ConversionRate&start-date=2013-06-06'
'&end-date=2013-06-06'),
u'selfLink': ('https://www.googleapis.com/analytics/v3/data/ga'
'?ids=ga:65912487&metrics=ga:goal11ConversionRate&'
'start-date=2013-06-06&end-date=2013-06-06'),
}
| dbbhattacharya/kitsune | kitsune/sumo/tests/test_googleanalytics.py | Python | bsd-3-clause | 15,153 | 0 |
# coding: utf-8
import sys
from setuptools import setup, find_packages
NAME = "pollster"
VERSION = "2.0.2"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil", "pandas >= 0.19.1"]
setup(
name=NAME,
version=VERSION,
description="Pollster API",
author_email="Adam Hooper <adam.hooper@huffingtonpost.com>",
url="https://github.com/huffpostdata/python-pollster",
keywords=["Pollster API"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""Download election-related polling data from Pollster."""
)
| huffpostdata/python-pollster | setup.py | Python | bsd-2-clause | 756 | 0.003968 |
#!/usr/bin/env python3
import argparse
import datetime
import getpass
import json
import logging
import logging.config
import os
import re
import sys
import tabulate
import uuid
from critsapi.critsapi import CRITsAPI
from critsapi.critsdbapi import CRITsDBAPI
from lib.pt.common.config import Config
from lib.pt.common.constants import PT_HOME
from lib.pt.core.database import Database
from lib.pt.ptapi import PTAPI
from lib.crits.vocabulary.indicators import IndicatorTypes as it
from operator import itemgetter
from configparser import ConfigParser
log = logging.getLogger()
VERSION = "0.1337"
# Check configuration directory
local_config_dir = os.path.join(PT_HOME, 'etc', 'local')
if not os.path.exists(local_config_dir):
os.makedirs(local_config_dir)
sys.exit('No etc/local/ directory. See README to create.')
config = Config()
# Check local data directory
if config.core.cache_enabled:
if not os.path.exists(config.core.cache_dir):
log.info('Creating Cache directory in '
'{}'.format(config.core.cache_dir))
os.makedirs(config.core.cache_dir)
# Initialize logging
log_path = os.path.join(PT_HOME, 'etc', 'local', 'logging.ini')
try:
logging.config.fileConfig(log_path)
except Exception as e:
sys.exit('unable to load logging configuration file {}: '
'{}'.format(log_path, str(e)))
pt = PTAPI(username=config.core.pt_username, apikey=config.core.pt_apikey)
pt.set_proxy(http=config.proxy.http, https=config.proxy.https)
argparser = argparse.ArgumentParser()
argparser.add_argument('QUERY', action='store', help='A value to send as a'
' query to PT. Email, phone, name, etc.')
argparser.add_argument('--dev', dest='dev', action='store_true', default=False)
argparser.add_argument('--crits', dest='crits', action='store_true',
default=False, help='Write the results to CRITs with'
' appropriate relationships.')
argparser.add_argument('--test', dest='test', action='store_true',
default=False, help='Run with test data. (Save PT '
'queries)')
argparser.add_argument('-f', dest='force', action='store_true', default=False,
help='Force a new API query (do not used cached '
'results.')
argparser.add_argument('-t', action='append', dest='tags', default=[],
help='Bucket list tags for crits. Multiple -t options '
'are allowed.')
# Add our mutually exclusive items
meg = argparser.add_mutually_exclusive_group()
meg.add_argument('-n', dest='name', action='store_true', default=False,
help='The query is a name and pt_query will not try to '
'determine the type automatically.')
meg.add_argument('-a', dest='address', action='store_true', default=False,
help='The query is an address and pt_query will not '
'try to determine the type automatically.')
args = argparser.parse_args()
# Patterns for determining which type of lookup to do
# Some items cannot be differentiated via regex (name vs address), so we use
# a flag to specify these
# Load patterns for regexes
pattern_config = ConfigParser()
patterns = {}
with open(os.path.join(PT_HOME, 'etc', 'patterns.ini')) as fp:
pattern_config.readfp(fp)
email_address_pattern = re.compile(pattern_config.get('email', 'pattern'))
phone_pattern = re.compile(pattern_config.get('phone', 'pattern'))
domain_pattern = re.compile(pattern_config.get('domain', 'pattern'))
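# Illustrative note, not part of the original script: assuming the regexes in
# etc/patterns.ini match the usual formats, the dispatch further below behaves
# roughly like this (example values are hypothetical):
#   pt_query.py admin@example.com   -> whois search on the 'email' field
#   pt_query.py example.com         -> whois search on the 'domain' field
#   pt_query.py -n "John Doe"       -> whois search on the 'name' field
#   pt_query.py -a "1 Main Street"  -> whois search on the 'address' field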
database = None
if config.core.cache_enabled:
database = Database()
if args.crits:
HOME = os.path.expanduser("~")
if not os.path.exists(os.path.join(HOME, '.crits_api')):
print('''Please create a file with the following contents:
[crits]
user = lolnate
[keys]
prod_api_key = keyhere
dev_api_key = keyhere
''')
raise SystemExit('~/.crits_api was not found or was not accessible.')
crits_config = ConfigParser()
crits_config.read(os.path.join(HOME, '.crits_api'))
if crits_config.has_option("keys", "prod"):
crits_api_prod = crits_config.get("keys", "prod")
if crits_config.has_option("keys", "dev"):
crits_api_dev = crits_config.get("keys", "dev")
if crits_config.has_option("crits", "user"):
crits_username = crits_config.get("crits", "user")
if args.dev:
crits_url = config.crits.crits_dev_api_url
crits_api_key = crits_api_dev
if len(crits_api_key) != 40:
print("Dev API key in ~/.crits_api is the wrong length! Must be 40\
characters.")
else:
crits_url = config.crits.crits_prod_api_url
crits_api_key = crits_api_prod
if len(crits_api_key) != 40:
print("Prod API key in ~/.crits_api is the wrong length! Must be 40\
characters.")
crits_proxy = {
'http': config.crits.crits_proxy_url,
'https': config.crits.crits_proxy_url,
}
# Build our mongo connection
if args.dev:
crits_mongo = CRITsDBAPI(mongo_uri=config.crits.mongo_uri_dev,
db_name=config.crits.database)
else:
crits_mongo = CRITsDBAPI(mongo_uri=config.crits.mongo_uri,
db_name=config.crits.database)
crits_mongo.connect()
# Connect to the CRITs API
crits = CRITsAPI(
api_url=crits_url,
api_key=crits_api_key,
username=crits_username,
proxies=crits_proxy,
verify=config.crits.crits_verify
)
query = args.QUERY.rstrip()
# Get the user launching all this
user = getpass.getuser()
# Used to store the type of indicator in CRITs for the query object.
crits_indicator_type = ''
# Used to store the cache file location
cache_file = None
if database and not args.force and config.core.cache_enabled:
cache_file = database.get_cache_file(query)
if cache_file:
log.info('Using cache file for query {}'.format(query))
with open(cache_file) as fp:
results = json.loads(fp.read())
bucket_list = ['whois', 'pt:query']
for t in args.tags:
bucket_list.append(t)
if args.name or args.address:
if args.name:
field_str = 'name'
if args.address:
field_str = 'address'
if args.test:
results = pt.get_test_results(field=field_str)
else:
results = pt.whois_search(query=query, field=field_str)
if database and not cache_file and config.core.cache_enabled:
filepath = os.path.join(config.core.cache_dir, str(uuid.uuid4()))
log.debug('Filepath is {}'.format(filepath))
database.add_results_to_cache(query, user, results, filepath)
base_reference = 'https://www.passivetotal.org/search/whois/'\
'{}'.format(field_str)
# Use our config defined indicator type of whois email objects
if args.name:
crits_indicator_type = it.WHOIS_NAME
if args.address:
crits_indicator_type = it.WHOIS_ADDR1
bucket_list.append('registrant')
elif re.match(email_address_pattern, query):
if args.test:
results = pt.get_test_results(field='email')
else:
results = pt.whois_search(query=query, field='email')
# Now add the results to the db if we have it
if database and not cache_file and config.core.cache_enabled:
filepath = os.path.join(config.core.cache_dir, str(uuid.uuid4()))
log.debug('Filepath is {}'.format(filepath))
database.add_results_to_cache(query, user, results, filepath)
base_reference = 'https://www.passivetotal.org/search/whois/email'
# Use our config defined indicator type of whois email objects
crits_indicator_type = it.WHOIS_REGISTRANT_EMAIL_ADDRESS
bucket_list.append('registrant')
elif re.match(phone_pattern, query):
if args.test:
results = pt.get_test_results(field='phone')
else:
results = pt.whois_search(query=query, field='phone')
# Now add the results to the db if we have it
if database and not cache_file and config.core.cache_enabled:
filepath = os.path.join(config.core.cache_dir, str(uuid.uuid4()))
log.debug('Filepath is {}'.format(filepath))
database.add_results_to_cache(query, user, results, filepath)
base_reference = 'https://www.passivetotal.org/search/whois/phone'
crits_indicator_type = it.WHOIS_TELEPHONE
bucket_list.append('registrant')
elif re.match(domain_pattern, query):
if args.test:
results = pt.get_test_results(field='domain')
else:
results = pt.whois_search(query=query, field='domain')
# Now add the results to the db if we have it
if database and not cache_file and config.core.cache_enabled:
filepath = os.path.join(config.core.cache_dir, str(uuid.uuid4()))
log.debug('Filepath is {}'.format(filepath))
database.add_results_to_cache(query, user, results, filepath)
base_reference = 'https://www.passivetotal.org/search/whois/domain'
crits_indicator_type = it.DOMAIN
else:
raise SystemExit("Your query didn't match a known pattern.")
# Add the query to CRITs regardless of the number of results
# TODO: Add campaigns
if args.crits:
found = False
# Search for it with raw mongo because API is slow
crits_result = crits_mongo.find('indicators', {'value': query, 'type':
crits_indicator_type})
if crits_result.count() > 0:
for r in crits_result:
if r['value'] == query:
indicator = r
found = True
if not found:
indicator = crits.add_indicator(
value=query,
itype=crits_indicator_type,
source=config.crits.default_source,
reference='Added via pt_query.py',
method='pt_query.py',
bucket_list=bucket_list,
indicator_confidence='low',
indicator_impact='low',
description='Queried with pt_query.py',
)
# This is pretty hacky - Since we use both the raw DB and the API, we might
# receive either an '_id' or an 'id' back. We are going to standardize on
# 'id', rather than '_id'
if 'id' not in indicator:
if '_id' not in indicator:
print(repr(indicator))
raise SystemExit('id and _id not found for query: '
'{} in new indicator'.format(query))
else:
indicator['id'] = indicator['_id']
# Iterate through all results and print/add to CRITs (if args provided)
formatted_results = []
for result in results['results']:
if 'domain' in result:
crits_indicators_to_add = []
# Row contains:
# Domain, Registrant Email, Registrant Name, Registrant Date,
# Expiration Date, Tags
row = ['', '', '', '', '', '']
row[0] = result['domain']
# Email address used to register
if 'registrant' in result:
# Append the registrant email
if 'email' in result['registrant']:
row[1] = result['registrant']['email']
email_obj = {
'value': result['registrant']['email'],
'type': it.WHOIS_REGISTRANT_EMAIL_ADDRESS,
'related_to': result['domain']
}
crits_indicators_to_add.append(email_obj)
if 'name' in result['registrant']:
row[2] = result['registrant']['name']
name_obj = {
'value': result['registrant']['name'],
'type': it.WHOIS_NAME,
'related_to': result['domain']
}
crits_indicators_to_add.append(name_obj)
if 'telephone' in result['registrant']:
row[3] = result['registrant']['telephone']
phone_obj = {
'value': result['registrant']['telephone'],
'type': it.WHOIS_TELEPHONE,
'related_to': result['domain']
}
crits_indicators_to_add.append(phone_obj)
if 'street' in result['registrant']:
addr1_obj = {
'value': result['registrant']['street'],
'type': it.WHOIS_ADDR1,
'related_to': result['domain']
}
crits_indicators_to_add.append(addr1_obj)
# Date the domain was registered
if 'registered' in result:
row[4] = result['registered']
if 'expiresAt' in result:
row[5] = result['expiresAt']
formatted_results.append(row)
# TODO: Tags. They appear to be an extra API query which is annoying
reference = '{0}/{1}'.format(base_reference, query)
if args.crits:
# Let's try getting the confidence and impact from the parent whois
# indicator
confidence = 'low'
impact = 'low'
if 'confidence' in indicator:
if 'rating' in indicator['confidence']:
confidence = indicator['confidence']['rating']
if 'impact' in indicator:
if 'rating' in indicator['impact']:
impact = indicator['impact']['rating']
# If not in CRITs, add all the associated indicators
bucket_list = ['whois pivoting', 'pt:found']
for t in args.tags:
bucket_list.append(t)
new_ind = crits.add_indicator(
value=result['domain'],
itype=it.DOMAIN,
source=config.crits.default_source,
reference=reference,
method='pt_query.py',
bucket_list=bucket_list,
indicator_confidence=confidence,
indicator_impact=impact,
description='Discovered through PT whois pivots'
)
# The CRITs API allows us to add a campaign to the indicator, but
# not multiple campaigns at one time,
# so we will do it directly with the DB.
# We want to replicate the campaigns of the WHOIS indicator (if
# a campaign exists) to the new indicator.
if 'campaign' in indicator:
for campaign in indicator['campaign']:
crits_mongo.add_embedded_campaign(
new_ind['id'],
'indicators',
campaign['name'],
campaign['confidence'],
campaign['analyst'],
datetime.datetime.now(),
campaign['description']
)
# If the new indicator and the indicator are not related,
# relate them.
if not crits.has_relationship(indicator['id'], 'Indicator',
new_ind['id'], 'Indicator',
rel_type='Registered'):
crits.forge_relationship(indicator['id'], 'Indicator',
new_ind['id'], 'Indicator',
rel_type='Registered')
# Now we can add the rest of the WHOIS indicators (if necessary)
for ind in crits_indicators_to_add:
# If the indicator exists, just get the id and use it to build
# relationships. We will look for one with the same source.
# If not in CRITs, add it and relate it.
whois_indicator = crits_mongo.find_one(
'indicators',
{
'value': ind['value'],
'type': ind['type'],
'source.name':
config.crits.default_source,
})
if not whois_indicator:
bucket_list = ['whois pivoting', 'pt:found']
for t in args.tags:
bucket_list.append(t)
whois_indicator = crits.add_indicator(
value=ind['value'],
itype=ind['type'],
source=config.crits.default_source,
reference=reference,
method='pt_query.py',
bucket_list=bucket_list,
indicator_confidence=confidence,
indicator_impact=impact,
description='Discovered through PT whois pivots'
)
# This is pretty hacky - Since we use both the raw DB and the
# API, we might receive either an '_id' or an 'id' back. We
# are going to standardize on 'id', rather than '_id'
if 'id' not in whois_indicator:
if '_id' not in whois_indicator:
print(repr(whois_indicator))
raise SystemExit('id and _id not found for query: '
'{} in whois indicator'.format(query))
whois_indicator['id'] = whois_indicator['_id']
# Not a huge deal, but make sure we don't waste time adding
# a relationship to itself
if whois_indicator['id'] == new_ind['id']:
continue
# The CRITs API allows us to add a campaign to the indicator,
# but not multiple campaigns at one time,
# so we will do it directly with the DB.
# We want to replicate the campaigns of the WHOIS indicator (if
# a campaign exists) to the new indicator.
# Continue with the same campaign
if 'campaign' in indicator:
for campaign in indicator['campaign']:
crits_mongo.add_embedded_campaign(
whois_indicator['id'],
'indicators',
campaign['name'],
campaign['confidence'],
campaign['analyst'],
datetime.datetime.now(),
campaign['description']
)
# If the new indicator and the indicator are not related,
# relate them.
if not crits.has_relationship(whois_indicator['id'],
'Indicator',
new_ind['id'],
'Indicator',
rel_type='Registered'):
crits.forge_relationship(whois_indicator['id'],
'Indicator',
new_ind['id'],
'Indicator',
rel_type='Registered')
# Add a bucket_list item to track that we searched for this whois indicator
if args.crits:
crits_mongo.add_bucket_list_item(indicator['id'], 'indicators',
'pt:whois_search_completed')
# SORT BY DATE
formatted_results = sorted(formatted_results, key=itemgetter(3), reverse=True)
# Row contains:
# Domain, Registrant Email, Registrant Name, Registrant Telephone,
# Registrant Date, Expiration Date, Tags
headers = ['Domain', 'Registrant Email', 'Registrant Name',
'Registrant Telephone', 'Registrant Date', 'Expiration Date',
'Tags']
print(tabulate.tabulate(formatted_results, headers))
| IntegralDefense/ptauto | bin/pt_query.py | Python | apache-2.0 | 19,839 | 0.00005 |
from __future__ import absolute_import, print_function
from datetime import timedelta
from django.utils import timezone
from freezegun import freeze_time
from sentry.models import CheckInStatus, Monitor, MonitorCheckIn, MonitorStatus, MonitorType
from sentry.testutils import APITestCase
@freeze_time("2019-01-01")
class CreateMonitorCheckInTest(APITestCase):
def test_passing(self):
user = self.create_user()
org = self.create_organization(owner=user)
team = self.create_team(organization=org, members=[user])
project = self.create_project(teams=[team])
monitor = Monitor.objects.create(
organization_id=org.id,
project_id=project.id,
next_checkin=timezone.now() - timedelta(minutes=1),
type=MonitorType.CRON_JOB,
config={"schedule": "* * * * *"},
)
self.login_as(user=user)
with self.feature({"organizations:monitors": True}):
resp = self.client.post(
"/api/0/monitors/{}/checkins/".format(monitor.guid), data={"status": "ok"}
)
assert resp.status_code == 201, resp.content
checkin = MonitorCheckIn.objects.get(guid=resp.data["id"])
assert checkin.status == CheckInStatus.OK
monitor = Monitor.objects.get(id=monitor.id)
assert monitor.status == MonitorStatus.OK
assert monitor.last_checkin == checkin.date_added
assert monitor.next_checkin == monitor.get_next_scheduled_checkin(checkin.date_added)
def test_failing(self):
user = self.create_user()
org = self.create_organization(owner=user)
team = self.create_team(organization=org, members=[user])
project = self.create_project(teams=[team])
monitor = Monitor.objects.create(
organization_id=org.id,
project_id=project.id,
next_checkin=timezone.now() - timedelta(minutes=1),
type=MonitorType.CRON_JOB,
config={"schedule": "* * * * *"},
)
self.login_as(user=user)
with self.feature({"organizations:monitors": True}):
resp = self.client.post(
"/api/0/monitors/{}/checkins/".format(monitor.guid), data={"status": "error"}
)
assert resp.status_code == 201, resp.content
checkin = MonitorCheckIn.objects.get(guid=resp.data["id"])
assert checkin.status == CheckInStatus.ERROR
monitor = Monitor.objects.get(id=monitor.id)
assert monitor.status == MonitorStatus.ERROR
assert monitor.last_checkin == checkin.date_added
assert monitor.next_checkin == monitor.get_next_scheduled_checkin(checkin.date_added)
def test_disabled(self):
user = self.create_user()
org = self.create_organization(owner=user)
team = self.create_team(organization=org, members=[user])
project = self.create_project(teams=[team])
monitor = Monitor.objects.create(
organization_id=org.id,
project_id=project.id,
next_checkin=timezone.now() - timedelta(minutes=1),
type=MonitorType.CRON_JOB,
status=MonitorStatus.DISABLED,
config={"schedule": "* * * * *"},
)
self.login_as(user=user)
with self.feature({"organizations:monitors": True}):
resp = self.client.post(
"/api/0/monitors/{}/checkins/".format(monitor.guid), data={"status": "error"}
)
assert resp.status_code == 201, resp.content
checkin = MonitorCheckIn.objects.get(guid=resp.data["id"])
assert checkin.status == CheckInStatus.ERROR
monitor = Monitor.objects.get(id=monitor.id)
assert monitor.status == MonitorStatus.DISABLED
assert monitor.last_checkin == checkin.date_added
assert monitor.next_checkin == monitor.get_next_scheduled_checkin(checkin.date_added)
def test_pending_deletion(self):
user = self.create_user()
org = self.create_organization(owner=user)
team = self.create_team(organization=org, members=[user])
project = self.create_project(teams=[team])
monitor = Monitor.objects.create(
organization_id=org.id,
project_id=project.id,
next_checkin=timezone.now() - timedelta(minutes=1),
type=MonitorType.CRON_JOB,
status=MonitorStatus.PENDING_DELETION,
config={"schedule": "* * * * *"},
)
self.login_as(user=user)
with self.feature({"organizations:monitors": True}):
resp = self.client.post(
"/api/0/monitors/{}/checkins/".format(monitor.guid), data={"status": "error"}
)
assert resp.status_code == 404, resp.content
def test_deletion_in_progress(self):
user = self.create_user()
org = self.create_organization(owner=user)
team = self.create_team(organization=org, members=[user])
project = self.create_project(teams=[team])
monitor = Monitor.objects.create(
organization_id=org.id,
project_id=project.id,
next_checkin=timezone.now() - timedelta(minutes=1),
type=MonitorType.CRON_JOB,
status=MonitorStatus.DELETION_IN_PROGRESS,
config={"schedule": "* * * * *"},
)
self.login_as(user=user)
with self.feature({"organizations:monitors": True}):
resp = self.client.post(
"/api/0/monitors/{}/checkins/".format(monitor.guid), data={"status": "error"}
)
assert resp.status_code == 404, resp.content
| mvaled/sentry | tests/sentry/api/endpoints/test_monitor_checkins.py | Python | bsd-3-clause | 5,664 | 0.001589 |
from dataclasses import asdict, dataclass
from typing import List, Optional
from license_grep.licenses import UnknownLicense, canonicalize_licenses
from license_grep.utils import unique_in_order
@dataclass
class PackageInfo:
name: str
version: str
type: str
raw_licenses: Optional[List[str]]
location: str
context: Optional[str]
@property
def licenses(self):
for license, canonicalized_license in canonicalize_licenses(self.raw_licenses):
yield canonicalized_license
@property
def licenses_string(self):
return ", ".join(
unique_in_order(str(license or "<UNKNOWN>") for license in self.licenses)
)
@property
def spec(self):
return f"{self.name}@{self.version}"
@property
def full_spec(self):
return f"{self.type}:{self.name}@{self.version}"
def as_json_dict(self):
return {
**asdict(self),
"licenses": list(
unique_in_order(
f"?{l}" if isinstance(l, UnknownLicense) else l
for l in self.licenses
)
),
"spec": self.spec,
}
| akx/license-grep | license_grep/models.py | Python | mit | 1,190 | 0.002521 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top level script for running all python unittests in the NaCl SDK.
"""
from __future__ import print_function
import argparse
import os
import subprocess
import sys
import unittest
# add tools folder to sys.path
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
TOOLS_DIR = os.path.join(SCRIPT_DIR, 'tools')
BUILD_TOOLS_DIR = os.path.join(SCRIPT_DIR, 'build_tools')
sys.path.append(TOOLS_DIR)
sys.path.append(os.path.join(TOOLS_DIR, 'tests'))
sys.path.append(os.path.join(TOOLS_DIR, 'lib', 'tests'))
sys.path.append(BUILD_TOOLS_DIR)
sys.path.append(os.path.join(BUILD_TOOLS_DIR, 'tests'))
import build_paths
PKG_VER_DIR = os.path.join(build_paths.NACL_DIR, 'build', 'package_version')
TAR_DIR = os.path.join(build_paths.NACL_DIR, 'toolchain', '.tars')
PKG_VER = os.path.join(PKG_VER_DIR, 'package_version.py')
EXTRACT_PACKAGES = ['nacl_x86_glibc']
TOOLCHAIN_OUT = os.path.join(build_paths.OUT_DIR, 'sdk_tests', 'toolchain')
# List of modules containing unittests. The goal is to keep the total
# runtime of these tests under 2 seconds. Any slower tests should go
# in TEST_MODULES_BIG.
TEST_MODULES = [
'build_artifacts_test',
'build_version_test',
'create_html_test',
'create_nmf_test',
'easy_template_test',
'elf_test',
'fix_deps_test',
'getos_test',
'get_shared_deps_test',
'httpd_test',
'nacl_config_test',
'oshelpers_test',
'parse_dsc_test',
'quote_test',
'sdktools_config_test',
'sel_ldr_test',
'update_nacl_manifest_test',
'verify_filelist_test',
'verify_ppapi_test',
]
# Slower tests. For example, the 'sdktools' tests are mostly system tests
# that take longer to run. If --quick is passed then we don't run these.
TEST_MODULES_BIG = [
'sdktools_commands_test',
'sdktools_test',
]
def ExtractToolchains():
cmd = [sys.executable, PKG_VER,
'--packages', ','.join(EXTRACT_PACKAGES),
'--tar-dir', TAR_DIR,
'--dest-dir', TOOLCHAIN_OUT,
'extract']
subprocess.check_call(cmd)
def main(args):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('--quick', action='store_true')
options = parser.parse_args(args)
# Some of the unit tests use parts of toolchains. Extract to TOOLCHAIN_OUT.
print('Extracting toolchains...')
ExtractToolchains()
suite = unittest.TestSuite()
modules = TEST_MODULES
if not options.quick:
modules += TEST_MODULES_BIG
for module_name in modules:
module = __import__(module_name)
suite.addTests(unittest.defaultTestLoader.loadTestsFromModule(module))
if options.verbose:
verbosity = 2
else:
verbosity = 1
print('Running unittests...')
result = unittest.TextTestRunner(verbosity=verbosity).run(suite)
return int(not result.wasSuccessful())
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| M4sse/chromium.src | native_client_sdk/src/test_all.py | Python | bsd-3-clause | 3,095 | 0.006462 |
import unittest
from ncclient.devices.alu import *
from ncclient.xml_ import *
import re
xml = """<rpc-reply xmlns:junos="http://xml.alu.net/alu/12.1x46/alu">
<routing-engin>
<name>reX</name>
<commit-success/>
<!-- This is a comment -->
</routing-engin>
<ok/>
</rpc-reply>"""
class TestAluDevice(unittest.TestCase):
def setUp(self):
self.obj = AluDeviceHandler({'name': 'alu'})
def test_remove_namespaces(self):
xmlObj = to_ele(xml)
expected = re.sub(r'<rpc-reply xmlns:junos="http://xml.alu.net/alu/12.1x46/alu">',
r'<?xml version="1.0" encoding="UTF-8"?><rpc-reply>', xml)
self.assertEqual(expected, to_xml(remove_namespaces(xmlObj)))
def test_get_capabilities(self):
expected = ["urn:ietf:params:netconf:base:1.0", ]
self.assertListEqual(expected, self.obj.get_capabilities())
def test_get_xml_base_namespace_dict(self):
expected = {None: BASE_NS_1_0}
self.assertDictEqual(expected, self.obj.get_xml_base_namespace_dict())
def test_get_xml_extra_prefix_kwargs(self):
expected = dict()
expected["nsmap"] = self.obj.get_xml_base_namespace_dict()
self.assertDictEqual(expected, self.obj.get_xml_extra_prefix_kwargs())
def test_add_additional_operations(self):
expected=dict()
expected["get_configuration"] = GetConfiguration
expected["show_cli"] = ShowCLI
expected["load_configuration"] = LoadConfiguration
self.assertDictEqual(expected, self.obj.add_additional_operations())
def test_transform_reply(self):
expected = re.sub(r'<rpc-reply xmlns:junos="http://xml.alu.net/alu/12.1x46/alu">',
r'<?xml version="1.0" encoding="UTF-8"?><rpc-reply>', xml)
actual = self.obj.transform_reply()
xmlObj = to_ele(xml)
self.assertEqual(expected, to_xml(actual(xmlObj)))
| ncclient/ncclient | test/unit/devices/test_alu.py | Python | apache-2.0 | 1,917 | 0.003652 |
"""Commands related to networks are in this module"""
import click
import sys
from hil.cli.client_setup import client
@click.group()
def network():
"""Commands related to network"""
@network.command(name='create', short_help='Create a new network')
@click.argument('network')
@click.argument('owner')
@click.option('--access', help='Projects that can access this network. '
'Defaults to the owner of the network')
@click.option('--net-id',
help='Network ID for network. Only admins can specify this.')
def network_create(network, owner, access, net_id):
"""Create a link-layer <network>. See docs/networks.md for details"""
if net_id is None:
net_id = ''
if access is None:
access = owner
client.network.create(network, owner, access, net_id)
@network.command(name='delete')
@click.argument('network')
def network_delete(network):
"""Delete a network"""
client.network.delete(network)
@network.command(name='show')
@click.argument('network')
def network_show(network):
"""Display information about network"""
q = client.network.show(network)
for item in q.items():
sys.stdout.write("%s\t : %s\n" % (item[0], item[1]))
@network.command(name='list')
def network_list():
"""List all networks"""
q = client.network.list()
for item in q.items():
sys.stdout.write('%s \t : %s\n' % (item[0], item[1]))
@network.command('list-attachments')
@click.argument('network')
@click.option('--project', help='Name of project.')
def list_network_attachments(network, project):
"""Lists all the attachments from <project> for <network>
If <project> is `None`, lists all attachments for <network>
"""
print client.network.list_network_attachments(network, project)
@network.command(name='grant-access')
@click.argument('network')
@click.argument('project')
def network_grant_project_access(project, network):
"""Add <project> to <network> access"""
client.network.grant_access(project, network)
@network.command(name='revoke-access')
@click.argument('network')
@click.argument('project')
def network_revoke_project_access(project, network):
"""Remove <project> from <network> access"""
client.network.revoke_access(project, network)
| SahilTikale/haas | hil/cli/network.py | Python | apache-2.0 | 2,277 | 0 |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .lecroyWRXIA import *
class lecroyWR64XIA(lecroyWRXIA):
"Lecroy WaveRunner 64Xi-A IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'WaveRunner 64Xi-A')
        super(lecroyWR64XIA, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
self._digital_channel_count = 0
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 600e6
self._init_channels()
| elopezga/ErrorRate | ivi/lecroy/lecroyWR64XIA.py | Python | mit | 1,644 | 0.001825 |
# -*- coding: utf-8 -*-
#
# test_pp_psc_delta_stdp.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
#
# Moritz Deger, moritz.deger@epfl.ch, Aug 14, 2015
#
#
# Python script to reproduce failure of pp_psc_delta to show spike timing
# dependent plasticity (STDP), as opposed to iaf_psc_delta.
# The problem is probably related to the setting of 'archiver_length'
# (printed at the end of the script)
import nest
import nest.raster_plot
import numpy as np
import pylab
Dt = 1.
nsteps = 100
w_0 = 100.
nest.ResetKernel()
nrn_pre = nest.Create('parrot_neuron')
nrn_post1 = nest.Create('iaf_psc_delta')
nrn_post2 = nest.Create('pp_psc_delta')
nest.Connect(nrn_pre, nrn_post1 + nrn_post2,
syn_spec={'model': 'stdp_synapse', 'weight': w_0})
conn1 = nest.GetConnections(nrn_pre, nrn_post1)
conn2 = nest.GetConnections(nrn_pre, nrn_post2)
sg_pre = nest.Create('spike_generator')
nest.SetStatus(sg_pre, {'spike_times': np.arange(Dt, nsteps * Dt, 10. * Dt)})
nest.Connect(sg_pre, nrn_pre)
mm = nest.Create('multimeter')
nest.SetStatus(mm, {'record_from': ['V_m']})
nest.Connect(mm, nrn_post1 + nrn_post2)
sd = nest.Create('spike_detector')
nest.Connect(nrn_pre + nrn_post1 + nrn_post2, sd)
t = []
w1 = []
w2 = []
t.append(0.)
w1.append(nest.GetStatus(conn1, keys=['weight'])[0][0])
w2.append(nest.GetStatus(conn2, keys=['weight'])[0][0])
for i in xrange(nsteps):
nest.Simulate(Dt)
t.append(i * Dt)
w1.append(nest.GetStatus(conn1, keys=['weight'])[0][0])
w2.append(nest.GetStatus(conn2, keys=['weight'])[0][0])
pylab.figure(1)
pylab.plot(t, w1, 'g', label='iaf_psc_delta, ' + str(nrn_post1[0]))
pylab.plot(t, w2, 'r', label='pp_psc_delta, ' + str(nrn_post2[0]))
pylab.xlabel('time [ms]')
pylab.ylabel('weight [mV]')
pylab.legend(loc='best')
ylims = pylab.ylim()
pylab.ylim(ylims[0] - 5, ylims[1] + 5)
# pylab.savefig('test_pp_psc_delta_stdp_fig1.png')
nest.raster_plot.from_device(sd)
ylims = pylab.ylim()
pylab.ylim(ylims[0] - .5, ylims[1] + .5)
pylab.show()
# pylab.savefig('test_pp_psc_delta_stdp_fig2.png')
print 'Archiver lengths shall be equal:'
for nrn in [nrn_post1, nrn_post2]:
print nest.GetStatus(nrn, keys=['model', 'archiver_length'])[0]
| HBPNeurorobotics/nest-simulator | testsuite/manualtests/test_pp_psc_delta_stdp.py | Python | gpl-2.0 | 2,827 | 0 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
r"""Pychemqt, Chemical Engineering Process simulator
Copyright (C) 2009-2017, Juan José Gómez Romera <jjgomera@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Library with the implementation of a generic cubic equation of state with the
form
.. math::
P = \frac{RT}{V-b}-\frac{\alpha(T)}{V^2+\delta V+\epsilon}
Expressing it as a cubic polynomial in compressibility factor, easy to solve
.. math::
    Z^3 + \left(\delta'-b'-1\right)Z^2 +
    \left(a'+\epsilon'-\delta'\left(b'+1\right)\right)Z -
    \left(\epsilon'\left(b'+1\right)+a'b'\right) = 0
using the dimensionless parameters
.. math::
    \begin{array}[t]{l}
    a' = \frac{aP}{\left(RT\right)^2}\\
    b' = \frac{bP}{RT}\\
    \delta' = \frac{\delta P}{RT}\\
    \epsilon' = \frac{\epsilon P^2}{\left(RT\right)^2}\\
    \end{array}
Each cubic EoS implemented here is a specific form of this general
expression, changing the values of δ, ε and the expression of α(T).
Each equation is especially suitable for different compounds; for example, the
Schmidt-Wenzel (SW) equation (1980) and the Adachi-Lu-Sugie (ALS) equation
(1983) are good for methane to n-decane. The Yu-Lu (YL) equation (1987) was
designed for asymmetric nonpolar mixtures, but not for polar substances. The
Iwai-Margerum-Lu (IML) equation (1987) was developed for polar substances, but
is not suitable for nonpolar substances with large molecular weight.
"""
from math import log, exp
from scipy.constants import R
from PyQt5.QtWidgets import QApplication
from lib import unidades
from lib.eos import EoS
from lib.physics import R_atml, cubicCardano
from lib.bip import Kij, Mixing_Rule
from lib.utilities import refDoc
# TODO: Add parameters, file /media/datos/Biblioteca/archivos/alfas.pdf
# self.Mathias = 0
# self.Adachi = [0, 0]
# self.Andoulakis = [0, 0, 0]
__doi__ = {
1:
{"autor": "Poling, B.E, Prausnitz, J.M, O'Connell, J.P",
"title": "The Properties of Gases and Liquids 5th Edition",
"ref": "McGraw-Hill, New York, 2001",
"doi": ""},
2:
{"autor": "Ahmed, T.",
"title": "Equations of State and PVT Analysis: Applications for"
"Improved Reservoir Modeling, 2nd Edition",
"ref": "Gulf Professional Publishing, 2016, ISBN 9780128015704,",
"doi": "10.1016/B978-0-12-801570-4.00002-7"},
3:
{"autor": "Bell, I.H., Jäger, A.",
"title": "Helmholtz Energy Transformations of Common Cubic Equations "
"of State for Use with Pure Fluids and Mixtures",
"ref": "J. Res. of NIST 121 (2016) 236-263",
"doi": "10.6028/jres.121.011"},
4:
{"autor": "",
"title": "",
"ref": "",
"doi": ""},
}
alfa = (QApplication.translate("pychemqt", "Original"),
"Boston-Mathias",
"Twu",
"Doridon")
@refDoc(__doi__, [3])
def CubicHelmholtz(tau, delta, **kw):
r"""Residual contribution to the free Helmholtz energy from a generic cubic
equation of state with the form:
.. math::
P = \frac{RT}{V-b}-\frac{\alpha(T)}{\left(v+\Delta_1b\right)
\left(v+\Delta_2b\right)}
From this formulation it's possible calculate the Helmholtz free energy
with the equation:
.. math::
\alpha^r = \phi^{(-)}-\frac{\tau\alpha}{RT_c}\phi^{(+)}
Parameters
----------
tau : float
Inverse reduced temperature, Tc/T [-]
delta : float
Reduced density, rho/rhoc [-]
    kw : dict
        Additional parameters specific to the cubic equation of state
The parameters include: rhoc, Tc, b, alfa, Delta1, Delta2
Returns
-------
    prop : dict with the residual dimensionless Helmholtz energy and its derivatives
fir [-]
firt: [∂fir/∂τ]δ,x [-]
fird: [∂fir/∂δ]τ,x [-]
firtt: [∂²fir/∂τ²]δ,x [-]
firdt: [∂²fir/∂τ∂δ]x [-]
firdd: [∂²fir/∂δ²]τ,x [-]
"""
b = kw["b"]
a = kw["a"]
dat = kw["dat"]
datt = kw["datt"]
dattt = kw["dattt"]
Delta1 = kw["Delta1"]
Delta2 = kw["Delta2"]
R = kw["R"]
# This parameters are necessary only for multicomponent mixtures to
# calculate fugacity coefficient
bi = kw.get("bi", None)
daxi = kw.get("daxi", None)
rhoc = kw.get("rhoc", 1)
Tc = kw.get("Tc", 1)
phi1 = -log(1-b*delta*rhoc)
if Delta1 == Delta2:
# Special case using the l'Hôpital's rule
phi2 = rhoc*delta
else:
phi2 = log((Delta1*b*rhoc*delta+1)/(Delta2*b*rhoc*delta+1)) / \
b/(Delta1-Delta2)
phi1d = b*rhoc/(1-b*delta*rhoc)
phi1dd = b**2*rhoc**2/(1-b*delta*rhoc)**2
phi1ddd = 2*b**3*rhoc**3/(1-b*delta*rhoc)**3
PI12 = (1+Delta1*b*rhoc*delta) * (1+Delta2*b*rhoc*delta)
PI12d = b*rhoc * (2*Delta1*Delta2*b*delta*rhoc + Delta1 + Delta2)
PI12dd = 2*Delta1*Delta2*b**2*rhoc**2
phi2d = rhoc/PI12
phi2dd = -rhoc*PI12d/PI12**2
phi2ddd = rhoc*(-PI12*PI12dd+2*PI12d**2)/PI12**3
fir = phi1 - tau*a/R/Tc*phi2
fird = phi1d - tau*a/R/Tc*phi2d
firdd = phi1dd - tau*a/R/Tc*phi2dd
firddd = phi1ddd - tau*a/R/Tc*phi2ddd
# Eq 32
dtat = tau*dat + a
dtatt = tau*datt + 2*dat
dtattt = tau*dattt + 3*datt
firt = -dtat/R/Tc * phi2
firtt = -dtatt/R/Tc * phi2
firttt = -dtattt/R/Tc * phi2
firdt = -dtat/R/Tc * phi2d
firddt = -dtat/R/Tc * phi2dd
firdtt = -dtatt/R/Tc * phi2d
prop = {}
prop["fir"] = fir
prop["fird"] = fird
prop["firt"] = firt
prop["firdd"] = firdd
prop["firdt"] = firdt
prop["firtt"] = firtt
prop["firddd"] = firddd
prop["firddt"] = firddt
prop["firdtt"] = firdtt
prop["firttt"] = firttt
prop["B"] = 0
prop["C"] = 0
prop["D"] = 0
if bi:
# Composition derivatives for fugacity coefficient calculation
c = 1/b
dbxi = bi # Eq 132
A = log((delta*rhoc*b*Delta1+1)/(delta*rhoc*b*Delta2+1)) # Eq 103
dAxi = [delta*rhoc*db*(Delta1-Delta2)/PI12 for db in dbxi] # Eq 104
dcxi = [-db/b**2 for db in dbxi] # Eq 107
phi1xi = [delta*rhoc*db/(1-delta*rhoc*b) for db in dbxi] # Eq 80
# Eq 111
phi2xi = [(A*dc + c*dA)/(Delta1-Delta2) for dc, dA in zip(dcxi, dAxi)]
dtaxi = [tau*da for da in daxi]
# Eq 77
phirxi = []
for dt, p1x, p2x in zip(dtaxi, phi1xi, phi2xi):
phirxi.append(p1x - 1/R/Tc*(dt*phi2 + tau*a*p2x))
prop["firxi"] = phirxi
return prop
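# Illustrative usage sketch, not part of the original module; the parameter
# values below are hypothetical, Peng-Robinson-like numbers shown only to
# document the expected call shape of CubicHelmholtz:
#   prop = CubicHelmholtz(tau=1.5, delta=0.5, rhoc=5000.0, Tc=300.0, R=8.314,
#                         b=3e-5, a=0.5, dat=-1e-3, datt=1e-5, dattt=-1e-7,
#                         Delta1=1+2**0.5, Delta2=1-2**0.5)
#   prop["fir"] is the residual dimensionless Helmholtz energy; "fird", "firt",
#   "firdd", ... are its delta- and tau-derivatives, as documented above.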
@refDoc(__doi__, [1, 2])
class Cubic(EoS):
r"""Class to implement the common functionality of cubic equation of state
This class implement a general cubic equation of state in the form:
.. math::
P = \frac{RT}{V-b}-\frac{\alpha(T)}{V^2+\delta V+\epsilon}
.. math::
P = \frac{RT}{V-b}-\frac{\alpha(T)}{\left(V+\delta_1b\right)
\left(V+\delta_2b\right)}
    .. math::
        \delta_1 = \frac{\delta+\sqrt{\delta^2-4\epsilon}}{2b}
    .. math::
        \delta_2 = \frac{\delta-\sqrt{\delta^2-4\epsilon}}{2b}
"""
def __init__(self, T, P, mezcla, **kwargs):
EoS.__init__(self, T, P, mezcla, **kwargs)
if "R" in kwargs:
self.R = kwargs["R"]
else:
self.R = R
self._cubicDefinition(T)
if self.mezcla.Tc < T:
self.x = 1
self.xi = self.zi
self.yi = self.zi
self.Zg = self._Z(self.zi, T, P)[-1]
self.Zl = None
else:
self.x, self.Zl, self.Zg, self.xi, self.yi, self.Ki = self._Flash()
# print("q = ", self.x)
# print("x = ", self.xi)
# print("y = ", self.yi)
# print("K = ", self.Ki)
if self.Zl:
self.Vl = unidades.MolarVolume(self.Zl*self.R*T/P, "m3mol")
rhoL = self.P/self.Zl/self.R/self.T
self.rhoL = unidades.MolarDensity(rhoL, "molm3")
else:
self.Vl = None
self.rhoL = None
if self.Zg:
self.Vg = unidades.MolarVolume(self.Zg*self.R*T/P, "m3mol")
rhoG = self.P/self.Zg/self.R/self.T
self.rhoG = unidades.MolarDensity(rhoG, "molm3")
else:
self.Vg = None
self.rhoG = None
self._volumeCorrection()
# tau = mezcla.Tc/T
# delta = self.V[-1]*mezcla.Vc
# kw = {}
# print(CubicHelmholtz(tau, delta, **kw))
# dep_v = self._departure(self.tita, self.b, self.delta, self.epsilon, self.dTitadT, self.V[-1], T)
# dep_l = self._departure(self.tita, self.b, self.delta, self.epsilon, self.dTitadT, self.V[0], T)
# rho = self.rhoG.molm3
# from pprint import pprint
# pprint(self._phir(self.T, rho, self.yi))
def _volumeCorrection(self):
"""Apply volume correction to the rhoL property"""
pass
def _cubicDefinition(self, T):
"""Definition of individual component parameters of generalized cubic
equation of state, its calculation don't depend composition"""
# TODO: Split fixed paremeters calculation from temperature dependences
# to speed up
pass
def _GEOS(self, xi):
"""Definition of parameters of generalized cubic equation of state,
each child class must define in this procedure the values of mixture
a, b, delta, epsilon. The returned values are not dimensionless.
Parameters
----------
xi : list
Molar fraction of component in mixture, [-]
Returns
-------
        parameters : list
            Mixture parameters of the equation: a, b, delta, epsilon
"""
pass
def _Z(self, xi, T, P):
"""Calculate root of cubic polynomial in terms of GCEoS as give in
[1]_.
Parameters
----------
xi : list
Molar fraction of component in mixture, [-]
T : float
Temperature, [K]
P : float
Pressure, [Pa]
Returns
-------
Z : list
            List with the real roots of the equation
"""
self._cubicDefinition(T)
tita, b, delta, epsilon = self._GEOS(xi)
B = b*P/self.R/T
A = tita*P/(self.R*T)**2
D = delta*P/self.R/T
E = epsilon*(P/self.R/T)**2
# Eq 4-6.3 in [1]_
        # η is set to b by default to reduce terms; if any equation needs that
        # term, redefine this procedure
coeff = (1, D-B-1, A+E-D*(B+1), -E*(B+1)-A*B)
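        # Written out, coeff corresponds to the cubic in the compressibility
        # factor: Z^3 + (D-B-1)*Z^2 + (A+E-D*(B+1))*Z - (E*(B+1)+A*B) = 0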
Z = cubicCardano(*coeff)
        # Sort the Z values; if a TypeError is raised it is because there are
        # complex roots, so return only the real root
try:
Z = sorted(map(float, Z))
except TypeError:
Z = Z[0:1]
return Z
def _fug(self, xi, yi, T, P):
"""Fugacities of component in mixture calculation
Parameters
----------
xi : list
Molar fraction of component in liquid phase, [-]
yi : list
Molar fraction of component in vapor phase, [-]
T : float
Temperature, [K]
P : float
Pressure, [Pa]
Returns
-------
tital : list
List with liquid phase component fugacities
titav : list
List with vapour phase component fugacities
"""
self._cubicDefinition(T)
Bi = [bi*P/self.R/T for bi in self.bi]
Ai = [ai*P/(self.R*T)**2 for ai in self.ai]
al, bl, deltal, epsilonl = self._GEOS(xi)
Bl = bl*P/self.R/T
Al = al*P/(self.R*T)**2
Zl = self._Z(xi, T, P)[0]
tital = self._fugacity(Zl, xi, Al, Bl, Ai, Bi)
Zv = self._Z(yi, T, P)[-1]
av, bv, deltav, epsilonv = self._GEOS(yi)
Bv = bv*P/self.R/T
Av = av*P/(self.R*T)**2
titav = self._fugacity(Zv, yi, Av, Bv, Ai, Bi)
return tital, titav
def _fugacity(self, Z, zi, A, B, Ai, Bi):
"""Fugacity for individual components in a mixture using the GEoS in
the Schmidt-Wenzel formulation, so the subclass must define the
parameters u and w in the EoS
Any other subclass with different formulation must overwrite this
method
"""
# Precalculation of inner sum in equation
aij = []
for ai, kiji in zip(Ai, self.kij):
suma = 0
for xj, aj, kij in zip(zi, Ai, kiji):
suma += xj*(1-kij)*(ai*aj)**0.5
aij.append(suma)
tita = []
for bi, aai in zip(Bi, aij):
rhs = bi/B*(Z-1) - log(Z-B) + A/B/(self.u-self.w)*(
bi/B-2/A*aai) * log((Z+self.u*B)/(Z+self.w*B))
tita.append(exp(rhs))
return tita
def _mixture(self, eq, xi, par):
"""Apply mixing rules to individual parameters to get the mixture
parameters for EoS
Although it possible use any of available mixing rules, for now other
properties calculation as fugacity helmholtz free energy are defined
using the vdW mixing rules.
Parameters
----------
eq : str
codename of equation, PR, SRK...
xi : list
Molar fraction of component, [-]
par : list
list with individual parameters of equation, [-]
Returns
-------
mixpar : list
List with mixture parameters, [-]
"""
self.kij = Kij(self.mezcla.ids, eq)
mixpar = Mixing_Rule(xi, par, self.kij)
return mixpar
def _Tr(self):
"""Definition of reducing parameters"""
if len(self.mezcla.componente) > 1:
# Mixture as one-fluid
Tr = 1
rhor = 1
else:
# Pure fluid
Tr = self.mezcla.Tc
rhor = 1/self.mezcla.Vc/1000 # m3/mol
return Tr, rhor
def _phir(self, T, rho, xi):
Tr, rhor = self._Tr()
tau = Tr/T
delta = rho/rhor
a, b, d, e = self._GEOS(xi)
kw = self._da(tau, xi)
Tr, rhor = self._Tr()
kw["rhoc"] = rhor
kw["Tc"] = Tr
kw["Delta1"] = self.u
kw["Delta2"] = self.w
kw["bi"] = self.bi
kw["b"] = b
kw["a"] = a
kw["R"] = self.R
fir = CubicHelmholtz(tau, delta, **kw)
# print(self._excess(tau, delta, fir))
# print("fir: ", fir["fir"])
# print("fird: ", fir["fird"]*delta)
# print("firt: ", fir["firt"]*tau)
# print("firdd: ", fir["firdd"]*delta**2)
# print("firdt: ", fir["firdt"]*delta*tau)
# print("firtt: ", fir["firtt"]*tau**2)
# print("firddd: ", fir["firddd"]*delta**3)
# print("firddt: ", fir["firddt"]*delta**2*tau)
# print("firdtt: ", fir["firdtt"]*delta*tau**2)
# print("firttt: ", fir["firttt"]*tau**3)
# T = Tr/tau
# rho = rhor*delta
# print("P", (1+delta*fir["fird"])*R*T*rho)
# print(delta, fir["fird"], R, T, rho)
return fir
def _excess(self, tau, delta, phir):
fir = phir["fir"]
fird = phir["fird"]
firt = phir["firt"]
firtt = phir["firtt"]
p = {}
p["Z"] = 1 + delta*fird
p["H"] = tau*firt + delta*fird
p["S"] = tau*firt - fir
p["cv"] = -tau**2*firtt
return p
def _departure(self, a, b, d, e, TdadT, V, T):
"""Calculate departure function, Table 6-3 from [1]"""
Z = 1 + b/(V-b) - a*V/R_atml/T/(V**2+d*V+e)
        # Numerator and denominator used in several expressions
K = (d**2-4*e)**0.5
num = 2*V + d - K
den = 2*V + d + K
kw = {}
kw["Z"] = Z
if K:
kw["H"] = 1 - (a+TdadT)/R_atml/T/K*log(num/den) - Z
kw["S"] = TdadT/R_atml/K*log(num/den) - log(Z*(1-b/V))
kw["A"] = -a/R_atml/T/K*log(num/den) + log(Z*(1-b/V))
kw["f"] = a/R_atml/T/K*log(num/den) - log(Z*(1-b/V)) - (1-Z)
else:
kw["H"] = 1 - Z
kw["S"] = -log(Z*(1-b/V))
kw["A"] = log(Z*(1-b/V))
kw["f"] = -log(Z*(1-b/V)) - (1-Z)
return kw
# def _fug2(self, Z, xi):
# """Calculate partial fugacities coefficieint of components
# References
# ----------
# mollerup, Chap 2, pag 64 and so
# """
# V = Z*R_atml*self.T/self.P
# g = log(V-self.b) - log(V) # Eq 61
# f = 1/R_atml/self.b/(self.delta1-self.delta2) * \
# log((V+self.delta1*self.b)/(V+self.delta2*self.b)) # Eq 62
# gB = -1/(V-self.b) # Eq 80
# An = -g # Eq 75
# AB = -n*gB-D/self.T*fB # Eq 78
# AD = -f/self.T # Eq 79
# # Ch.3, Eq 66
# dAni = An+AB*Bi+AD*Di
# # Ch.2, Eq 13
# fi = dAni - log(Z)
# return fi
# def _fug(self, Z, xi):
# Ai=[]
# for i in range(len(self.componente)):
# suma=0
# for j in range(len(self.componente)):
# suma+=self.zi[j]*self.ai[j]**0.5*(1-self.kij[i][j])
# Ai.append(1/self.tita*2*self.ai[i]**0.5*suma)
# tita=[]
# for i in range(len(self.componente)):
# tita.append(exp(self.bi[i]/self.b*(Z-1)-log(Z-self.B)-self.Tita/self.B/sqrt(self.u**2-4*self.w)*(Ai[i]-self.bi[i]/self.b)*log((Z+self.B/2*(self.u+sqrt(self.u**2-4*self.w)))/(Z+self.B/2*(self.u-sqrt(self.u**2-4*self.w))))).real)
# print("fug:", tita)
# return tita
|
jjgomera/pychemqt
|
lib/EoS/cubic.py
|
Python
|
gpl-3.0
| 18,485 | 0.000759 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('taxbrain', '0002_taxoutput_tax_result'),
]
operations = [
migrations.AddField(
model_name='taxsaveinputs',
name='parameters',
field=models.TextField(default=None),
preserve_default=True,
),
]
|
talumbau/webapp-public
|
webapp/apps/taxbrain/migrations/0003_taxsaveinputs_parameters.py
|
Python
|
mit
| 448 | 0 |
from __future__ import absolute_import
from rest_framework.response import Response
from sentry import filters
from sentry.api.bases.project import ProjectEndpoint
class ProjectFiltersEndpoint(ProjectEndpoint):
def get(self, request, project):
"""
List a project's filters
Retrieve a list of filters for a given project.
{method} {path}
"""
results = []
for f_cls in filters.all():
filter = f_cls(project)
results.append({
'id': filter.id,
# 'active' will be either a boolean or list for the legacy browser filters
# all other filters will be boolean
'active': filter.is_enabled(),
'description': filter.description,
'name': filter.name,
})
results.sort(key=lambda x: x['name'])
return Response(results)
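# Illustrative response shape (the filter id shown is hypothetical): the
# endpoint returns a JSON list such as
#   [{"id": "legacy-browsers", "active": [...], "description": "...",
#     "name": "..."}, ...]
# where "active" is a list only for the legacy browser filters and a boolean
# otherwise, as noted in the comment above.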
|
JackDanger/sentry
|
src/sentry/api/endpoints/project_filters.py
|
Python
|
bsd-3-clause
| 923 | 0.001083 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .update_resource import UpdateResource
class VirtualMachineUpdate(UpdateResource):
"""Describes a Virtual Machine.
Variables are only populated by the server, and will be ignored when
sending a request.
:param tags: Resource tags
:type tags: dict[str, str]
:param plan: Specifies information about the marketplace image used to
create the virtual machine. This element is only used for marketplace
images. Before you can use a marketplace image from an API, you must
enable the image for programmatic use. In the Azure portal, find the
marketplace image that you want to use and then click **Want to deploy
programmatically, Get Started ->**. Enter any required information and
then click **Save**.
:type plan: ~azure.mgmt.compute.v2017_12_01.models.Plan
:param hardware_profile: Specifies the hardware settings for the virtual
machine.
:type hardware_profile:
~azure.mgmt.compute.v2017_12_01.models.HardwareProfile
:param storage_profile: Specifies the storage settings for the virtual
machine disks.
:type storage_profile:
~azure.mgmt.compute.v2017_12_01.models.StorageProfile
:param os_profile: Specifies the operating system settings for the virtual
machine.
:type os_profile: ~azure.mgmt.compute.v2017_12_01.models.OSProfile
:param network_profile: Specifies the network interfaces of the virtual
machine.
:type network_profile:
~azure.mgmt.compute.v2017_12_01.models.NetworkProfile
:param diagnostics_profile: Specifies the boot diagnostic settings state.
<br><br>Minimum api-version: 2015-06-15.
:type diagnostics_profile:
~azure.mgmt.compute.v2017_12_01.models.DiagnosticsProfile
:param availability_set: Specifies information about the availability set
that the virtual machine should be assigned to. Virtual machines specified
in the same availability set are allocated to different nodes to maximize
availability. For more information about availability sets, see [Manage
the availability of virtual
machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
    <br><br> For more information on Azure planned maintenance, see [Planned
maintenance for virtual machines in
Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
<br><br> Currently, a VM can only be added to availability set at creation
time. An existing VM cannot be added to an availability set.
:type availability_set: ~azure.mgmt.compute.v2017_12_01.models.SubResource
:ivar provisioning_state: The provisioning state, which only appears in
the response.
:vartype provisioning_state: str
:ivar instance_view: The virtual machine instance view.
:vartype instance_view:
~azure.mgmt.compute.v2017_12_01.models.VirtualMachineInstanceView
:param license_type: Specifies that the image or disk that is being used
was licensed on-premises. This element is only used for images that
contain the Windows Server operating system. <br><br> Possible values are:
<br><br> Windows_Client <br><br> Windows_Server <br><br> If this element
is included in a request for an update, the value must match the initial
value. This value cannot be updated. <br><br> For more information, see
[Azure Hybrid Use Benefit for Windows
Server](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-hybrid-use-benefit-licensing?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
<br><br> Minimum api-version: 2015-06-15
:type license_type: str
:ivar vm_id: Specifies the VM unique ID which is a 128-bits identifier
that is encoded and stored in all Azure IaaS VMs SMBIOS and can be read
using platform BIOS commands.
:vartype vm_id: str
:param identity: The identity of the virtual machine, if configured.
:type identity:
~azure.mgmt.compute.v2017_12_01.models.VirtualMachineIdentity
:param zones: The virtual machine zones.
:type zones: list[str]
"""
_validation = {
'provisioning_state': {'readonly': True},
'instance_view': {'readonly': True},
'vm_id': {'readonly': True},
}
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'plan': {'key': 'plan', 'type': 'Plan'},
'hardware_profile': {'key': 'properties.hardwareProfile', 'type': 'HardwareProfile'},
'storage_profile': {'key': 'properties.storageProfile', 'type': 'StorageProfile'},
'os_profile': {'key': 'properties.osProfile', 'type': 'OSProfile'},
'network_profile': {'key': 'properties.networkProfile', 'type': 'NetworkProfile'},
'diagnostics_profile': {'key': 'properties.diagnosticsProfile', 'type': 'DiagnosticsProfile'},
'availability_set': {'key': 'properties.availabilitySet', 'type': 'SubResource'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineInstanceView'},
'license_type': {'key': 'properties.licenseType', 'type': 'str'},
'vm_id': {'key': 'properties.vmId', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'VirtualMachineIdentity'},
'zones': {'key': 'zones', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(VirtualMachineUpdate, self).__init__(**kwargs)
self.plan = kwargs.get('plan', None)
self.hardware_profile = kwargs.get('hardware_profile', None)
self.storage_profile = kwargs.get('storage_profile', None)
self.os_profile = kwargs.get('os_profile', None)
self.network_profile = kwargs.get('network_profile', None)
self.diagnostics_profile = kwargs.get('diagnostics_profile', None)
self.availability_set = kwargs.get('availability_set', None)
self.provisioning_state = None
self.instance_view = None
self.license_type = kwargs.get('license_type', None)
self.vm_id = None
self.identity = kwargs.get('identity', None)
self.zones = kwargs.get('zones', None)
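# Minimal usage sketch (not part of the generated model; the client call shown
# is indicative only and depends on the SDK version):
#
#     update = VirtualMachineUpdate(tags={'env': 'test'},
#                                   license_type='Windows_Server')
#     # compute_client.virtual_machines.update(resource_group, vm_name, update)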
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-compute/azure/mgmt/compute/v2017_12_01/models/virtual_machine_update.py
|
Python
|
mit
| 6,810 | 0.001028 |
class resonance():
"""
This class represents a resonance.
"""
def __init__(self,cR=1.0,wR=[],w0=1.,r0=.5,phase=0.):
self.wR=wR
self.cR=cR
self.w0=w0
self.r0=r0
self.phase=phase
def toString(self):
"""
        Returns a string of the resonance data members delimited by newlines.
"""
return "\n".join(["wR="+str(self.wR),"cR="+str(self.cR),"w0="+str(self.w0),"r0="+str(self.r0)])
|
bdell/pyPWA
|
pythonPWA/dataTypes/resonance.py
|
Python
|
mit
| 464 | 0.032328 |
'''
Convert a table from a nested list to a nested dictionary and back.
-----------------------------------------------------------
(c) 2013 Allegra Via and Kristian Rother
Licensed under the conditions of the Python License
This code appears in section 7.4.3 of the book
"Managing Biological Data with Python".
-----------------------------------------------------------
'''
table = [
['protein', 'ext1', 'ext2', 'ext3'],
[0.16, 0.038, 0.044, 0.040],
[0.33, 0.089, 0.095, 0.091],
[0.66, 0.184, 0.191, 0.191],
[1.00, 0.280, 0.292, 0.283],
[1.32, 0.365, 0.367, 0.365],
[1.66, 0.441, 0.443, 0.444]
]
# convert nested list to nested dict
nested_dict = {}
n = 0
key = table[0]
# To include the header, run the for loop over
# all table elements (including the first one)
for row in table[1:]:
n = n + 1
entry = {key[0]: row[0], key[1]: row[1], key[2]: row[2],
key[3]: row[3]}
nested_dict['row'+str(n)] = entry
# Test
# print(table[1:])
print(nested_dict)
nested_list = []
for entry in nested_dict:
key = nested_dict[entry]
nested_list.append([key['protein'], key['ext1'], key['ext2'],
key['ext3']])
print(nested_list)
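# For reference, the first generated entry is
# nested_dict['row1'] == {'protein': 0.16, 'ext1': 0.038, 'ext2': 0.044,
# 'ext3': 0.040}, and nested_list reproduces the numeric rows of the original
# table without the header (row order may vary on Python versions where dicts
# are unordered).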
|
raymonwu/Managing_Your_Biological_Data_with_Python_3
|
07-tabular_data/7.4.3_convert_table.py
|
Python
|
mit
| 1,222 | 0 |
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import, division, unicode_literals
import gzip
import zipfile
import boto
from boto.s3.connection import Location
from bs4 import BeautifulSoup
import mo_files
from mo_dots import Data, Null, coalesce, unwrap, to_data, is_many, list_to_data
from mo_files import mimetype
from mo_files.url import value2url_param
from mo_future import StringIO, is_binary, text
from mo_http import http
from mo_http.big_data import (
LazyLines,
MAX_STRING_SIZE,
ibytes2ilines,
safe_size,
scompressed2ibytes,
)
from mo_kwargs import override
from mo_logs import Except, Log
from mo_testing.fuzzytestcase import assertAlmostEqual
from mo_times.dates import Date
from mo_times.timer import Timer
from pyLibrary import convert
VERIFY_UPLOAD = True
DEBUG = False
TOO_MANY_KEYS = 1000 * 1000 * 1000
READ_ERROR = "S3 read error"
MAX_FILE_SIZE = 100 * 1024 * 1024
VALID_KEY = r"\d+([.:]\d+)*"
KEY_IS_WRONG_FORMAT = "key {{key}} in bucket {{bucket}} is of the wrong format"
class File(object):
def __init__(self, bucket, key):
self.bucket = bucket
self.key = key
def read(self):
return self.bucket.read(self.key)
def read_lines(self):
return self.bucket.read_lines(self.key)
def write(self, value):
self.bucket.write(self.key, value)
def write_lines(self, lines):
self.bucket.write_lines(self.key, lines)
@property
def meta(self):
return self.bucket.meta(self.key)
def delete(self):
return self.bucket.delete_key(self.key)
class Connection(object):
@override
def __init__(
self,
aws_access_key_id=None, # CREDENTIAL
aws_secret_access_key=None, # CREDENTIAL
region=None, # NAME OF AWS REGION, REQUIRED FOR SOME BUCKETS
kwargs=None,
):
self.settings = kwargs
try:
if not kwargs.region:
self.connection = boto.connect_s3(
aws_access_key_id=unwrap(self.settings.aws_access_key_id),
aws_secret_access_key=unwrap(self.settings.aws_secret_access_key),
)
else:
self.connection = boto.s3.connect_to_region(
self.settings.region,
aws_access_key_id=unwrap(self.settings.aws_access_key_id),
aws_secret_access_key=unwrap(self.settings.aws_secret_access_key),
)
except Exception as e:
Log.error("Problem connecting to S3", e)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.connection:
self.connection.close()
def get_bucket(self, name):
output = SkeletonBucket()
output.bucket = self.connection.get_bucket(name, validate=False)
return output
class Bucket(object):
"""
STORE JSON, OR CR-DELIMITED LIST OF JSON, IN S3
THIS CLASS MANAGES THE ".json" EXTENSION, AND ".gz"
(ZIP/UNZIP) SHOULD THE FILE BE BIG ENOUGH TO
JUSTIFY IT
ALL KEYS ARE DIGITS, SEPARATED BY DOT (.) COLON (:)
"""
@override
def __init__(
self,
bucket, # NAME OF THE BUCKET
aws_access_key_id=None, # CREDENTIAL
aws_secret_access_key=None, # CREDENTIAL
region=None, # NAME OF AWS REGION, REQUIRED FOR SOME BUCKETS
public=False,
debug=False,
kwargs=None,
):
self.settings = kwargs
self.connection = None
self.bucket = None
self.key_format = _scrub_key(kwargs.key_format)
try:
self.connection = Connection(kwargs).connection
self.bucket = self.connection.get_bucket(
self.settings.bucket, validate=False
)
except Exception as e:
Log.error(
"Problem connecting to {{bucket}}", bucket=self.settings.bucket, cause=e
)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.connection:
self.connection.close()
def __getattr__(self, item):
return getattr(self.bucket, item)
def get_key(self, key, must_exist=True):
if must_exist:
meta = self.get_meta(key)
if not meta:
Log.error(
"Key {{key}} does not exist in bucket {{bucket}}",
key=key,
bucket=self.bucket.name,
)
key = strip_extension(meta.key)
return File(self, key)
def delete_key(self, key):
# self._verify_key_format(key) DO NOT VERIFY, DELETE BAD KEYS ANYWAY!!
try:
meta = self.get_meta(key, conforming=False)
if meta == None:
return
self.bucket.delete_key(meta.key)
except Exception as e:
self.get_meta(key, conforming=False)
raise e
def delete_keys(self, keys):
self.bucket.delete_keys([str(k) for k in keys])
def get_meta(self, key, conforming=True):
"""
RETURN METADATA ON FILE IN BUCKET
:param key: KEY, OR PREFIX OF KEY
:param conforming: TEST IF THE KEY CONFORMS TO REQUIRED PATTERN
:return: METADATA, IF UNIQUE, ELSE ERROR
"""
try:
metas = list(self.bucket.list(prefix=str(key)))
metas = list_to_data([m for m in metas if text(m.name).find(".json") != -1])
perfect = Null
favorite = Null
too_many = False
error = None
for m in metas:
try:
simple = strip_extension(m.key)
if conforming:
self._verify_key_format(simple)
if simple == key:
perfect = m
too_many = False
if simple.startswith(key + ".") or simple.startswith(key + ":"):
if favorite and not perfect:
too_many = True
favorite = m
except Exception as e:
error = e
if too_many:
Log.error(
"multiple keys in {{bucket}} with prefix={{prefix|quote}}: {{list}}",
bucket=self.name,
prefix=key,
list=[k.name for k in metas],
)
if not perfect and error:
Log.error("Problem with key request", error)
return coalesce(perfect, favorite)
except Exception as e:
Log.error(
READ_ERROR + " can not read {{key}} from {{bucket}}",
key=key,
bucket=self.bucket.name,
cause=e,
)
def keys(self, prefix=None, delimiter=None):
"""
:param prefix: NOT A STRING PREFIX, RATHER PATH ID PREFIX (MUST MATCH TO NEXT "." OR ":")
:param delimiter: TO GET Prefix OBJECTS, RATHER THAN WHOLE KEYS
        :return: SET OF KEYS IN BUCKET, OR SET OF Prefix NAMES WHEN delimiter IS GIVEN
"""
if delimiter:
# WE REALLY DO NOT GET KEYS, BUT RATHER Prefix OBJECTS
# AT LEAST THEY ARE UNIQUE
candidates = [
k.name.rstrip(delimiter)
for k in self.bucket.list(prefix=str(prefix), delimiter=str(delimiter))
]
else:
candidates = [
strip_extension(k.key) for k in self.bucket.list(prefix=str(prefix))
]
if prefix == None:
return set(c for c in candidates if c != "0.json")
else:
return set(
k
for k in candidates
if k == prefix
or k.startswith(prefix + ".")
or k.startswith(prefix + ":")
)
def metas(self, prefix=None, limit=None, delimiter=None):
"""
RETURN THE METADATA DESCRIPTORS FOR EACH KEY
"""
limit = coalesce(limit, TOO_MANY_KEYS)
keys = self.bucket.list(prefix=str(prefix), delimiter=str(delimiter))
prefix_len = len(prefix)
output = []
for i, k in enumerate(
k
for k in keys
if len(k.key) == prefix_len or k.key[prefix_len] in [".", ":"]
):
output.append(
{
"key": strip_extension(k.key),
"etag": convert.quote2string(k.etag),
"expiry_date": Date(k.expiry_date),
"last_modified": Date(k.last_modified),
}
)
if i >= limit:
break
return to_data(output)
def read(self, key):
source = self.get_meta(key)
try:
json = safe_size(source)
except Exception as e:
Log.error(READ_ERROR, e)
if json == None:
return None
if source.key.endswith(".zip"):
json = _unzip(json)
elif source.key.endswith(".gz"):
json = convert.zip2bytes(json)
return json.decode("utf8")
def read_bytes(self, key):
source = self.get_meta(key)
return safe_size(source)
def read_lines(self, key):
source = self.get_meta(key)
if source is None:
Log.error("{{key}} does not exist", key=key)
elif source.key.endswith(".gz"):
return LazyLines(ibytes2ilines(scompressed2ibytes(source)))
elif source.size < MAX_STRING_SIZE:
return source.read().decode("utf8").split("\n")
else:
return LazyLines(source)
def write(self, key, value, disable_zip=False):
if key.endswith(".json") or key.endswith(".zip"):
Log.error("Expecting a pure key")
try:
if hasattr(value, "read"):
if disable_zip:
storage = self.bucket.new_key(str(key + ".json"))
string_length = len(value)
headers = {"Content-Type": mimetype.JSON}
else:
storage = self.bucket.new_key(str(key + ".json.gz"))
string_length = len(value)
value = convert.bytes2zip(value)
headers = {"Content-Type": mimetype.GZIP}
file_length = len(value)
Log.note(
"Sending contents with length {{file_length|comma}} (from string with length {{string_length|comma}})",
file_length=file_length,
string_length=string_length,
)
value.seek(0)
storage.set_contents_from_file(value, headers=headers)
if self.settings.public:
storage.set_acl("public-read")
return
if len(value) > 20 * 1000 and not disable_zip:
self.bucket.delete_key(str(key + ".json"))
self.bucket.delete_key(str(key + ".json.gz"))
if is_binary(value):
value = convert.bytes2zip(value)
key += ".json.gz"
else:
value = convert.bytes2zip(value).encode("utf8")
key += ".json.gz"
headers = {"Content-Type": mimetype.GZIP}
else:
self.bucket.delete_key(str(key + ".json.gz"))
if is_binary(value):
key += ".json"
else:
key += ".json"
headers = {"Content-Type": mimetype.JSON}
storage = self.bucket.new_key(str(key))
storage.set_contents_from_string(value, headers=headers)
if self.settings.public:
storage.set_acl("public-read")
except Exception as e:
Log.error(
"Problem writing {{bytes}} bytes to {{key}} in {{bucket}}",
key=key,
bucket=self.bucket.name,
bytes=len(value),
cause=e,
)
def write_lines(self, key, lines):
self._verify_key_format(key)
storage = self.bucket.new_key(str(key + ".json.gz"))
if VERIFY_UPLOAD:
lines = list(lines)
with mo_files.TempFile() as tempfile:
with open(tempfile.abspath, "wb") as buff:
DEBUG and Log.note("Temp file {{filename}}", filename=tempfile.abspath)
archive = gzip.GzipFile(filename=str(key + ".json"), fileobj=buff, mode="w")
count = 0
for l in lines:
if is_many(l):
for ll in l:
archive.write(ll.encode("utf8"))
archive.write(b"\n")
count += 1
else:
archive.write(l.encode("utf8"))
archive.write(b"\n")
count += 1
archive.close()
retry = 3
while retry:
try:
with Timer(
"Sending {{count}} lines in {{file_length|comma}} bytes for {{key}}",
{"key": key, "file_length": tempfile.length, "count": count},
verbose=self.settings.debug,
):
storage.set_contents_from_filename(
tempfile.abspath, headers={"Content-Type": mimetype.GZIP}
)
break
except Exception as e:
e = Except.wrap(e)
retry -= 1
if (
retry == 0
or "Access Denied" in e
or "No space left on device" in e
):
Log.error("could not push data to s3", cause=e)
else:
Log.warning("could not push data to s3, will retry", cause=e)
if self.settings.public:
storage.set_acl("public-read")
if VERIFY_UPLOAD:
try:
with open(tempfile.abspath, mode="rb") as source:
result = list(ibytes2ilines(scompressed2ibytes(source)))
assertAlmostEqual(result, lines, msg="file is different")
# full_url = "https://"+self.name+".s3-us-west-2.amazonaws.com/"+storage.key.replace(":", "%3A")
# https://active-data-test-result.s3-us-west-2.amazonaws.com/tc.1524896%3A152488763.0.json.gz
# dest_bucket = s3.MultiBucket(bucket="self.name", kwargs=self.settings.aws)
result = list(self.read_lines(strip_extension(key)))
assertAlmostEqual(result, lines, result, msg="S3 is different")
except Exception as e:
from activedata_etl.transforms import TRY_AGAIN_LATER
Log.error(TRY_AGAIN_LATER, reason="did not pass verification", cause=e)
return
@property
def name(self):
return self.settings.bucket
def _verify_key_format(self, key):
if self.key_format == None:
return
if self.key_format != _scrub_key(key):
Log.error(KEY_IS_WRONG_FORMAT, key=key, bucket=self.bucket.name)
class SkeletonBucket(Bucket):
"""
LET CALLER WORRY ABOUT SETTING PROPERTIES
"""
def __init__(self):
object.__init__(self)
self.connection = None
self.bucket = None
self.key_format = None
content_keys = {
"key": text,
"lastmodified": Date,
"etag": text,
"size": int,
"storageclass": text,
}
class PublicBucket(object):
"""
USE THE https PUBLIC API TO INTERACT WITH A BUCKET
MAYBE boto CAN DO THIS, BUT NO DOCS FOUND
"""
@override
def __init__(self, url, kwargs=None):
self.url = url
def list(self, prefix=None, marker=None, delimiter=None):
# https://s3.amazonaws.com/net-mozaws-stage-fx-test-activedata?marker=jenkins-go-bouncer.prod-3019/py27.log
# <ListBucketResult>
# <Name>net-mozaws-stage-fx-test-activedata</Name>
# <Prefix/>
# <Marker>jenkins-go-bouncer.prod-3019/py27.log</Marker>
# <MaxKeys>1000</MaxKeys>
# <IsTruncated>true</IsTruncated>
# <Contents>
# <Key>jenkins-go-bouncer.prod-3020/py27.log</Key>
# <LastModified>2017-03-05T07:02:20.000Z</LastModified>
# <ETag>"69dcb19e91eb3eec51e1b659801523d6"</ETag>
# <Size>10037</Size>
# <StorageClass>STANDARD</StorageClass>
state = Data()
state.prefix = prefix
state.delimiter = delimiter
state.marker = marker
state.get_more = True
def more():
xml = http.get(self.url + "?" + value2url_param(state)).content
data = BeautifulSoup(xml, "xml")
state.get_more = data.find("istruncated").contents[0] == "true"
contents = data.findAll("contents")
if len(contents):
state.marker = contents[-1].find("key").contents[0]
return [
{k: t(d.find(k).contents[0]) for k, t in content_keys.items()}
for d in contents
]
while state.get_more:
content = more()
for c in content:
yield to_data(c)
def read_lines(self, key):
url = self.url + "/" + key
return http.get(url).all_lines
def strip_extension(key):
key = text(key)
e = key.find(".json")
if e == -1:
return key
return key[:e]
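# e.g. strip_extension("245:123.json.gz") returns "245:123" (illustrative)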
def _unzip(compressed):
buff = StringIO(compressed)
archive = zipfile.ZipFile(buff, mode="r")
return archive.read(archive.namelist()[0])
def _scrub_key(key):
"""
RETURN JUST THE :. CHARACTERS
"""
if key == None:
return None
output = []
for c in key:
if c in [":", "."]:
output.append(c)
return "".join(output)
def key_prefix(key):
return int(key.split(":")[0].split(".")[0])
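# Illustrative examples (not in the original module):
#   _scrub_key("245:123.0") -> ":."
#   key_prefix("245:123.0") -> 245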
|
klahnakoski/ActiveData
|
vendor/pyLibrary/aws/s3.py
|
Python
|
mpl-2.0
| 18,596 | 0.001559 |
# -*- encoding: utf-8 -*-
from abjad import *
def test_pitchtools_PitchClass_is_pitch_class_number_01():
assert pitchtools.PitchClass.is_pitch_class_number(0)
assert pitchtools.PitchClass.is_pitch_class_number(0.5)
assert pitchtools.PitchClass.is_pitch_class_number(11)
assert pitchtools.PitchClass.is_pitch_class_number(11.5)
def test_pitchtools_PitchClass_is_pitch_class_number_02():
assert not pitchtools.PitchClass.is_pitch_class_number(-1)
assert not pitchtools.PitchClass.is_pitch_class_number(-0.5)
assert not pitchtools.PitchClass.is_pitch_class_number(12)
assert not pitchtools.PitchClass.is_pitch_class_number(99)
assert not pitchtools.PitchClass.is_pitch_class_number('foo')
|
mscuthbert/abjad
|
abjad/tools/pitchtools/test/test_pitchtools_PitchClass_is_pitch_class_number.py
|
Python
|
gpl-3.0
| 727 | 0.001376 |
# OpenShot Video Editor is a program that creates, modifies, and edits video files.
# Copyright (C) 2009 Jonathan Thomas
#
# This file is part of OpenShot Video Editor (http://launchpad.net/openshot/).
#
# OpenShot Video Editor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenShot Video Editor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenShot Video Editor. If not, see <http://www.gnu.org/licenses/>.
# Import Blender's python API. This only works when the script is being
# run from the context of Blender. Blender contains its own version of Python
# with this library pre-installed.
import bpy
# Load a font
def load_font(font_path):
""" Load a new TTF font into Blender, and return the font object """
# get the original list of fonts (before we add a new one)
original_fonts = bpy.data.fonts.keys()
# load new font
bpy.ops.font.open(filepath=font_path)
# get the new list of fonts (after we added a new one)
for font_name in bpy.data.fonts.keys():
if font_name not in original_fonts:
return bpy.data.fonts[font_name]
# no new font was added
return None
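# Usage sketch (the font path below is hypothetical):
#
#     font = load_font("/usr/share/fonts/truetype/freefont/FreeSans.ttf")
#     # returns the newly loaded bpy font object, or None if nothing new
#     # was added to bpy.data.fonts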
# Debug Info:
# ./blender -b test.blend -P demo.py
# -b = background mode
# -P = run a Python script within the context of the project file
# Init all of the variables needed by this script. Because Blender executes
# this script, OpenShot will inject a dictionary of the required parameters
# before this script is executed.
params = {
'title' : 'Oh Yeah! OpenShot!',
'extrude' : 0.1,
'bevel_depth' : 0.02,
'spacemode' : 'CENTER',
'text_size' : 1.5,
'width' : 1.0,
'fontname' : 'Bfont',
'color' : [0.8,0.8,0.8],
'alpha' : 1.0,
'line1_color' : [0.8,0.8,0.8],
'line2_color' : [0.8,0.8,0.8],
'line3_color' : [0.8,0.8,0.8],
'line4_color' : [0.8,0.8,0.8],
'output_path' : '/tmp/',
'fps' : 24,
'quality' : 90,
'file_format' : 'PNG',
'color_mode' : 'RGBA',
'horizon_color' : [0.57, 0.57, 0.57],
'resolution_x' : 1920,
'resolution_y' : 1080,
'resolution_percentage' : 100,
'start_frame' : 20,
'end_frame' : 25,
'animation' : True,
}
#INJECT_PARAMS_HERE
# The remainder of this script will modify the current Blender .blend project
# file, and adjust the settings. The .blend file is specified in the XML file
# that defines this template in OpenShot.
#----------------------------------------------------------------------------
# Modify Text / Curve settings
#print (bpy.data.curves.keys())
text_object = bpy.data.curves["Text.001"]
text_object.extrude = params["extrude"]
text_object.bevel_depth = params["bevel_depth"]
text_object.body = params["title"]
text_object.align = params["spacemode"]
text_object.size = params["text_size"]
text_object.space_character = params["width"]
# Get font object
font = None
if params["fontname"] != "Bfont":
# Add font so it's available to Blender
font = load_font(params["fontname"])
else:
# Get default font
font = bpy.data.fonts["Bfont"]
text_object.font = font
# Change the material settings (color, alpha, etc...)
material_object = bpy.data.materials["Material.title"]
material_object.diffuse_color = params["diffuse_color"]
material_object.specular_color = params["specular_color"]
material_object.specular_intensity = params["specular_intensity"]
material_object.alpha = params["alpha"]
# Change line colors
material_object = bpy.data.materials["Material.line1"]
material_object.diffuse_color = params["line1_color"]
material_object = bpy.data.materials["Material.line2"]
material_object.diffuse_color = params["line2_color"]
material_object = bpy.data.materials["Material.line3"]
material_object.diffuse_color = params["line3_color"]
material_object = bpy.data.materials["Material.line4"]
material_object.diffuse_color = params["line4_color"]
# Set the render options. It is important that these are set
# to the same values as the current OpenShot project. These
# params are automatically set by OpenShot
bpy.context.scene.render.filepath = params["output_path"]
bpy.context.scene.render.fps = params["fps"]
#bpy.context.scene.render.quality = params["quality"]
try:
bpy.context.scene.render.file_format = params["file_format"]
bpy.context.scene.render.color_mode = params["color_mode"]
except:
bpy.context.scene.render.image_settings.file_format = params["file_format"]
bpy.context.scene.render.image_settings.color_mode = params["color_mode"]
#bpy.data.worlds[0].horizon_color = params["horizon_color"]
bpy.context.scene.render.resolution_x = params["resolution_x"]
bpy.context.scene.render.resolution_y = params["resolution_y"]
bpy.context.scene.render.resolution_percentage = params["resolution_percentage"]
bpy.context.scene.frame_start = params["start_frame"]
bpy.context.scene.frame_end = params["end_frame"]
# Animation Speed (use Blender's time remapping to slow or speed up animation)
animation_speed = int(params["animation_speed"]) # time remapping multiplier
new_length = int(params["end_frame"]) * animation_speed # new length (in frames)
bpy.context.scene.frame_end = new_length
bpy.context.scene.render.frame_map_old = 1
bpy.context.scene.render.frame_map_new = animation_speed
if params["start_frame"] == params["end_frame"]:
bpy.context.scene.frame_start = params["end_frame"]
bpy.context.scene.frame_end = params["end_frame"]
# Render the current animation to the params["output_path"] folder
bpy.ops.render.render(animation=params["animation"])
|
i5o/openshot-sugar
|
openshot/openshot/blender/scripts/neon_curves.py
|
Python
|
gpl-3.0
| 5,877 | 0.020759 |
#!/user/bin/python
'''
This script uses SimpleCV to grab an image from the camera and numpy to find an infrared LED and report its position relative to the camera view centre and whether it is inside the target area.
Attempted stabilisation of the output by tracking a circular object instead and altering exposure of the camera.
'''
# make it possible to import from parent directory:
import sys
sys.path.insert(0,'..')
## Change terminal window header for easier identification of contents
sys.stdout.write("\x1b]2;Sensors/simpleCV_3.py\x07")
import time, math, SimpleCV
import zmq, json
import subprocess as sp
from globalVars import CHANNEL_TARGETDATA
from globalVars import CAMERA_ID_NUMBER
printing = True
dpx = 0.0025 # approximate amount of degrees per pixel for Trust eLight
width = 1920
height = 1080
camera_id = 'video' + str(CAMERA_ID_NUMBER)
# To increase framerate, count the search() loops and render every n frames
renderFrame = 5
frame = 0
# Adjust camera settings from OS, since SimpleCV's commands don't do anything:
sp.call(["uvcdynctrl -d '"+camera_id+"' -s 'Exposure, Auto' 1"], shell = True) # Disable auto exposure
sp.call(["uvcdynctrl -d '"+camera_id+"' -s 'Exposure (Absolute)' 12"], shell = True) # Set absolute exposure
display = SimpleCV.Display()
cam = SimpleCV.Camera(CAMERA_ID_NUMBER, {"width":width,"height":height})
#target box for the marker
box_d = 20
yTgt = (height/2-box_d, height/2+box_d)
xTgt = (width/2-box_d, width/2+box_d)
box_clr = SimpleCV.Color.RED
centre = (height/2, width/2)
context = zmq.Context()
socket = context.socket(zmq.PUB)
socket.bind(CHANNEL_TARGETDATA)
def search():
global frame, renderFrame
img = cam.getImage()
objective = img.colorDistance(color=(255,255,255)).invert()
seg_objective = objective.stretch(200,255)
blobs = seg_objective.findBlobs()
if blobs:
center_point = (blobs[-1].x, blobs[-1].y)
if frame is renderFrame:
img.drawCircle((blobs[-1].x, blobs[-1].y), 10,SimpleCV.Color.YELLOW,3)
img.dl().rectangle2pts((xTgt[0], yTgt[0]), (xTgt[1],yTgt[1]), box_clr)
img.show()
frame = 0
frame +=1
return center_point
if frame is renderFrame:
img.dl().rectangle2pts((xTgt[0], yTgt[0]), (xTgt[1],yTgt[1]), box_clr)
img.show()
frame = 0
frame +=1
return None
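# Worked example of the angle conversion used below (illustrative): with
# dpx = 0.0025 degrees per pixel, a marker found 100 px right of the view
# centre gives deg_x = 100 * 0.0025 = 0.25 degrees.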
#get current time in milliseconds
millis = lambda: int(round(time.time() * 1000))
#############################################################
# RUNNING CODE BELOW #
#############################################################
tar_x = 0
tar_y = 0
deg_x = 0
deg_y = 0
last_tar = tar_x
found = False
findTime = 0
lastFound = findTime
lossReported = False
while display.isNotDone():
target = search()
if target is not None:
tar_x = target[0]-width/2
tar_y = target[1]-height/2
findTime = millis()
found = True
lossReported = False
else:
found = False
lastFound = findTime
# Angular difference between the box and the target
# Having the target within the box is acceptable
if abs(tar_x) > box_d:
deg_x = tar_x * dpx
else:
deg_x = 0
if abs(tar_y) > box_d:
deg_y = tar_y * dpx
else:
deg_y = 0
# If the target is in the box, indicate this with the box colour
if deg_y is 0 and deg_x is 0 and found:
box_clr = SimpleCV.Color.GREEN
else:
box_clr = SimpleCV.Color.RED
#output the data
# not needed if there's no new data to report
if not lossReported:
message = {
't' : millis(),
'findTime': findTime,
'found' : found,
'tar_px' : {'x':tar_x, 'y':tar_y},
'tar_dg' : {'x':deg_x, 'y':deg_y}
}
        # wait 100 ms to make sure Scan picks up on the last hit
if not found and millis() - findTime < 100:
continue
socket.send_json(message)
print "Sent targetData: ",
print message
if lastFound == findTime:
lossReported = False
#spam to keep data flowing
|
dotCID/Graduation
|
Robot code/Sensors/simpleCV_3.py
|
Python
|
gpl-2.0
| 4,363 | 0.013981 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 NLPY.ORG
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
import numpy as np
from line_iterator import LineIterator
class FeatureContainer(object):
def __init__(self, path=None, dtype="libsvm", feature_n=-1):
self.N = 0
self.data = np.zeros(0)
self.targets = np.zeros(0)
self.feature_n = feature_n
self.path = path
self.dtype = dtype
# if path:
# self.read(path, dtype)
def read(self):
"""
        Read the feature matrix from self.path (libsvm format only) and yield
        (features, target) tuples, one per line
"""
ys = []
xs = []
for line in LineIterator(self.path):
items = line.split(" ")
feature_map = {}
y = 0
for item in items:
if ":" in item:
feature_idx, value = item.split(":")
feature_map[int(feature_idx)] = float(value)
else:
y = int(item)
if self.feature_n == -1:
max_key = max(feature_map.keys()) if feature_map else 0
else:
max_key = self.feature_n
features = []
for fidx in range(1, max_key + 1):
if fidx in feature_map:
features.append(feature_map[fidx])
else:
features.append(0)
yield features, y
# xs.append(features)
# ys.append(y)
#
# self.data = np.array(xs)
# self.targets = np.array(ys)
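# Worked example of the libsvm parsing above (illustrative): with feature_n=-1
# the line "1 3:0.5 7:1.2" yields features = [0, 0, 0.5, 0, 0, 0, 1.2]
# (feature indices 1..7) and y = 1.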
|
zomux/nlpy
|
nlpy/util/feature_container.py
|
Python
|
gpl-3.0
| 1,660 | 0.001205 |
'''
Task Coach - Your friendly task manager
Copyright (C) 2004-2010 Task Coach developers <developers@taskcoach.org>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import test
from taskcoachlib.widgets import treectrl
class DummyEvent(object):
def __init__(self, item=None):
self.item = item
self.vetoed = self.allowed = False
def GetItem(self):
return self.item
def Veto(self):
self.vetoed = True
def Allow(self):
self.allowed = True
class TreeCtrlDragAndDropMixinTest(test.wxTestCase):
# pylint: disable-msg=E1101
def setUp(self):
self.treeCtrl = treectrl.HyperTreeList(self.frame)
self.treeCtrl.AddColumn('First')
self.rootItem = self.treeCtrl.AddRoot('root')
self.item = self.treeCtrl.AppendItem(self.rootItem, 'item')
def assertEventIsVetoed(self, event):
self.failUnless(event.vetoed)
self.failIf(event.allowed)
def assertEventIsAllowed(self, event):
self.failUnless(event.allowed)
self.failIf(event.vetoed)
def testEventIsVetoedWhenDragBeginsWithoutItem(self):
event = DummyEvent()
self.treeCtrl.OnBeginDrag(event)
self.assertEventIsVetoed(event)
def testEventIsAllowedWhenDragBeginsWithItem(self):
event = DummyEvent(self.item)
self.treeCtrl.OnBeginDrag(event)
self.assertEventIsAllowed(event)
def testEventIsAllowedWhenDragBeginWithSelectedItem(self):
self.treeCtrl.SelectItem(self.item)
event = DummyEvent(self.item)
self.treeCtrl.OnBeginDrag(event)
self.assertEventIsAllowed(event)
|
wdmchaft/taskcoach
|
tests/unittests/widgetTests/DragAndDropTest.py
|
Python
|
gpl-3.0
| 2,283 | 0.005694 |
from django import http
from django.apps import apps
from django.conf import settings
from django.contrib import admin
from django.core.exceptions import PermissionDenied
from django.core.files.storage import default_storage as storage
from django.shortcuts import get_object_or_404, redirect
from django.views import debug
from django.views.decorators.cache import never_cache
import six
import olympia.core.logger
from olympia import amo
from olympia.activity.models import ActivityLog
from olympia.addons.decorators import addon_view_factory
from olympia.addons.indexers import get_mappings as get_addons_mappings
from olympia.addons.models import Addon
from olympia.amo import messages, search
from olympia.amo.decorators import (
json_view, permission_required, post_required)
from olympia.amo.mail import DevEmailBackend
from olympia.amo.utils import HttpResponseSendFile, render
from olympia.bandwagon.models import Collection
from olympia.files.models import File, FileUpload
from olympia.stats.search import get_mappings as get_stats_mappings
from olympia.versions.models import Version
from .decorators import admin_required
from .forms import (
AddonStatusForm, FeaturedCollectionFormSet, FileFormSet,
MonthlyPickFormSet)
log = olympia.core.logger.getLogger('z.zadmin')
@admin_required
def show_settings(request):
settings_dict = debug.get_safe_settings()
return render(request, 'zadmin/settings.html',
{'settings_dict': settings_dict, 'title': 'Settings!'})
@admin_required
def env(request):
env = {}
for k in request.META.keys():
env[k] = debug.cleanse_setting(k, request.META[k])
return render(request, 'zadmin/settings.html',
{'settings_dict': env, 'title': 'Env!'})
@admin.site.admin_view
def fix_disabled_file(request):
file_ = None
if request.method == 'POST' and 'file' in request.POST:
file_ = get_object_or_404(File, id=request.POST['file'])
if 'confirm' in request.POST:
file_.unhide_disabled_file()
messages.success(request, 'We have done a great thing.')
return redirect('zadmin.fix-disabled')
return render(request, 'zadmin/fix-disabled.html',
{'file': file_, 'file_id': request.POST.get('file', '')})
@admin_required
@json_view
def collections_json(request):
app = request.GET.get('app', '')
q = request.GET.get('q', '')
data = []
if not q:
return data
qs = Collection.objects.all()
try:
qs = qs.filter(pk=int(q))
except ValueError:
qs = qs.filter(slug__startswith=q)
try:
qs = qs.filter(application=int(app))
except ValueError:
pass
for c in qs[:7]:
data.append({'id': c.id,
'name': six.text_type(c.name),
'slug': six.text_type(c.slug),
'all_personas': c.all_personas,
'url': c.get_url_path()})
return data
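# Illustrative response shape (values are made up): collections_json returns a
# JSON list such as
#   [{"id": 42, "name": "My Collection", "slug": "my-collection",
#     "all_personas": False, "url": "/collections/user/my-collection/"}]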
@admin_required
@post_required
def featured_collection(request):
try:
pk = int(request.POST.get('collection', 0))
except ValueError:
pk = 0
c = get_object_or_404(Collection, pk=pk)
return render(request, 'zadmin/featured_collection.html',
dict(collection=c))
@admin_required
def features(request):
form = FeaturedCollectionFormSet(request.POST or None)
if request.method == 'POST' and form.is_valid():
form.save(commit=False)
for obj in form.deleted_objects:
obj.delete()
messages.success(request, 'Changes successfully saved.')
return redirect('zadmin.features')
return render(request, 'zadmin/features.html', dict(form=form))
@admin_required
def monthly_pick(request):
form = MonthlyPickFormSet(request.POST or None)
if request.method == 'POST' and form.is_valid():
form.save()
messages.success(request, 'Changes successfully saved.')
return redirect('zadmin.monthly_pick')
return render(request, 'zadmin/monthly_pick.html', dict(form=form))
@admin_required
def elastic(request):
INDEX = settings.ES_INDEXES['default']
es = search.get_es()
indexes = set(settings.ES_INDEXES.values())
es_mappings = {
'addons': get_addons_mappings(),
'addons_stats': get_stats_mappings(),
}
ctx = {
'index': INDEX,
'nodes': es.nodes.stats(),
'health': es.cluster.health(),
'state': es.cluster.state(),
'mappings': [(index, es_mappings.get(index, {})) for index in indexes],
}
return render(request, 'zadmin/elastic.html', ctx)
@admin.site.admin_view
def mail(request):
backend = DevEmailBackend()
if request.method == 'POST':
backend.clear()
return redirect('zadmin.mail')
return render(request, 'zadmin/mail.html', dict(mail=backend.view_all()))
@permission_required(amo.permissions.ANY_ADMIN)
def index(request):
log = ActivityLog.objects.admin_events()[:5]
return render(request, 'zadmin/index.html', {'log': log})
@admin_required
def addon_search(request):
ctx = {}
if 'q' in request.GET:
q = ctx['q'] = request.GET['q']
if q.isdigit():
qs = Addon.objects.filter(id=int(q))
else:
qs = Addon.search().query(name__text=q.lower())[:100]
if len(qs) == 1:
return redirect('zadmin.addon_manage', qs[0].id)
ctx['addons'] = qs
return render(request, 'zadmin/addon-search.html', ctx)
@never_cache
@json_view
def general_search(request, app_id, model_id):
if not admin.site.has_permission(request):
raise PermissionDenied
try:
model = apps.get_model(app_id, model_id)
except LookupError:
raise http.Http404
limit = 10
obj = admin.site._registry[model]
ChangeList = obj.get_changelist(request)
# This is a hideous api, but uses the builtin admin search_fields API.
# Expecting this to get replaced by ES so soon, that I'm not going to lose
# too much sleep about it.
args = [request, obj.model, [], [], [], [], obj.search_fields, [],
obj.list_max_show_all, limit, [], obj]
try:
        # Python 3.3+ only (inspect.signature)
from inspect import signature
if 'sortable_by' in signature(ChangeList.__init__).parameters:
args.append('None') # sortable_by is a django2.1+ addition
except ImportError:
pass
cl = ChangeList(*args)
qs = cl.get_queryset(request)
# Override search_fields_response on the ModelAdmin object
# if you'd like to pass something else back to the front end.
lookup = getattr(obj, 'search_fields_response', None)
return [{'value': o.pk, 'label': getattr(o, lookup) if lookup else str(o)}
for o in qs[:limit]]
@admin_required
@addon_view_factory(qs=Addon.objects.all)
def addon_manage(request, addon):
form = AddonStatusForm(request.POST or None, instance=addon)
pager = amo.utils.paginate(
request, Version.unfiltered.filter(addon=addon), 30)
# A list coercion so this doesn't result in a subquery with a LIMIT which
# MySQL doesn't support (at this time).
versions = list(pager.object_list)
files = File.objects.filter(version__in=versions).select_related('version')
formset = FileFormSet(request.POST or None, queryset=files)
if form.is_valid() and formset.is_valid():
if 'status' in form.changed_data:
ActivityLog.create(amo.LOG.CHANGE_STATUS, addon,
form.cleaned_data['status'])
log.info('Addon "%s" status changed to: %s' % (
addon.slug, form.cleaned_data['status']))
form.save()
for form in formset:
if 'status' in form.changed_data:
log.info('Addon "%s" file (ID:%d) status changed to: %s' % (
addon.slug, form.instance.id, form.cleaned_data['status']))
form.save()
return redirect('zadmin.addon_manage', addon.slug)
# Build a map from file.id to form in formset for precise form display
form_map = dict((form.instance.id, form) for form in formset.forms)
# A version to file map to avoid an extra query in the template
file_map = {}
for file in files:
file_map.setdefault(file.version_id, []).append(file)
return render(request, 'zadmin/addon_manage.html', {
'addon': addon, 'pager': pager, 'versions': versions, 'form': form,
'formset': formset, 'form_map': form_map, 'file_map': file_map})
@admin_required
def download_file_upload(request, uuid):
upload = get_object_or_404(FileUpload, uuid=uuid)
return HttpResponseSendFile(request, upload.path,
content_type='application/octet-stream')
@admin.site.admin_view
@post_required
@json_view
def recalc_hash(request, file_id):
file = get_object_or_404(File, pk=file_id)
file.size = storage.size(file.file_path)
file.hash = file.generate_hash()
file.save()
log.info('Recalculated hash for file ID %d' % file.id)
messages.success(request,
'File hash and size recalculated for file %d.' % file.id)
return {'success': 1}
|
kumar303/addons-server
|
src/olympia/zadmin/views.py
|
Python
|
bsd-3-clause
| 9,220 | 0 |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import signal
import subprocess
import io
import os
import re
import locale
import tempfile
import warnings
from luigi import six
class FileWrapper(object):
"""
Wrap `file` in a "real" so stuff can be added to it after creation.
"""
def __init__(self, file_object):
self._subpipe = file_object
def __getattr__(self, name):
# forward calls to 'write', 'close' and other methods not defined below
return getattr(self._subpipe, name)
def __enter__(self, *args, **kwargs):
# instead of returning whatever is returned by __enter__ on the subpipe
# this returns self, so whatever custom injected methods are still available
# this might cause problems with custom file_objects, but seems to work
# fine with standard python `file` objects which is the only default use
return self
def __exit__(self, *args, **kwargs):
return self._subpipe.__exit__(*args, **kwargs)
def __iter__(self):
return iter(self._subpipe)
class InputPipeProcessWrapper(object):
def __init__(self, command, input_pipe=None):
"""
Initializes a InputPipeProcessWrapper instance.
:param command: a subprocess.Popen instance with stdin=input_pipe and
stdout=subprocess.PIPE.
Alternatively, just its args argument as a convenience.
"""
self._command = command
self._input_pipe = input_pipe
self._original_input = True
if input_pipe is not None:
try:
input_pipe.fileno()
except AttributeError:
                # subprocess requires a fileno to work; if not present we copy to disk first
self._original_input = False
f = tempfile.NamedTemporaryFile('wb', prefix='luigi-process_tmp', delete=False)
self._tmp_file = f.name
f.write(input_pipe.read())
input_pipe.close()
f.close()
self._input_pipe = FileWrapper(io.BufferedReader(io.FileIO(self._tmp_file, 'r')))
self._process = command if isinstance(command, subprocess.Popen) else self.create_subprocess(command)
# we want to keep a circular reference to avoid garbage collection
# when the object is used in, e.g., pipe.read()
self._process._selfref = self
def create_subprocess(self, command):
"""
http://www.chiark.greenend.org.uk/ucgi/~cjwatson/blosxom/2009-07-02-python-sigpipe.html
"""
def subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
return subprocess.Popen(command,
stdin=self._input_pipe,
stdout=subprocess.PIPE,
preexec_fn=subprocess_setup,
close_fds=True)
def _finish(self):
# Need to close this before input_pipe to get all SIGPIPE messages correctly
self._process.stdout.close()
if not self._original_input and os.path.exists(self._tmp_file):
os.remove(self._tmp_file)
if self._input_pipe is not None:
self._input_pipe.close()
self._process.wait() # deadlock?
if self._process.returncode not in (0, 141, 128 - 141):
# 141 == 128 + 13 == 128 + SIGPIPE - normally processes exit with 128 + {reiceived SIG}
# 128 - 141 == -13 == -SIGPIPE, sometimes python receives -13 for some subprocesses
raise RuntimeError('Error reading from pipe. Subcommand exited with non-zero exit status %s.' % self._process.returncode)
def close(self):
self._finish()
def __del__(self):
self._finish()
def __enter__(self):
return self
def _abort(self):
"""
Call _finish, but eat the exception (if any).
"""
try:
self._finish()
except KeyboardInterrupt:
raise
except BaseException:
pass
def __exit__(self, type, value, traceback):
if type:
self._abort()
else:
self._finish()
def __getattr__(self, name):
if name == '_process':
raise AttributeError(name)
try:
return getattr(self._process.stdout, name)
except AttributeError:
return getattr(self._input_pipe, name)
def __iter__(self):
for line in self._process.stdout:
yield line
self._finish()
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return False
class OutputPipeProcessWrapper(object):
WRITES_BEFORE_FLUSH = 10000
def __init__(self, command, output_pipe=None):
self.closed = False
self._command = command
self._output_pipe = output_pipe
self._process = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=output_pipe,
close_fds=True)
self._flushcount = 0
def write(self, *args, **kwargs):
self._process.stdin.write(*args, **kwargs)
self._flushcount += 1
if self._flushcount == self.WRITES_BEFORE_FLUSH:
self._process.stdin.flush()
self._flushcount = 0
def writeLine(self, line):
assert '\n' not in line
self.write(line + '\n')
def _finish(self):
"""
Closes and waits for subprocess to exit.
"""
if self._process.returncode is None:
self._process.stdin.flush()
self._process.stdin.close()
self._process.wait()
self.closed = True
def __del__(self):
if not self.closed:
self.abort()
def __exit__(self, type, value, traceback):
if type is None:
self.close()
else:
self.abort()
def __enter__(self):
return self
def close(self):
self._finish()
if self._process.returncode == 0:
if self._output_pipe is not None:
self._output_pipe.close()
else:
raise RuntimeError('Error when executing command %s' % self._command)
def abort(self):
self._finish()
def __getattr__(self, name):
if name == '_process':
raise AttributeError(name)
try:
return getattr(self._process.stdin, name)
except AttributeError:
return getattr(self._output_pipe, name)
def readable(self):
return False
def writable(self):
return True
def seekable(self):
return False
class BaseWrapper(object):
def __init__(self, stream, *args, **kwargs):
self._stream = stream
try:
super(BaseWrapper, self).__init__(stream, *args, **kwargs)
except TypeError:
pass
def __getattr__(self, name):
if name == '_stream':
raise AttributeError(name)
return getattr(self._stream, name)
def __enter__(self):
self._stream.__enter__()
return self
def __exit__(self, *args):
self._stream.__exit__(*args)
def __iter__(self):
try:
for line in self._stream:
yield line
finally:
self.close()
class NewlineWrapper(BaseWrapper):
def __init__(self, stream, newline=None):
if newline is None:
self.newline = newline
else:
self.newline = newline.encode('ascii')
if self.newline not in (b'', b'\r\n', b'\n', b'\r', None):
            raise ValueError(r"newline needs to be one of {b'', b'\r\n', b'\n', b'\r', None}")
super(NewlineWrapper, self).__init__(stream)
def read(self, n=-1):
b = self._stream.read(n)
if self.newline == b'':
return b
        if self.newline is None:
            newline = b'\n'
        else:
            newline = self.newline
        return re.sub(b'(\n|\r\n|\r)', newline, b)
def writelines(self, lines):
        if self.newline is None or self.newline == b'':
newline = os.linesep.encode('ascii')
else:
newline = self.newline
self._stream.writelines(
(re.sub(b'(\n|\r\n|\r)', newline, line) for line in lines)
)
def write(self, b):
        if self.newline is None or self.newline == b'':
newline = os.linesep.encode('ascii')
else:
newline = self.newline
self._stream.write(re.sub(b'(\n|\r\n|\r)', newline, b))
class MixedUnicodeBytesWrapper(BaseWrapper):
"""
"""
def __init__(self, stream, encoding=None):
if encoding is None:
encoding = locale.getpreferredencoding()
self.encoding = encoding
super(MixedUnicodeBytesWrapper, self).__init__(stream)
def write(self, b):
self._stream.write(self._convert(b))
def writelines(self, lines):
self._stream.writelines((self._convert(line) for line in lines))
def _convert(self, b):
if isinstance(b, six.text_type):
b = b.encode(self.encoding)
warnings.warn('Writing unicode to byte stream', stacklevel=2)
return b
class Format(object):
"""
Interface for format specifications.
"""
@classmethod
def pipe_reader(cls, input_pipe):
raise NotImplementedError()
@classmethod
def pipe_writer(cls, output_pipe):
raise NotImplementedError()
def __rshift__(a, b):
return ChainFormat(a, b)
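# Editor's illustrative note (not in the original source): the ``>>`` operator
# above lets formats be composed, e.g. ``UTF8 >> Gzip`` builds a ChainFormat
# whose pipe_reader pipes raw bytes through ``gunzip`` first and then decodes
# them as text, while its pipe_writer encodes text and then compresses it.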
class ChainFormat(Format):
def __init__(self, *args, **kwargs):
self.args = args
try:
self.input = args[0].input
except AttributeError:
pass
try:
self.output = args[-1].output
except AttributeError:
pass
if not kwargs.get('check_consistency', True):
return
for x in range(len(args) - 1):
try:
if args[x].output != args[x + 1].input:
raise TypeError(
                        'The format chaining is not valid, %s provides %s '
                        'but %s expects %s' % (
                            args[x].__class__.__name__,
                            args[x].output,
                            args[x + 1].__class__.__name__,
                            args[x + 1].input,
)
)
except AttributeError:
pass
def pipe_reader(self, input_pipe):
for x in reversed(self.args):
input_pipe = x.pipe_reader(input_pipe)
return input_pipe
def pipe_writer(self, output_pipe):
for x in reversed(self.args):
output_pipe = x.pipe_writer(output_pipe)
return output_pipe
class TextWrapper(io.TextIOWrapper):
def __exit__(self, *args):
        # io.TextIOWrapper closes the file on __exit__; let the underlying file decide instead
if not self.closed and self.writable():
super(TextWrapper, self).flush()
self._stream.__exit__(*args)
def __del__(self, *args):
        # io.TextIOWrapper closes the file on __del__; let the underlying file decide instead
if not self.closed and self.writable():
super(TextWrapper, self).flush()
try:
self._stream.__del__(*args)
except AttributeError:
pass
def __init__(self, stream, *args, **kwargs):
self._stream = stream
try:
super(TextWrapper, self).__init__(stream, *args, **kwargs)
except TypeError:
pass
def __getattr__(self, name):
if name == '_stream':
raise AttributeError(name)
return getattr(self._stream, name)
def __enter__(self):
self._stream.__enter__()
return self
class NopFormat(Format):
def pipe_reader(self, input_pipe):
return input_pipe
def pipe_writer(self, output_pipe):
return output_pipe
class WrappedFormat(Format):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def pipe_reader(self, input_pipe):
return self.wrapper_cls(input_pipe, *self.args, **self.kwargs)
def pipe_writer(self, output_pipe):
return self.wrapper_cls(output_pipe, *self.args, **self.kwargs)
class TextFormat(WrappedFormat):
input = 'unicode'
output = 'bytes'
wrapper_cls = TextWrapper
class MixedUnicodeBytesFormat(WrappedFormat):
output = 'bytes'
wrapper_cls = MixedUnicodeBytesWrapper
class NewlineFormat(WrappedFormat):
input = 'bytes'
output = 'bytes'
wrapper_cls = NewlineWrapper
class GzipFormat(Format):
input = 'bytes'
output = 'bytes'
def __init__(self, compression_level=None):
self.compression_level = compression_level
def pipe_reader(self, input_pipe):
return InputPipeProcessWrapper(['gunzip'], input_pipe)
def pipe_writer(self, output_pipe):
args = ['gzip']
if self.compression_level is not None:
args.append('-' + str(int(self.compression_level)))
return OutputPipeProcessWrapper(args, output_pipe)
class Bzip2Format(Format):
input = 'bytes'
output = 'bytes'
def pipe_reader(self, input_pipe):
return InputPipeProcessWrapper(['bzcat'], input_pipe)
def pipe_writer(self, output_pipe):
return OutputPipeProcessWrapper(['bzip2'], output_pipe)
Text = TextFormat()
UTF8 = TextFormat(encoding='utf8')
Nop = NopFormat()
SysNewLine = NewlineFormat()
Gzip = GzipFormat()
Bzip2 = Bzip2Format()
MixedUnicodeBytes = MixedUnicodeBytesFormat()
def get_default_format():
if six.PY3:
return Text
elif os.linesep == '\n':
return Nop
else:
return SysNewLine
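# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original luigi module): a
# minimal round trip through GzipFormat. It assumes the external ``gzip`` and
# ``gunzip`` binaries are on PATH; the file name is a placeholder only.
def _example_gzip_roundtrip(path='example.gz'):
    # Writing: bytes are piped through a ``gzip`` subprocess into ``path``.
    with open(path, 'wb') as raw_out:
        writer = Gzip.pipe_writer(raw_out)
        writer.write(b'hello world\n')
        writer.close()
    # Reading: bytes come back out through ``gunzip``.
    with open(path, 'rb') as raw_in:
        reader = Gzip.pipe_reader(raw_in)
        data = reader.read()
        reader.close()
    return data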
|
ViaSat/luigi
|
luigi/format.py
|
Python
|
apache-2.0
| 14,652 | 0.001092 |
import unittest
from card import Card
class CardTest(unittest.TestCase):
def test_create(self):
suit = 'Hearts'
rank = 'Ace'
card1 = Card(suit, rank)
self.assertEqual((suit, rank), card1.get_value())
def test___eq__(self):
card1 = Card('Spades', 'Queen')
card2 = Card('Spades', 'Queen')
self.assertEqual(card1, card2)
card3 = Card('Hearts', 'Queen')
self.assertNotEqual(card1, card3)
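# Editor's hypothetical sketch (card.py is not shown here): the minimal Card
# interface these tests appear to rely on, reconstructed only from the
# assertions above and named _CardSketch so it does not shadow the real import.
class _CardSketch(object):
    def __init__(self, suit, rank):
        self.suit = suit
        self.rank = rank
    def get_value(self):
        return (self.suit, self.rank)
    def __eq__(self, other):
        return self.get_value() == other.get_value()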
if __name__ == '__main__':
unittest.main(verbosity=2)
|
munhyunsu/Hobby
|
2018F_SCSCAlgorithm/week2/card_tests.py
|
Python
|
gpl-3.0
| 528 | 0 |
import logging
from logging.handlers import RotatingFileHandler
from flask import Flask, render_template
from flask_login import LoginManager
from flask_restful import Api
from flask_wtf.csrf import CsrfProtect
from itsdangerous import URLSafeTimedSerializer
from sqlalchemy import create_engine
import AppConfig
from RestResources.Resources import PostsList, Posts
from services.Services import UserService
from views import Login, Common, Post, Admin
app = Flask(__name__)
CsrfProtect(app)
login_serializer = URLSafeTimedSerializer(AppConfig.APPSECRETKEY)
@app.errorhandler(404)
def not_found(error):
return render_template('404.html'), 404
# set the secret key. keep this really secret:
app.secret_key = AppConfig.APPSECRETKEY
def register_mods():
app.register_blueprint(Common.mod)
app.register_blueprint(Login.mod)
app.register_blueprint(Post.mod)
app.register_blueprint(Admin.mod)
def create_db_engine():
return create_engine(AppConfig.CONNECTIONSTRING, pool_recycle=3600, echo=True)
def build_db_engine():
AppConfig.DBENGINE = create_db_engine()
def init_login():
login_manager = LoginManager()
login_manager.init_app(app)
AppConfig.LOGINMANAGER = login_manager
# Create user loader function
@login_manager.user_loader
def load_user(user_id):
return UserService().getAll().filter_by(id=user_id).first()
@login_manager.token_loader
def get_user_token(token):
max_age = app.config["REMEMBER_COOKIE_DURATION"].total_seconds()
#Decrypt the Security Token, data = [username, hashpass]
data = login_serializer.loads(token, max_age=max_age)
userService = UserService()
#Find the User
user = userService.getById(data[0])
#Check Password and return user or None
if user and userService.validate(user.username, user.password):
return user
return None
def init_logger():
handler = RotatingFileHandler('FlaskTest.log', maxBytes=10000, backupCount=1)
handler.setLevel(logging.INFO)
app.logger.addHandler(handler)
def register_rest_api():
return Api(app)
def register_rest_resources():
api.add_resource(PostsList, '/api/posts')
api.add_resource(Posts, '/api/posts/<string:post_id>')
def set_app_configuration():
app.config['REMEMBER_COOKIE_DURATION'] = AppConfig.REMEMBER_COOKIE_DURATION
register_mods()
api = register_rest_api()
register_rest_resources()
build_db_engine()
init_login()
init_logger()
set_app_configuration()
app.run(AppConfig.APPHOST, AppConfig.APPPORT)
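# Editor's note, an illustrative sketch only: this package expects an
# ``AppConfig`` module that is not shown here. Based on the attributes used
# above it would look roughly like the following (all values are placeholders):
#
#     # AppConfig.py
#     from datetime import timedelta
#     APPSECRETKEY = 'change-me'
#     CONNECTIONSTRING = 'mysql://user:secret@localhost/flasktest'
#     REMEMBER_COOKIE_DURATION = timedelta(days=14)
#     APPHOST = '127.0.0.1'
#     APPPORT = 5000
#     DBENGINE = None      # populated by build_db_engine()
#     LOGINMANAGER = None  # populated by init_login()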
|
mandrive/FlaskTest
|
__init__.py
|
Python
|
mit
| 2,578 | 0.002327 |
import pytest
from bughouse.models import (
BLACK,
WHITE,
OVERALL_OVERALL,
)
from bughouse.ratings.engines.overall import (
rate_teams,
rate_players,
)
def test_rate_single_game(factories, models, elo_settings):
game = factories.GameFactory()
r1, r2 = rate_teams(game)
assert r1.rating == 1006
assert r2.rating == 994
def test_rate_multiple_games(factories, models):
team_a = factories.TeamFactory()
team_b = factories.TeamFactory()
rate_teams(factories.GameFactory(winning_team=team_a, losing_team=team_b))
rate_teams(factories.GameFactory(winning_team=team_a, losing_team=team_b))
assert team_a.get_latest_rating(OVERALL_OVERALL) == 1012
assert team_b.get_latest_rating(OVERALL_OVERALL) == 988
@pytest.mark.parametrize(
'losing_color',
(BLACK, WHITE),
)
def test_individual_ratings(factories, models, losing_color):
game = factories.GameFactory(losing_color=losing_color)
if game.losing_color == game.BLACK:
wtwr, wtbr, ltwr, ltbr = rate_players(game)
assert wtwr.player.get_latest_rating(OVERALL_OVERALL) == 1007
assert wtbr.player.get_latest_rating(OVERALL_OVERALL) == 1006
assert ltwr.player.get_latest_rating(OVERALL_OVERALL) == 994
assert ltbr.player.get_latest_rating(OVERALL_OVERALL) == 993
else:
wtwr, wtbr, ltwr, ltbr = rate_players(game)
assert wtwr.player.get_latest_rating(OVERALL_OVERALL) == 1006
assert wtbr.player.get_latest_rating(OVERALL_OVERALL) == 1007
assert ltwr.player.get_latest_rating(OVERALL_OVERALL) == 993
assert ltbr.player.get_latest_rating(OVERALL_OVERALL) == 994
def test_ratings_computation_is_idempotent(factories, models):
"""
Ensure that going back and re-computing old game ratings is an idempotent
process.
"""
team_a = factories.TeamFactory()
team_b = factories.TeamFactory()
factories.GameFactory(winning_team=team_a, losing_team=team_b)
game_b = factories.GameFactory(winning_team=team_a, losing_team=team_b)
factories.GameFactory(winning_team=team_a, losing_team=team_b)
first_rating_initial = team_a.ratings.get(
game=game_b,
).rating
rate_teams(game_b)
first_rating_recomputed = team_a.ratings.get(
game=game_b,
).rating
assert first_rating_initial == first_rating_recomputed
|
simpleenergy/bughouse-ranking
|
tests/bughouse/ratings/test_overall_overall_ratings.py
|
Python
|
mit
| 2,377 | 0 |
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from . import mod123
|
OCA/l10n-spain
|
l10n_es_aeat_mod123/models/__init__.py
|
Python
|
agpl-3.0
| 87 | 0 |
__problem_title__ = "Integer sided triangles for which the area/perimeter ratio is integral"
__problem_url___ = "https://projecteuler.net/problem=283"
__problem_description__ = "Consider the triangle with sides 6, 8 and 10. It can be seen that the " \
"perimeter and the area are both equal to 24. So the area/perimeter " \
"ratio is equal to 1. Consider also the triangle with sides 13, 14 and " \
"15. The perimeter equals 42 while the area is equal to 84. So for " \
"this triangle the area/perimeter ratio is equal to 2. Find the sum of " \
"the perimeters of all integer sided triangles for which the " \
"area/perimeter ratios are equal to positive integers not exceeding " \
"1000."
import timeit
class Solution():
@staticmethod
def solution1():
pass
@staticmethod
def time_solutions():
setup = 'from __main__ import Solution'
print('Solution 1:', timeit.timeit('Solution.solution1()', setup=setup, number=1))
if __name__ == '__main__':
s = Solution()
print(s.solution1())
s.time_solutions()
|
jrichte43/ProjectEuler
|
Problem-0283/solutions.py
|
Python
|
gpl-3.0
| 1,243 | 0.008045 |
"""
#;+
#; NAME:
#; spec_guis
#; Version 1.0
#;
#; PURPOSE:
#; Module for Spectroscopy Guis with QT
#; These call pieces from spec_widgets
#; 12-Dec-2014 by JXP
#;-
#;------------------------------------------------------------------------------
"""
from __future__ import print_function, absolute_import, division, unicode_literals
# Import libraries
import numpy as np
import os, sys
import matplotlib.pyplot as plt
import glob
from PyQt4 import QtGui
from PyQt4 import QtCore
from matplotlib import mpl
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
# Matplotlib Figure object
from matplotlib.figure import Figure
from astropy.units import Quantity
from astropy import units as u
from linetools.lists.linelist import LineList
from xastropy.xutils import xdebug as xdb
from xastropy.xguis import spec_widgets as xspw
#class XSpecGui(QtGui.QMainWindow):
#class XAbsIDGui(QtGui.QMainWindow):
#class XVelPltGui(QtGui.QDialog):
# x_specplot replacement
class XSpecGui(QtGui.QMainWindow):
''' GUI to replace XIDL x_specplot
12-Dec-2014 by JXP v1.0
27-Mar-2015 by JXP v2.0 :: EW, column, better zooming + panning
'''
def __init__(self, spec, parent=None, zsys=None, norm=None):
QtGui.QMainWindow.__init__(self, parent)
'''
spec = Spectrum1D
'''
        mpl.rcParams['agg.path.chunksize'] = 20000 # Needed to avoid a crash with large spectral files
# Build a widget combining several others
self.main_widget = QtGui.QWidget()
# Status bar
self.create_status_bar()
# Grab the pieces and tie together
self.pltline_widg = xspw.PlotLinesWidget(status=self.statusBar, init_z=zsys)
self.pltline_widg.setMaximumWidth(300)
# Hook the spec widget to Plot Line
self.spec_widg = xspw.ExamineSpecWidget(spec,status=self.statusBar,
llist=self.pltline_widg.llist,
zsys=zsys, norm=norm)
self.pltline_widg.spec_widg = self.spec_widg
self.spec_widg.canvas.mpl_connect('button_press_event', self.on_click)
extras = QtGui.QWidget()
extras.setMaximumWidth(130)
vbox = QtGui.QVBoxLayout()
qbtn = QtGui.QPushButton('Quit', self)
qbtn.clicked.connect(self.quit)
vbox.addWidget(self.pltline_widg)
vbox.addWidget(qbtn)
extras.setLayout(vbox)
hbox = QtGui.QHBoxLayout()
hbox.addWidget(self.spec_widg)
hbox.addWidget(extras)
self.main_widget.setLayout(hbox)
# Point MainWindow
self.setCentralWidget(self.main_widget)
def create_status_bar(self):
self.status_text = QtGui.QLabel("XSpec")
self.statusBar().addWidget(self.status_text, 1)
def on_click(self,event):
if event.button == 3: # Set redshift
if self.pltline_widg.llist['List'] is None:
return
self.select_line_widg = xspw.SelectLineWidget(
self.pltline_widg.llist[self.pltline_widg.llist['List']]._data)
self.select_line_widg.exec_()
line = self.select_line_widg.line
if line.strip() == 'None':
return
#
quant = line.split('::')[1].lstrip()
spltw = quant.split(' ')
wrest = Quantity(float(spltw[0]), unit=spltw[1])
z = event.xdata/wrest.value - 1.
self.pltline_widg.llist['z'] = z
self.statusBar().showMessage('z = {:f}'.format(z))
self.pltline_widg.zbox.setText('{:.5f}'.format(self.pltline_widg.llist['z']))
# Draw
self.spec_widg.on_draw()
# Quit
def quit(self):
self.close()
# GUI for Identifying many (all) Abs Systems in a Spectrum
class XAbsIDGui(QtGui.QMainWindow):
''' GUI to analyze absorption systems in a spectrum
16-Dec-2014 by JXP
'''
def __init__(self, spec, parent=None, abssys_dir=None, absid_list=None, norm=True,
srch_id=True, id_dir='ID_LINES/', second_file=None):
QtGui.QMainWindow.__init__(self, parent)
'''
spec = Spectrum1D
second_file = Second spectrum file (e.g. COS + STIS)
'''
# Build a widget combining several others
self.main_widget = QtGui.QWidget()
# Status bar
self.create_status_bar()
# Initialize
if absid_list is None:
# Automatically search for ID files
if srch_id:
absid_list = glob.glob(id_dir+'*id.fits')
else:
absid_list = []
# Grab the pieces and tie together
self.abssys_widg = xspw.AbsSysWidget(absid_list)
self.pltline_widg = xspw.PlotLinesWidget(status=self.statusBar)
self.spec_widg = xspw.ExamineSpecWidget(spec,status=self.statusBar,
llist=self.pltline_widg.llist, norm=norm,
second_file=second_file,
abs_sys=self.abssys_widg.abs_sys)
self.pltline_widg.spec_widg = self.spec_widg
# Connections
self.spec_widg.canvas.mpl_connect('button_press_event', self.on_click)
self.spec_widg.canvas.mpl_connect('key_press_event', self.on_key)
self.abssys_widg.refine_button.clicked.connect(self.refine_abssys)
# Layout
anly_widg = QtGui.QWidget()
anly_widg.setMaximumWidth(300)
anly_widg.setMinimumWidth(150)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.pltline_widg)
vbox.addWidget(self.abssys_widg)
anly_widg.setLayout(vbox)
hbox = QtGui.QHBoxLayout()
hbox.addWidget(self.spec_widg)
hbox.addWidget(anly_widg)
self.main_widget.setLayout(hbox)
# Point MainWindow
self.setCentralWidget(self.main_widget)
def create_status_bar(self):
self.status_text = QtGui.QLabel("XAbsID")
self.statusBar().addWidget(self.status_text, 1)
def on_key(self,event):
if event.key == 'v': # Stack plot
if self.spec_widg.vplt_flg == 1:
self.abssys_widg.add_fil(self.spec_widg.outfil)
self.abssys_widg.reload()
# Update line list
idx = self.pltline_widg.lists.index(self.spec_widg.llist['List'])
self.pltline_widg.llist_widget.setCurrentRow(idx)
elif event.key == '?': # Check for a match with known systems
wv_chosen = event.xdata
# Load grb
llist = xspw.set_llist('grb.lst')
# Loop through systems
for iabs_sys in self.abssys_widg.all_abssys:
z = iabs_sys.zabs
wvobs = np.array((1+z) * llist['grb.lst']['wrest'])
mtwv = np.where( np.abs( wvobs-wv_chosen ) < 0.2 )[0]
for imt in mtwv:
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
print('z={:g}, {:s}, f={:g}'.format(z,
llist['grb.lst']['name'][imt],
llist['grb.lst']['fval'][imt]))
if len(mtwv) == 0:
print('No match. wrest={:g} for z={:g}'.format(wv_chosen/(1+z), z))
def on_click(self,event):
if event.button == 3: # Set redshift
# Line list?
try:
self.pltline_widg.llist['List']
except KeyError:
print('Set a line list first!!')
return
#
if self.pltline_widg.llist[self.pltline_widg.llist['List']] == 'None':
return
self.select_line_widg = xspw.SelectLineWidget(
self.pltline_widg.llist[self.pltline_widg.llist['List']]._data)
self.select_line_widg.exec_()
line = self.select_line_widg.line
if line.strip() == 'None':
return
#
quant = line.split('::')[1].lstrip()
spltw = quant.split(' ')
wrest = Quantity(float(spltw[0]), unit=spltw[1])
z = event.xdata/wrest.value - 1.
self.pltline_widg.llist['z'] = z
self.statusBar().showMessage('z = {:f}'.format(z))
self.pltline_widg.zbox.setText(self.pltline_widg.zbox.z_frmt.format(
self.pltline_widg.llist['z']))
# Draw
self.spec_widg.on_draw()
def refine_abssys(self):
item = self.abssys_widg.abslist_widget.selectedItems()
if len(item) != 1:
self.statusBar().showMessage('AbsSys: Must select only 1 system!')
print('AbsSys: Must select only 1 system!')
txt = item[0].text()
ii = self.abssys_widg.all_items.index(txt)
iabs_sys = self.abssys_widg.all_abssys[ii]
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
# Launch
gui = XVelPltGui(self.spec_widg.spec, outfil=iabs_sys.absid_file,
abs_sys=iabs_sys, norm=self.spec_widg.norm)
gui.exec_()
# ##################################
# GUI for velocity plot
class XVelPltGui(QtGui.QDialog):
''' GUI to analyze absorption systems in a spectrum
24-Dec-2014 by JXP
'''
def __init__(self, ispec, z=None, parent=None, llist=None, norm=True,
vmnx=[-300., 300.]*u.km/u.s, abs_sys=None, outfil='dum_ID.fits',
sel_wv=None):
'''
spec = Filename or Spectrum1D
Norm: Bool (False)
Normalized spectrum?
abs_sys: AbsSystem
Absorption system class
sel_wv: Selected wavelength. Used to inspect a single, unknown line
'''
super(XVelPltGui, self).__init__(parent)
# Initialize
self.abs_sys = abs_sys
        if self.abs_sys is not None:
self.z = self.abs_sys.zabs
else:
if z is None:
raise ValueError('XVelPlt: Need to set abs_sys or z!')
self.z = z
self.vmnx = vmnx
self.outfil = outfil
self.norm = norm
self.sel_wv = sel_wv
# Grab the pieces and tie together
self.vplt_widg = xspw.VelPlotWidget(ispec, abs_sys=self.abs_sys, llist=llist,
vmnx=self.vmnx, z=self.z, norm=self.norm)
self.pltline_widg = xspw.PlotLinesWidget(init_llist=self.vplt_widg.llist,
init_z=self.z)
#self.pltline_widg.spec_widg = self.vplt_widg
self.slines = xspw.SelectedLinesWidget(self.vplt_widg.llist[self.vplt_widg.llist['List']],
init_select=self.vplt_widg.llist['show_line'],
plot_widget=self.vplt_widg)
# Connections
self.pltline_widg.llist_widget.currentItemChanged.connect(self.on_llist_change)
self.connect(self.pltline_widg.zbox, QtCore.SIGNAL('editingFinished ()'), self.setz)
self.vplt_widg.canvas.mpl_connect('key_press_event', self.on_key)
# Outfil
wbtn = QtGui.QPushButton('Write', self)
wbtn.setAutoDefault(False)
wbtn.clicked.connect(self.write_out)
self.out_box = QtGui.QLineEdit()
self.out_box.setText(self.outfil)
self.connect(self.out_box, QtCore.SIGNAL('editingFinished ()'), self.set_outfil)
# Quit
buttons = QtGui.QWidget()
wqbtn = QtGui.QPushButton('Write+Quit', self)
wqbtn.setAutoDefault(False)
wqbtn.clicked.connect(self.write_quit)
qbtn = QtGui.QPushButton('Quit', self)
qbtn.setAutoDefault(False)
qbtn.clicked.connect(self.quit)
# Sizes
lines_widg = QtGui.QWidget()
lines_widg.setMaximumWidth(300)
lines_widg.setMinimumWidth(200)
# Layout
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.pltline_widg)
vbox.addWidget(self.slines)
vbox.addWidget(wbtn)
vbox.addWidget(self.out_box)
# Quit buttons
hbox1 = QtGui.QHBoxLayout()
hbox1.addWidget(wqbtn)
hbox1.addWidget(qbtn)
buttons.setLayout(hbox1)
#
vbox.addWidget(buttons)
lines_widg.setLayout(vbox)
hbox = QtGui.QHBoxLayout()
hbox.addWidget(self.vplt_widg)
hbox.addWidget(lines_widg)
self.setLayout(hbox)
# Initial draw
self.vplt_widg.on_draw()
# Change z
def on_key(self,event):
if event.key == 'z':
self.z = self.vplt_widg.z
self.pltline_widg.llist['z'] = self.z
self.pltline_widg.zbox.setText(self.pltline_widg.zbox.z_frmt.format(self.z))
if event.key == 'T': # Try another rest wavelength for input line
# Get line from User
self.select_line_widg = xspw.SelectLineWidget(
self.pltline_widg.llist[self.pltline_widg.llist['List']]._data)
self.select_line_widg.exec_()
line = self.select_line_widg.line
quant = line.split('::')[1].lstrip()
spltw = quant.split(' ')
wrest = Quantity(float(spltw[0]), unit=spltw[1])
# Set redshift
self.z = self.sel_wv / wrest - 1.
print('Setting z = {:g}'.format(self.z))
self.pltline_widg.llist['z'] = self.z
self.pltline_widg.zbox.setText(self.pltline_widg.zbox.z_frmt.format(self.z))
self.vplt_widg.z = self.pltline_widg.llist['z']
# Reset
self.vplt_widg.init_lines()
self.vplt_widg.on_draw()
# Set z from pltline_widg
def setz(self):
self.vplt_widg.abs_sys.zabs = self.pltline_widg.llist['z']
self.vplt_widg.z = self.pltline_widg.llist['z']
self.z = self.pltline_widg.llist['z']
self.vplt_widg.on_draw()
# Change list of lines to choose from
def on_llist_change(self):
llist = self.pltline_widg.llist
all_lines = list( llist[llist['List']]['wrest'] )
# Set selected
abs_sys = self.vplt_widg.abs_sys
wrest = abs_sys.lines.keys()
wrest.sort()
select = []
for iwrest in wrest:
try:
select.append(all_lines.index(iwrest))
except ValueError:
pass
select.sort()
# GUIs
self.vplt_widg.llist['List'] = llist['List']
self.vplt_widg.llist['show_line'] = select
self.vplt_widg.idx_line = 0
self.slines.selected = select
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
self.slines.on_list_change(llist[llist['List']])
# Write
def set_outfil(self):
self.outfil = str(self.out_box.text())
print('XVelPlot: Will write to {:s}'.format(self.outfil))
# Write
def write_out(self):
self.vplt_widg.abs_sys.absid_file = self.outfil
self.vplt_widg.abs_sys.write_absid_file()
# Write + Quit
def write_quit(self):
self.write_out()
self.flg_quit = 1
self.abs_sys = self.vplt_widg.abs_sys
self.done(1)
# Write + Quit
def quit(self):
#self.abs_sys = self.vplt_widg.abs_sys # Have to write to pass back
self.flg_quit = 0
self.done(1)
# x_specplot replacement
class XAODMGui(QtGui.QDialog):
''' GUI to show AODM plots
28-Dec-2014 by JXP
'''
def __init__(self, spec, z, wrest, vmnx=[-300., 300.]*u.km/u.s, parent=None, norm=True):
super(XAODMGui, self).__init__(parent)
'''
spec = Spectrum1D
'''
# Grab the pieces and tie together
self.aodm_widg = xspw.AODMWidget(spec,z,wrest,vmnx=vmnx,norm=norm)
self.aodm_widg.canvas.mpl_connect('key_press_event', self.on_key)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.aodm_widg)
self.setLayout(vbox)
self.aodm_widg.on_draw()
def on_key(self,event):
if event.key == 'q': # Quit
self.done(1)
# Script to run XSpec from the command line
def run_xspec(*args, **kwargs):
'''
Runs the XSpecGui
Command line
or from Python
Examples:
1. python ~/xastropy/xastropy/xguis/spec_guis.py 1
2. spec_guis.run_xspec(filename)
3. spec_guis.run_xspec(spec1d)
'''
import argparse
from specutils import Spectrum1D
from xastropy.spec.utils import XSpectrum1D
parser = argparse.ArgumentParser(description='Parse for XSpec')
parser.add_argument("flag", type=int, help="GUI flag (ignored)")
parser.add_argument("file", type=str, help="Spectral file")
parser.add_argument("-zsys", type=float, help="System Redshift")
parser.add_argument("--un_norm", help="Spectrum is NOT normalized",
action="store_true")
if len(args) == 0:
pargs = parser.parse_args()
else: # better know what you are doing!
#xdb.set_trace()
if type(args[0]) in [XSpectrum1D, Spectrum1D]:
app = QtGui.QApplication(sys.argv)
gui = XSpecGui(args[0], **kwargs)
gui.show()
app.exec_()
return
else: # String parsing
largs = ['1'] + [iargs for iargs in args]
pargs = parser.parse_args(largs)
# Normalized?
norm=True
if pargs.un_norm:
norm=False
    # System redshift?
try:
zsys = pargs.zsys
except AttributeError:
zsys=None
app = QtGui.QApplication(sys.argv)
gui = XSpecGui(pargs.file, zsys=zsys, norm=norm)
gui.show()
app.exec_()
# Script to run XAbsID from the command line
def run_xabsid():
import argparse
parser = argparse.ArgumentParser(description='Script for XSpec')
parser.add_argument("flag", type=int, help="GUI flag (ignored)")
parser.add_argument("file", type=str, help="Spectral file")
parser.add_argument("--un_norm", help="Spectrum is NOT normalized",
action="store_true")
parser.add_argument("-id_dir", type=str,
help="Directory for ID files (ID_LINES is default)")
parser.add_argument("-secondfile", type=str, help="Second spectral file")
args = parser.parse_args()
# Normalized?
norm=True
if args.un_norm:
norm=False
# Second spectral file?
second_file=None
if args.secondfile:
second_file=args.secondfile
# Launch
app = QtGui.QApplication(sys.argv)
gui = XAbsIDGui(args.file, norm=norm, second_file=second_file)
gui.show()
app.exec_()
# ################
if __name__ == "__main__":
import sys
from linetools.spectra import io as lsi
from xastropy.igm import abs_sys as xiabs
if len(sys.argv) == 1: # TESTING
flg_fig = 0
#flg_fig += 2**0 # XSpec
#flg_fig += 2**1 # XAbsID
#flg_fig += 2**2 # XVelPlt Gui
flg_fig += 2**3 # XVelPlt Gui without ID list; Also tests select wave
#flg_fig += 2**4 # XAODM Gui
# Read spectrum
spec_fil = '/u/xavier/Keck/HIRES/RedData/PH957/PH957_f.fits'
spec = lsi.readspec(spec_fil)
# XSpec
if (flg_fig % 2) == 1:
app = QtGui.QApplication(sys.argv)
gui = XSpecGui(spec)
gui.show()
app.exec_()
# XAbsID
if (flg_fig % 2**2) >= 2**1:
#spec_fil = '/u/xavier/PROGETTI/LLSZ3/data/normalize/SDSSJ1004+0018_nF.fits'
#spec = xspec.readwrite.readspec(spec_fil)
#norm = True
spec_fil = '/Users/xavier/Dropbox/CASBAH/jxp_analysis/FBQS0751+2919/fbqs0751_nov2014bin.fits'
norm = False
absid_fil = '/Users/xavier/paper/LLS/Optical/Data/Analysis/MAGE/SDSSJ1004+0018_z2.746_id.fits'
absid_fil2 = '/Users/xavier/paper/LLS/Optical/Data/Analysis/MAGE/SDSSJ2348-1041_z2.997_id.fits'
app = QtGui.QApplication(sys.argv)
gui = XAbsIDGui(spec_fil,norm=norm) #,absid_list=[absid_fil, absid_fil2])
gui.show()
app.exec_()
# XVelPlt with existing AbsID file
if (flg_fig % 2**3) >= 2**2:
spec_fil = '/u/xavier/PROGETTI/LLSZ3/data/normalize/SDSSJ1004+0018_nF.fits'
#spec = xspec.readwrite.readspec(spec_fil)
absid_fil = '/Users/xavier/paper/LLS/Optical/Data/Analysis/MAGE/SDSSJ1004+0018_z2.746_id.fits'
abs_sys = xiabs.abssys_utils.Generic_System(None)
abs_sys.parse_absid_file(absid_fil)
#
app = QtGui.QApplication(sys.argv)
app.setApplicationName('XVelPlt')
gui = XVelPltGui(spec_fil,abs_sys=abs_sys,
outfil='/Users/xavier/Desktop/tmp.fits')
gui.show()
sys.exit(app.exec_())
# XVelPlt without existing AbsID file
if (flg_fig % 2**4) >= 2**3:
#spec_fil = '/u/xavier/PROGETTI/LLSZ3/data/normalize/SDSSJ1004+0018_nF.fits'
#z=2.746
#outfil='/Users/xavier/Desktop/J1004+0018_z2.746_id.fits'
spec_fil = '/Users/xavier/Dropbox/CASBAH/jxp_analysis/FBQS0751+2919/fbqs0751_nov2014bin.fits'
z=0.
outfil='/Users/xavier/Desktop/tmp.fits'
#
app = QtGui.QApplication(sys.argv)
app.setApplicationName('XVelPlt')
gui = XVelPltGui(spec_fil, z=z, outfil=outfil,norm=False, sel_wv=1526.80)
gui.show()
sys.exit(app.exec_())
# AODM GUI
if (flg_fig % 2**5) >= 2**4:
#spec_fil = '/Users/xavier/PROGETTI/LLSZ3/data/normalize/UM184_nF.fits'
#z=2.96916
#lines = [1548.195, 1550.770]
norm = True
spec_fil = '/Users/xavier/Dropbox/CASBAH/jxp_analysis/FBQS0751+2919/fbqs0751_nov2014bin.fits'
z=0.4391
lines = [1215.6701, 1025.7223] * u.AA
norm = False
# Launch
spec = lsi.readspec(spec_fil)
app = QtGui.QApplication(sys.argv)
app.setApplicationName('AODM')
main = XAODMGui(spec, z, lines, norm=norm)
main.show()
sys.exit(app.exec_())
else: # RUN A GUI
id_gui = int(sys.argv[1]) # 1 = XSpec, 2=XAbsId
if id_gui == 1:
run_xspec()
elif id_gui == 2:
run_xabsid()
|
profxj/old_xastropy
|
xastropy/xguis/spec_guis.py
|
Python
|
bsd-3-clause
| 22,801 | 0.006316 |
# -*- coding: utf-8 -*-
"""
example1-simpleloop
~~~~~~~~~~~~~~~~~~~
This example shows how to use the loop block backend and frontend.
:copyright: 2015 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
# From lantz, you import a helper function.
from lantz.ui.app import start_gui_app
# and the loop block and its user interface
from lantz.ui.blocks import Loop, LoopUi
# the drivers you need (In this case just simulated dummy drivers).
from lantz.drivers.examples.dummydrivers import DummyOsci
# Drivers are instantiated in the usual way.
osci = DummyOsci('COM2')
# You create a function that will be called by the loop
# It requires three parameters
# counter - the iteration number
# iterations - total number of iterations
# overrun - a boolean indicating if the time required for the operation
# is longer than the interval.
def measure(counter, iterations, overrun):
print(counter, iterations, overrun)
data = osci.measure()
print(data)
# You instantiate the loop
app = Loop()
# and assign the function to the body of the loop
app.body = measure
# Finally you start the program
start_gui_app(app, LoopUi)
# This gives you a very complete GUI for the loop; you can easily create a customized version!
|
varses/awsch
|
examples/using_blocks/example1-simpleloop.py
|
Python
|
bsd-3-clause
| 1,304 | 0.003067 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ImageTranslationTranslation'
db.create_table('cmsplugin_filer_image_translated_imagetranslation_translation', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=256, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(max_length=256, blank=True)),
('alt_text', self.gf('django.db.models.fields.CharField')(max_length=512, blank=True)),
('caption', self.gf('django.db.models.fields.CharField')(max_length=512, blank=True)),
('language_code', self.gf('django.db.models.fields.CharField')(max_length=15, db_index=True)),
('master', self.gf('django.db.models.fields.related.ForeignKey')(related_name='translations', null=True, to=orm['cmsplugin_filer_image_translated.ImageTranslation'])),
))
db.send_create_signal('cmsplugin_filer_image_translated', ['ImageTranslationTranslation'])
# Adding unique constraint on 'ImageTranslationTranslation', fields ['language_code', 'master']
db.create_unique('cmsplugin_filer_image_translated_imagetranslation_translation', ['language_code', 'master_id'])
# Adding model 'ImageTranslation'
db.create_table('cmsplugin_filer_image_translated_imagetranslation', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('image', self.gf('django.db.models.fields.related.OneToOneField')(related_name='translation', unique=True, to=orm['filer.Image'])),
))
db.send_create_signal('cmsplugin_filer_image_translated', ['ImageTranslation'])
def backwards(self, orm):
# Removing unique constraint on 'ImageTranslationTranslation', fields ['language_code', 'master']
db.delete_unique('cmsplugin_filer_image_translated_imagetranslation_translation', ['language_code', 'master_id'])
# Deleting model 'ImageTranslationTranslation'
db.delete_table('cmsplugin_filer_image_translated_imagetranslation_translation')
# Deleting model 'ImageTranslation'
db.delete_table('cmsplugin_filer_image_translated_imagetranslation')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cmsplugin_filer_image_translated.imagetranslation': {
'Meta': {'object_name': 'ImageTranslation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'translation'", 'unique': 'True', 'to': "orm['filer.Image']"})
},
'cmsplugin_filer_image_translated.imagetranslationrenamed': {
'Meta': {'object_name': 'ImageTranslationRenamed'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']"}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'trans_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'trans_caption': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'trans_description': ('django.db.models.fields.TextField', [], {'max_length': '256', 'blank': 'True'}),
'trans_name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'})
},
'cmsplugin_filer_image_translated.imagetranslationtranslation': {
'Meta': {'unique_together': "[('language_code', 'master')]", 'object_name': 'ImageTranslationTranslation', 'db_table': "'cmsplugin_filer_image_translated_imagetranslation_translation'"},
'alt_text': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'caption': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '256', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': "orm['cmsplugin_filer_image_translated.ImageTranslation']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['cmsplugin_filer_image_translated']
|
bitmazk/cmsplugin-filer-image-translated
|
cmsplugin_filer_image_translated/migrations/0004_auto__add_imagetranslationtranslation__add_unique_imagetranslationtran.py
|
Python
|
mit
| 12,501 | 0.007919 |
#!/usr/bin/env python
'''
create ardupilot terrain database files
'''
from MAVProxy.modules.mavproxy_map import srtm
import math, struct, os, sys
import crc16, time, struct
# MAVLink sends 4x4 grids
TERRAIN_GRID_MAVLINK_SIZE = 4
# a 2k grid_block on disk contains 8x7 of the mavlink grids. Each
# grid block overlaps by one with its neighbour. This ensures that
# the altitude at any point can be calculated from a single grid
# block
TERRAIN_GRID_BLOCK_MUL_X = 7
TERRAIN_GRID_BLOCK_MUL_Y = 8
# this is the spacing between 32x28 grid blocks, in grid_spacing units
TERRAIN_GRID_BLOCK_SPACING_X = ((TERRAIN_GRID_BLOCK_MUL_X-1)*TERRAIN_GRID_MAVLINK_SIZE)
TERRAIN_GRID_BLOCK_SPACING_Y = ((TERRAIN_GRID_BLOCK_MUL_Y-1)*TERRAIN_GRID_MAVLINK_SIZE)
# giving a total grid size of a disk grid_block of 32x28
TERRAIN_GRID_BLOCK_SIZE_X = (TERRAIN_GRID_MAVLINK_SIZE*TERRAIN_GRID_BLOCK_MUL_X)
TERRAIN_GRID_BLOCK_SIZE_Y = (TERRAIN_GRID_MAVLINK_SIZE*TERRAIN_GRID_BLOCK_MUL_Y)
# format of grid on disk
TERRAIN_GRID_FORMAT_VERSION = 1
IO_BLOCK_SIZE = 2048
GRID_SPACING = 100
def to_float32(f):
'''emulate single precision float'''
return struct.unpack('f', struct.pack('f',f))[0]
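# Scale factor between positions stored as 1e-7 degrees and metres: roughly
# Earth radius (6378100 m) * pi/180 / 1e7, apparently matching ArduPilot's
# AP_Math constants; LOCATION_SCALING_FACTOR_INV is simply its reciprocal.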
LOCATION_SCALING_FACTOR = to_float32(0.011131884502145034)
LOCATION_SCALING_FACTOR_INV = to_float32(89.83204953368922)
def longitude_scale(lat):
'''get longitude scale factor'''
scale = to_float32(math.cos(to_float32(math.radians(lat))))
return max(scale, 0.01)
def get_distance_NE_e7(lat1, lon1, lat2, lon2):
'''get distance tuple between two positions in 1e7 format'''
return ((lat2 - lat1) * LOCATION_SCALING_FACTOR, (lon2 - lon1) * LOCATION_SCALING_FACTOR * longitude_scale(lat1*1.0e-7))
def add_offset(lat_e7, lon_e7, ofs_north, ofs_east):
'''add offset in meters to a position'''
dlat = int(float(ofs_north) * LOCATION_SCALING_FACTOR_INV)
dlng = int((float(ofs_east) * LOCATION_SCALING_FACTOR_INV) / longitude_scale(lat_e7*1.0e-7))
return (int(lat_e7+dlat), int(lon_e7+dlng))
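# Illustrative example (editor's note): offsetting a position by 100 m north
# and 100 m east and then measuring the offset back should recover roughly
# (100, 100), up to rounding onto the 1e-7 degree grid:
#     lat2, lon2 = add_offset(-353632610, 1491652300, 100.0, 100.0)
#     get_distance_NE_e7(-353632610, 1491652300, lat2, lon2)  # ~ (100.0, 100.0)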
def east_blocks(lat_e7, lon_e7):
'''work out how many blocks per stride on disk'''
lat2_e7 = lat_e7
lon2_e7 = lon_e7 + 10*1000*1000
# shift another two blocks east to ensure room is available
lat2_e7, lon2_e7 = add_offset(lat2_e7, lon2_e7, 0, 2*GRID_SPACING*TERRAIN_GRID_BLOCK_SIZE_Y)
offset = get_distance_NE_e7(lat_e7, lon_e7, lat2_e7, lon2_e7)
return int(offset[1] / (GRID_SPACING*TERRAIN_GRID_BLOCK_SPACING_Y))
def pos_from_file_offset(lat_degrees, lon_degrees, file_offset):
'''return a lat/lon in 1e7 format given a file offset'''
ref_lat = int(lat_degrees*10*1000*1000)
ref_lon = int(lon_degrees*10*1000*1000)
stride = east_blocks(ref_lat, ref_lon)
blocks = file_offset // IO_BLOCK_SIZE
grid_idx_x = blocks // stride
grid_idx_y = blocks % stride
idx_x = grid_idx_x * TERRAIN_GRID_BLOCK_SPACING_X
idx_y = grid_idx_y * TERRAIN_GRID_BLOCK_SPACING_Y
offset = (idx_x * GRID_SPACING, idx_y * GRID_SPACING)
(lat_e7, lon_e7) = add_offset(ref_lat, ref_lon, offset[0], offset[1])
offset = get_distance_NE_e7(ref_lat, ref_lon, lat_e7, lon_e7)
grid_idx_x = int(idx_x / TERRAIN_GRID_BLOCK_SPACING_X)
grid_idx_y = int(idx_y / TERRAIN_GRID_BLOCK_SPACING_Y)
(lat_e7, lon_e7) = add_offset(ref_lat, ref_lon,
grid_idx_x * TERRAIN_GRID_BLOCK_SPACING_X * float(GRID_SPACING),
grid_idx_y * TERRAIN_GRID_BLOCK_SPACING_Y * float(GRID_SPACING))
return (lat_e7, lon_e7)
class GridBlock(object):
def __init__(self, lat_int, lon_int, lat, lon):
'''
a grid block is a structure in a local file containing height
information. Each grid block is 2048 bytes in size, to keep file IO to
block oriented SD cards efficient
'''
# crc of whole block, taken with crc=0
self.crc = 0
# format version number
self.version = TERRAIN_GRID_FORMAT_VERSION
# grid spacing in meters
self.spacing = GRID_SPACING
# heights in meters over a 32*28 grid
self.height = []
for x in range(TERRAIN_GRID_BLOCK_SIZE_X):
self.height.append([0]*TERRAIN_GRID_BLOCK_SIZE_Y)
# bitmap of 4x4 grids filled in from GCS (56 bits are used)
self.bitmap = (1<<56)-1
lat_e7 = int(lat * 1.0e7)
lon_e7 = int(lon * 1.0e7)
# grids start on integer degrees. This makes storing terrain data on
# the SD card a bit easier. Note that this relies on the python floor
# behaviour with integer division
self.lat_degrees = lat_int
self.lon_degrees = lon_int
# create reference position for this rounded degree position
ref_lat = self.lat_degrees*10*1000*1000
ref_lon = self.lon_degrees*10*1000*1000
# find offset from reference
offset = get_distance_NE_e7(ref_lat, ref_lon, lat_e7, lon_e7)
offset = (round(offset[0]), round(offset[1]))
# get indices in terms of grid_spacing elements
idx_x = int(offset[0] / GRID_SPACING)
idx_y = int(offset[1] / GRID_SPACING)
# find indexes into 32*28 grids for this degree reference. Note
# the use of TERRAIN_GRID_BLOCK_SPACING_{X,Y} which gives a one square
# overlap between grids
self.grid_idx_x = idx_x // TERRAIN_GRID_BLOCK_SPACING_X
self.grid_idx_y = idx_y // TERRAIN_GRID_BLOCK_SPACING_Y
# calculate lat/lon of SW corner of 32*28 grid_block
(ref_lat, ref_lon) = add_offset(ref_lat, ref_lon,
self.grid_idx_x * TERRAIN_GRID_BLOCK_SPACING_X * float(GRID_SPACING),
self.grid_idx_y * TERRAIN_GRID_BLOCK_SPACING_Y * float(GRID_SPACING))
self.lat = ref_lat
self.lon = ref_lon
def fill(self, gx, gy, altitude):
'''fill a square'''
self.height[gx][gy] = int(altitude)
def blocknum(self):
'''find IO block number'''
stride = east_blocks(self.lat_degrees*1e7, self.lon_degrees*1e7)
return stride * self.grid_idx_x + self.grid_idx_y
class DataFile(object):
def __init__(self, lat, lon):
if lat < 0:
NS = 'S'
else:
NS = 'N'
if lon < 0:
EW = 'W'
else:
EW = 'E'
name = "terrain/%c%02u%c%03u.DAT" % (NS, min(abs(int(lat)), 99),
EW, min(abs(int(lon)), 999))
try:
os.mkdir("terrain")
except Exception:
pass
if not os.path.exists(name):
self.fh = open(name, 'w+b')
else:
self.fh = open(name, 'r+b')
def seek_offset(self, block):
'''seek to right offset'''
# work out how many longitude blocks there are at this latitude
file_offset = block.blocknum() * IO_BLOCK_SIZE
self.fh.seek(file_offset)
def pack(self, block):
'''pack into a block'''
buf = bytes()
buf += struct.pack("<QiiHHH", block.bitmap, block.lat, block.lon, block.crc, block.version, block.spacing)
for gx in range(TERRAIN_GRID_BLOCK_SIZE_X):
buf += struct.pack("<%uh" % TERRAIN_GRID_BLOCK_SIZE_Y, *block.height[gx])
buf += struct.pack("<HHhb", block.grid_idx_x, block.grid_idx_y, block.lon_degrees, block.lat_degrees)
return buf
def write(self, block):
'''write a grid block'''
self.seek_offset(block)
block.crc = 0
buf = self.pack(block)
block.crc = crc16.crc16xmodem(buf)
buf = self.pack(block)
self.fh.write(buf)
def check_filled(self, block):
'''read a grid block and check if already filled'''
self.seek_offset(block)
buf = self.fh.read(IO_BLOCK_SIZE)
if len(buf) != IO_BLOCK_SIZE:
return False
(bitmap, lat, lon, crc, version, spacing) = struct.unpack("<QiiHHH", buf[:22])
if (version != TERRAIN_GRID_FORMAT_VERSION or
abs(lat - block.lat)>2 or
abs(lon - block.lon)>2 or
spacing != GRID_SPACING or
bitmap != (1<<56)-1):
return False
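        # Recompute the CRC the same way write() does: zero the 2-byte crc field
        # at offset 16 and hash the packed block, whose packed size is 1821 bytes
        # (22-byte header + 32*28*2 height bytes + 7-byte trailer).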
buf = buf[:16] + struct.pack("<H", 0) + buf[18:]
crc2 = crc16.crc16xmodem(buf[:1821])
if crc2 != crc:
return False
return True
def create_degree(lat, lon):
'''create data file for one degree lat/lon'''
lat_int = int(math.floor(lat))
lon_int = int(math.floor((lon)))
tiles = {}
dfile = DataFile(lat_int, lon_int)
print("Creating for %d %d" % (lat_int, lon_int))
total_blocks = east_blocks(lat_int*1e7, lon_int*1e7) * TERRAIN_GRID_BLOCK_SIZE_Y
for blocknum in range(total_blocks):
(lat_e7, lon_e7) = pos_from_file_offset(lat_int, lon_int, blocknum * IO_BLOCK_SIZE)
lat = lat_e7 * 1.0e-7
lon = lon_e7 * 1.0e-7
grid = GridBlock(lat_int, lon_int, lat, lon)
if grid.blocknum() != blocknum:
continue
if not args.force and dfile.check_filled(grid):
continue
for gx in range(TERRAIN_GRID_BLOCK_SIZE_X):
for gy in range(TERRAIN_GRID_BLOCK_SIZE_Y):
lat_e7, lon_e7 = add_offset(lat*1.0e7, lon*1.0e7, gx*GRID_SPACING, gy*GRID_SPACING)
lat2_int = int(math.floor(lat_e7*1.0e-7))
lon2_int = int(math.floor(lon_e7*1.0e-7))
tile_idx = (lat2_int, lon2_int)
while not tile_idx in tiles:
tile = downloader.getTile(lat2_int, lon2_int)
waited = False
if tile == 0:
print("waiting on download of %d,%d" % (lat2_int, lon2_int))
time.sleep(0.3)
waited = True
continue
if waited:
print("downloaded %d,%d" % (lat2_int, lon2_int))
tiles[tile_idx] = tile
altitude = tiles[tile_idx].getAltitudeFromLatLon(lat_e7*1.0e-7, lon_e7*1.0e-7)
grid.fill(gx, gy, altitude)
dfile.write(grid)
from argparse import ArgumentParser
parser = ArgumentParser(description='terrain data creator')
parser.add_argument("lat", type=float, default=-35.363261)
parser.add_argument("lon", type=float, default=149.165230)
parser.add_argument("--force", action='store_true', help="overwrite existing full blocks")
parser.add_argument("--radius", type=int, default=100, help="radius in km")
parser.add_argument("--debug", action='store_true', default=False)
parser.add_argument("--spacing", type=int, default=100, help="grid spacing in meters")
args = parser.parse_args()
downloader = srtm.SRTMDownloader(debug=args.debug)
downloader.loadFileList()
GRID_SPACING = args.spacing
done = set()
for dx in range(-args.radius, args.radius):
for dy in range(-args.radius, args.radius):
(lat2,lon2) = add_offset(args.lat*1e7, args.lon*1e7, dx*1000.0, dy*1000.0)
lat_int = int(round(lat2 * 1.0e-7))
lon_int = int(round(lon2 * 1.0e-7))
tag = (lat_int, lon_int)
if tag in done:
continue
done.add(tag)
create_degree(lat_int, lon_int)
create_degree(args.lat, args.lon)
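# Illustrative invocation (editor's note, arguments are examples only):
#     python create_terrain.py -35.363261 149.165230 --radius 50
# which downloads SRTM tiles as needed and writes files such as
# terrain/S36E149.DAT under ./terrain/.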
|
matternet/ardupilot
|
libraries/AP_Terrain/tools/create_terrain.py
|
Python
|
gpl-3.0
| 11,310 | 0.004156 |
import os
class Program:
socketColorBoTe = "255 255 255 255"
socketColorBa = "77 87 152 255"
progColorRareBoTe = "0 0 0 255"
progColorRareBa = "240 220 180 255"
progColorElseBoTe = "77 87 152 255"
progColorElseBa = "0 0 0 255"
def createFile(self):
        # '~' is not expanded automatically, so resolve it explicitly before use
        dest_dir = os.path.expanduser('~/dest')
        if not os.path.exists(dest_dir):
            os.makedirs(dest_dir)
        filepath = os.path.join(dest_dir, "filterZZ.filter")
        self.f = open(filepath, "w")
def addNewLine(self):
self.f.write("\n\n")
|
sundrome21/FilterZZ
|
program.py
|
Python
|
mit
| 519 | 0.001927 |
# Generated by Django 2.2.12 on 2020-05-09 06:28
from django.db import migrations
# Can't use fixtures because load_fixtures method is janky with django-tenant-schemas
def load_initial_data(apps, schema_editor):
Grade = apps.get_model('courses', 'Grade')
# add some initial data if none has been created yet
if not Grade.objects.exists():
Grade.objects.create(
name="8",
value=8
)
Grade.objects.create(
name="9",
value=9
)
Grade.objects.create(
name="10",
value=10
)
Grade.objects.create(
name="11",
value=11
)
Grade.objects.create(
name="12",
value=12
)
class Migration(migrations.Migration):
dependencies = [
('courses', '0015_auto_20200508_1957'),
]
operations = [
migrations.RunPython(load_initial_data),
]
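    # Editor's note (hypothetical alternative): passing a reverse callable such
    # as ``migrations.RunPython(load_initial_data, migrations.RunPython.noop)``
    # would make this data migration reversible with a no-op on rollback.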
|
timberline-secondary/hackerspace
|
src/courses/migrations/0016_grades_initialdata.py
|
Python
|
gpl-3.0
| 963 | 0.001038 |
"""A Python module for interacting and consuming responses from Slack."""
import logging
import slack.errors as e
from slack.web.internal_utils import _next_cursor_is_present
class AsyncSlackResponse:
"""An iterable container of response data.
Attributes:
data (dict): The json-encoded content of the response. Along
with the headers and status code information.
Methods:
validate: Check if the response from Slack was successful.
get: Retrieves any key from the response data.
next: Retrieves the next portion of results,
if 'next_cursor' is present.
Example:
```python
import os
import slack
client = slack.AsyncWebClient(token=os.environ['SLACK_API_TOKEN'])
response1 = await client.auth_revoke(test='true')
assert not response1['revoked']
response2 = await client.auth_test()
assert response2.get('ok', False)
users = []
async for page in await client.users_list(limit=2):
users = users + page['members']
```
Note:
Some responses return collections of information
like channel and user lists. If they do it's likely
that you'll only receive a portion of results. This
object allows you to iterate over the response which
makes subsequent API requests until your code hits
'break' or there are no more results to be found.
Any attributes or methods prefixed with _underscores are
intended to be "private" internal use only. They may be changed or
removed at anytime.
"""
def __init__(
self,
*,
client, # AsyncWebClient
http_verb: str,
api_url: str,
req_args: dict,
data: dict,
headers: dict,
status_code: int,
):
self.http_verb = http_verb
self.api_url = api_url
self.req_args = req_args
self.data = data
self.headers = headers
self.status_code = status_code
self._initial_data = data
self._iteration = None # for __iter__ & __next__
self._client = client
self._logger = logging.getLogger(__name__)
def __str__(self):
"""Return the Response data if object is converted to a string."""
if isinstance(self.data, bytes):
raise ValueError(
"As the response.data is binary data, this operation is unsupported"
)
return f"{self.data}"
def __contains__(self, key: str) -> bool:
return self.get(key) is not None
def __getitem__(self, key):
"""Retrieves any key from the data store.
Note:
This is implemented so users can reference the
SlackResponse object like a dictionary.
e.g. response["ok"]
Returns:
The value from data or None.
"""
if isinstance(self.data, bytes):
raise ValueError(
"As the response.data is binary data, this operation is unsupported"
)
if self.data is None:
raise ValueError(
"As the response.data is empty, this operation is unsupported"
)
return self.data.get(key, None)
def __aiter__(self):
"""Enables the ability to iterate over the response.
        It's required for the async iterator protocol.
Note:
This enables Slack cursor-based pagination.
Returns:
(AsyncSlackResponse) self
"""
self._iteration = 0
self.data = self._initial_data
return self
async def __anext__(self):
"""Retrieves the next portion of results, if 'next_cursor' is present.
Note:
Some responses return collections of information
like channel and user lists. If they do it's likely
that you'll only receive a portion of results. This
method allows you to iterate over the response until
your code hits 'break' or there are no more results
to be found.
Returns:
(AsyncSlackResponse) self
With the new response data now attached to this object.
Raises:
SlackApiError: If the request to the Slack API failed.
StopAsyncIteration: If 'next_cursor' is not present or empty.
"""
self._iteration += 1
if self._iteration == 1:
return self
if _next_cursor_is_present(self.data): # skipcq: PYL-R1705
params = self.req_args.get("params", {})
if params is None:
params = {}
params.update({"cursor": self.data["response_metadata"]["next_cursor"]})
self.req_args.update({"params": params})
response = await self._client._request( # skipcq: PYL-W0212
http_verb=self.http_verb,
api_url=self.api_url,
req_args=self.req_args,
)
self.data = response["data"]
self.headers = response["headers"]
self.status_code = response["status_code"]
return self.validate()
else:
raise StopAsyncIteration
def get(self, key, default=None):
"""Retrieves any key from the response data.
Note:
This is implemented so users can reference the
SlackResponse object like a dictionary.
e.g. response.get("ok", False)
Returns:
The value from data or the specified default.
"""
if isinstance(self.data, bytes):
raise ValueError(
"As the response.data is binary data, this operation is unsupported"
)
if self.data is None:
return None
return self.data.get(key, default)
def validate(self):
"""Check if the response from Slack was successful.
Returns:
(AsyncSlackResponse)
            This method returns its own object, e.g. 'self'.
Raises:
SlackApiError: The request to the Slack API failed.
"""
if self.status_code == 200 and self.data and self.data.get("ok", False):
return self
msg = "The request to the Slack API failed."
raise e.SlackApiError(message=msg, response=self)
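# Hedged illustration (not part of slack_sdk): constructing an AsyncSlackResponse
# directly with made-up data to show how get(), __contains__ and validate()
# behave; a real instance is normally produced by AsyncWebClient.
if __name__ == "__main__":
    resp = AsyncSlackResponse(
        client=None,
        http_verb="POST",
        api_url="https://slack.com/api/auth.test",
        req_args={},
        data={"ok": True, "user": "bot"},
        headers={},
        status_code=200,
    )
    assert "user" in resp
    assert resp.get("missing", "fallback") == "fallback"
    assert resp.validate() is resp  # would raise SlackApiError if "ok" were False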
|
slackhq/python-slackclient
|
slack/web/async_slack_response.py
|
Python
|
mit
| 6,347 | 0.000788 |
"""
Dynamic DNS updates
===================
Ensure a DNS record is present or absent utilizing RFC 2136
type dynamic updates.
:depends: - `dnspython <http://www.dnspython.org/>`_
.. note::
The ``dnspython`` module is required when managing DDNS using a TSIG key.
If you are not using a TSIG key, DDNS is allowed by ACLs based on IP
address and the ``dnspython`` module is not required.
Example:
.. code-block:: yaml
webserver:
ddns.present:
- zone: example.com
- ttl: 60
- data: 111.222.333.444
- nameserver: 123.234.345.456
- keyfile: /srv/salt/dnspy_tsig_key.txt
"""
def __virtual__():
if "ddns.update" in __salt__:
return "ddns"
return (False, "ddns module could not be loaded")
def present(name, zone, ttl, data, rdtype="A", **kwargs):
"""
Ensures that the named DNS record is present with the given ttl.
name
The host portion of the DNS record, e.g., 'webserver'. Name and zone
are concatenated when the entry is created unless name includes a
trailing dot, so make sure that information is not duplicated in these
two arguments.
zone
The zone to check/update
ttl
TTL for the record
data
Data for the DNS record. E.g., the IP address for an A record.
rdtype
DNS resource type. Default 'A'.
``**kwargs``
Additional arguments the ddns.update function may need (e.g.
nameserver, keyfile, keyname). Note that the nsupdate key file can’t
be reused by this function, the keyfile and other arguments must
follow the `dnspython <http://www.dnspython.org/>`_ spec.
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
ret["result"] = None
ret["comment"] = '{} record "{}" will be updated'.format(rdtype, name)
return ret
status = __salt__["ddns.update"](zone, name, ttl, rdtype, data, **kwargs)
if status is None:
ret["result"] = True
ret["comment"] = '{} record "{}" already present with ttl of {}'.format(
rdtype, name, ttl
)
elif status:
ret["result"] = True
ret["comment"] = 'Updated {} record for "{}"'.format(rdtype, name)
ret["changes"] = {
"name": name,
"zone": zone,
"ttl": ttl,
"rdtype": rdtype,
"data": data,
}
else:
ret["result"] = False
ret["comment"] = 'Failed to create or update {} record for "{}"'.format(
rdtype, name
)
return ret
def absent(name, zone, data=None, rdtype=None, **kwargs):
"""
Ensures that the named DNS record is absent.
name
The host portion of the DNS record, e.g., 'webserver'. Name and zone
are concatenated when the entry is created unless name includes a
trailing dot, so make sure that information is not duplicated in these
two arguments.
zone
The zone to check
data
Data for the DNS record. E.g., the IP address for an A record. If omitted,
all records matching name (and rdtype, if provided) will be purged.
rdtype
DNS resource type. If omitted, all types will be purged.
``**kwargs``
Additional arguments the ddns.update function may need (e.g.
nameserver, keyfile, keyname). Note that the nsupdate key file can’t
be reused by this function, the keyfile and other arguments must
follow the `dnspython <http://www.dnspython.org/>`_ spec.
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
ret["result"] = None
ret["comment"] = '{} record "{}" will be deleted'.format(rdtype, name)
return ret
status = __salt__["ddns.delete"](zone, name, rdtype, data, **kwargs)
if status is None:
ret["result"] = True
ret["comment"] = "No matching DNS record(s) present"
elif status:
ret["result"] = True
ret["comment"] = "Deleted DNS record(s)"
ret["changes"] = {"Deleted": {"name": name, "zone": zone}}
else:
ret["result"] = False
ret["comment"] = "Failed to delete DNS record(s)"
return ret
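# Hedged sketch (not part of the Salt module): exercising present() outside of
# the Salt loader by defining the __opts__ and __salt__ globals it expects; the
# stub values below are illustrative only.
if __name__ == "__main__":
    __opts__ = {"test": True}                          # dry-run mode
    __salt__ = {"ddns.update": lambda *a, **kw: True}  # stubbed execution module
    print(present("webserver", "example.com", 60, "203.0.113.10"))
    # -> result None and comment 'A record "webserver" will be updated'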
|
saltstack/salt
|
salt/states/ddns.py
|
Python
|
apache-2.0
| 4,297 | 0.000699 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Debugger Wrapper Session Consisting of a Local Curses-based CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import shutil
import sys
import tempfile
# Google-internal import(s).
from tensorflow.python.debug.cli import analyzer_cli
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import profile_analyzer_cli
from tensorflow.python.debug.cli import stepper_cli
from tensorflow.python.debug.cli import ui_factory
from tensorflow.python.debug.lib import common
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.wrappers import framework
_DUMP_ROOT_PREFIX = "tfdbg_"
class LocalCLIDebugWrapperSession(framework.BaseDebugWrapperSession):
"""Concrete subclass of BaseDebugWrapperSession implementing a local CLI.
This class has all the methods that a `session.Session` object has, in order
to support debugging with minimal code changes. Invoking its `run()` method
will launch the command-line interface (CLI) of tfdbg.
"""
def __init__(self,
sess,
dump_root=None,
log_usage=True,
ui_type="curses",
thread_name_filter=None):
"""Constructor of LocalCLIDebugWrapperSession.
Args:
sess: The TensorFlow `Session` object being wrapped.
dump_root: (`str`) optional path to the dump root directory. Must be a
directory that does not exist or an empty directory. If the directory
does not exist, it will be created by the debugger core during debug
`run()` calls and removed afterwards. If `None`, the debug dumps will
be at tfdbg_<random_string> under the system temp directory.
log_usage: (`bool`) whether the usage of this class is to be logged.
ui_type: (`str`) requested UI type. Currently supported:
(curses | readline)
thread_name_filter: Regular-expression white list for thread name. See
the doc of `BaseDebugWrapperSession` for details.
Raises:
ValueError: If dump_root is an existing and non-empty directory or if
dump_root is a file.
"""
if log_usage:
pass # No logging for open-source.
framework.BaseDebugWrapperSession.__init__(
self, sess, thread_name_filter=thread_name_filter)
if not dump_root:
self._dump_root = tempfile.mktemp(prefix=_DUMP_ROOT_PREFIX)
else:
dump_root = os.path.expanduser(dump_root)
if os.path.isfile(dump_root):
raise ValueError("dump_root path points to a file: %s" % dump_root)
elif os.path.isdir(dump_root) and os.listdir(dump_root):
raise ValueError("dump_root path points to a non-empty directory: %s" %
dump_root)
self._dump_root = dump_root
self._initialize_argparsers()
# Registered tensor filters.
self._tensor_filters = {}
# Register frequently-used filter(s).
self.add_tensor_filter("has_inf_or_nan", debug_data.has_inf_or_nan)
# Below are the state variables of this wrapper object.
# _active_tensor_filter: what (if any) tensor filter is in effect. If such
# a filter is in effect, this object will call run() method of the
# underlying TensorFlow Session object until the filter passes. This is
# activated by the "-f" flag of the "run" command.
# _run_through_times: keeps track of how many times the wrapper needs to
# run through without stopping at the run-end CLI. It is activated by the
# "-t" option of the "run" command.
# _skip_debug: keeps track of whether the current run should be executed
# without debugging. It is activated by the "-n" option of the "run"
# command.
#
# _run_start_response: keeps track what OnRunStartResponse the wrapper
# should return at the next run-start callback. If this information is
# unavailable (i.e., is None), the run-start CLI will be launched to ask
# the user. This is the case, e.g., right before the first run starts.
self._active_tensor_filter = None
self._active_tensor_filter_run_start_response = None
self._run_through_times = 1
self._skip_debug = False
self._run_start_response = None
self._is_run_start = True
self._ui_type = ui_type
def _initialize_argparsers(self):
self._argparsers = {}
ap = argparse.ArgumentParser(
description="Run through, with or without debug tensor watching.",
usage=argparse.SUPPRESS)
ap.add_argument(
"-t",
"--times",
dest="times",
type=int,
default=1,
help="How many Session.run() calls to proceed with.")
ap.add_argument(
"-n",
"--no_debug",
dest="no_debug",
action="store_true",
help="Run through without debug tensor watching.")
ap.add_argument(
"-f",
"--till_filter_pass",
dest="till_filter_pass",
type=str,
default="",
help="Run until a tensor in the graph passes the specified filter.")
ap.add_argument(
"--node_name_filter",
dest="node_name_filter",
type=str,
default="",
help="Regular-expression filter for node names to be watched in the "
"run, e.g., loss, reshape.*")
ap.add_argument(
"--op_type_filter",
dest="op_type_filter",
type=str,
default="",
help="Regular-expression filter for op type to be watched in the run, "
"e.g., (MatMul|Add), Variable.*")
ap.add_argument(
"--tensor_dtype_filter",
dest="tensor_dtype_filter",
type=str,
default="",
help="Regular-expression filter for tensor dtype to be watched in the "
"run, e.g., (float32|float64), int.*")
ap.add_argument(
"-p",
"--profile",
dest="profile",
action="store_true",
help="Run and profile TensorFlow graph execution.")
self._argparsers["run"] = ap
ap = argparse.ArgumentParser(
description="Invoke stepper (cont, step, breakpoint, etc.)",
usage=argparse.SUPPRESS)
self._argparsers["invoke_stepper"] = ap
ap = argparse.ArgumentParser(
description="Display information about this Session.run() call.",
usage=argparse.SUPPRESS)
self._argparsers["run_info"] = ap
self._argparsers["print_feed"] = command_parser.get_print_tensor_argparser(
"Print the value of a feed in feed_dict.")
def add_tensor_filter(self, filter_name, tensor_filter):
"""Add a tensor filter.
Args:
filter_name: (`str`) name of the filter.
tensor_filter: (`callable`) the filter callable. See the doc string of
`DebugDumpDir.find()` for more details about its signature.
"""
self._tensor_filters[filter_name] = tensor_filter
def on_session_init(self, request):
"""Overrides on-session-init callback.
Args:
request: An instance of `OnSessionInitRequest`.
Returns:
An instance of `OnSessionInitResponse`.
"""
return framework.OnSessionInitResponse(
framework.OnSessionInitAction.PROCEED)
def on_run_start(self, request):
"""Overrides on-run-start callback.
Invoke the CLI to let user choose what action to take:
`run` / `invoke_stepper`.
Args:
request: An instance of `OnRunStartRequest`.
Returns:
An instance of `OnRunStartResponse`.
"""
self._is_run_start = True
self._update_run_calls_state(
request.run_call_count, request.fetches, request.feed_dict,
is_callable_runner=request.is_callable_runner)
if self._active_tensor_filter:
# If we are running until a filter passes, we just need to keep running
# with the previous `OnRunStartResponse`.
return self._active_tensor_filter_run_start_response
self._exit_if_requested_by_user()
if self._run_call_count > 1 and not self._skip_debug:
if self._run_through_times > 0:
# Just run through without debugging.
return framework.OnRunStartResponse(
framework.OnRunStartAction.NON_DEBUG_RUN, [])
elif self._run_through_times == 0:
# It is the run at which the run-end CLI will be launched: activate
# debugging.
return (self._run_start_response or
framework.OnRunStartResponse(
framework.OnRunStartAction.DEBUG_RUN,
self._get_run_debug_urls()))
if self._run_start_response is None:
self._prep_cli_for_run_start()
self._run_start_response = self._launch_cli()
if self._active_tensor_filter:
self._active_tensor_filter_run_start_response = self._run_start_response
if self._run_through_times > 1:
self._run_through_times -= 1
self._exit_if_requested_by_user()
return self._run_start_response
def _exit_if_requested_by_user(self):
if self._run_start_response == debugger_cli_common.EXPLICIT_USER_EXIT:
# Explicit user "exit" command leads to sys.exit(1).
print(
"Note: user exited from debugger CLI: Calling sys.exit(1).",
file=sys.stderr)
sys.exit(1)
def _prep_cli_for_run_start(self):
"""Prepare (but not launch) the CLI for run-start."""
self._run_cli = ui_factory.get_ui(self._ui_type)
help_intro = debugger_cli_common.RichTextLines([])
if self._run_call_count == 1:
# Show logo at the onset of the first run.
help_intro.extend(cli_shared.get_tfdbg_logo())
help_intro.extend(debugger_cli_common.RichTextLines("Upcoming run:"))
help_intro.extend(self._run_info)
self._run_cli.set_help_intro(help_intro)
# Create initial screen output detailing the run.
self._title = "run-start: " + self._run_description
self._init_command = "run_info"
self._title_color = "blue_on_white"
def on_run_end(self, request):
"""Overrides on-run-end callback.
Actions taken:
1) Load the debug dump.
2) Bring up the Analyzer CLI.
Args:
request: An instance of OnSessionInitRequest.
Returns:
An instance of OnSessionInitResponse.
"""
self._is_run_start = False
if request.performed_action == framework.OnRunStartAction.DEBUG_RUN:
partition_graphs = None
if request.run_metadata and request.run_metadata.partition_graphs:
partition_graphs = request.run_metadata.partition_graphs
elif request.client_graph_def:
partition_graphs = [request.client_graph_def]
if request.tf_error and not os.path.isdir(self._dump_root):
# It is possible that the dump root may not exist due to errors that
# have occurred prior to graph execution (e.g., invalid device
# assignments), in which case we will just raise the exception as the
# unwrapped Session does.
raise request.tf_error
debug_dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=partition_graphs)
debug_dump.set_python_graph(self._sess.graph)
passed_filter = None
if self._active_tensor_filter:
if not debug_dump.find(
self._tensor_filters[self._active_tensor_filter], first_n=1):
# No dumped tensor passes the filter in this run. Clean up the dump
# directory and move on.
self._remove_dump_root()
return framework.OnRunEndResponse()
else:
# Some dumped tensor(s) from this run passed the filter.
passed_filter = self._active_tensor_filter
self._active_tensor_filter = None
self._prep_debug_cli_for_run_end(
debug_dump, request.tf_error, passed_filter)
self._run_start_response = self._launch_cli()
# Clean up the dump generated by this run.
self._remove_dump_root()
elif request.performed_action == framework.OnRunStartAction.PROFILE_RUN:
self._prep_profile_cli_for_run_end(self._sess.graph, request.run_metadata)
self._run_start_response = self._launch_cli()
else:
# No debug information to show following a non-debug run() call.
self._run_start_response = None
# Return placeholder response that currently holds no additional
# information.
return framework.OnRunEndResponse()
def _remove_dump_root(self):
if os.path.isdir(self._dump_root):
shutil.rmtree(self._dump_root)
def _prep_debug_cli_for_run_end(self, debug_dump, tf_error, passed_filter):
"""Prepare (but not launch) CLI for run-end, with debug dump from the run.
Args:
debug_dump: (debug_data.DebugDumpDir) The debug dump directory from this
run.
tf_error: (None or OpError) OpError that happened during the run() call
(if any).
passed_filter: (None or str) Name of the tensor filter that just passed
and caused the preparation of this run-end CLI (if any).
"""
if tf_error:
help_intro = cli_shared.get_error_intro(tf_error)
self._init_command = "help"
self._title_color = "red_on_white"
else:
help_intro = None
self._init_command = "lt"
self._title_color = "black_on_white"
if passed_filter is not None:
# Some dumped tensor(s) from this run passed the filter.
self._init_command = "lt -f %s" % passed_filter
self._title_color = "red_on_white"
self._run_cli = analyzer_cli.create_analyzer_ui(
debug_dump, self._tensor_filters, ui_type=self._ui_type,
on_ui_exit=self._remove_dump_root)
# Get names of all dumped tensors.
dumped_tensor_names = []
for datum in debug_dump.dumped_tensor_data:
dumped_tensor_names.append("%s:%d" %
(datum.node_name, datum.output_slot))
# Tab completions for command "print_tensors".
self._run_cli.register_tab_comp_context(["print_tensor", "pt"],
dumped_tensor_names)
# Tab completion for commands "node_info", "list_inputs" and
# "list_outputs". The list comprehension is used below because nodes()
# output can be unicodes and they need to be converted to strs.
self._run_cli.register_tab_comp_context(
["node_info", "ni", "list_inputs", "li", "list_outputs", "lo"],
[str(node_name) for node_name in debug_dump.nodes()])
# TODO(cais): Reduce API surface area for aliases vis-a-vis tab
# completion contexts and registered command handlers.
self._title = "run-end: " + self._run_description
if help_intro:
self._run_cli.set_help_intro(help_intro)
def _prep_profile_cli_for_run_end(self, py_graph, run_metadata):
self._init_command = "lp"
self._run_cli = profile_analyzer_cli.create_profiler_ui(
py_graph, run_metadata, ui_type=self._ui_type,
config=self._run_cli.config)
self._title = "run-end (profiler mode): " + self._run_description
def _launch_cli(self):
"""Launch the interactive command-line interface.
Returns:
The OnRunStartResponse specified by the user using the "run" command.
"""
self._register_this_run_info(self._run_cli)
response = self._run_cli.run_ui(
init_command=self._init_command,
title=self._title,
title_color=self._title_color)
return response
def _run_info_handler(self, args, screen_info=None):
output = debugger_cli_common.RichTextLines([])
if self._run_call_count == 1:
output.extend(cli_shared.get_tfdbg_logo())
output.extend(self._run_info)
if (not self._is_run_start and
debugger_cli_common.MAIN_MENU_KEY in output.annotations):
menu = output.annotations[debugger_cli_common.MAIN_MENU_KEY]
if "list_tensors" not in menu.captions():
menu.insert(
0, debugger_cli_common.MenuItem("list_tensors", "list_tensors"))
return output
def _print_feed_handler(self, args, screen_info=None):
np_printoptions = cli_shared.numpy_printoptions_from_screen_info(
screen_info)
if not self._feed_dict:
return cli_shared.error(
"The feed_dict of the current run is None or empty.")
parsed = self._argparsers["print_feed"].parse_args(args)
tensor_name, tensor_slicing = (
command_parser.parse_tensor_name_with_slicing(parsed.tensor_name))
feed_key = None
feed_value = None
for key in self._feed_dict:
key_name = common.get_graph_element_name(key)
if key_name == tensor_name:
feed_key = key_name
feed_value = self._feed_dict[key]
break
if feed_key is None:
return cli_shared.error(
"The feed_dict of the current run does not contain the key %s" %
tensor_name)
else:
return cli_shared.format_tensor(
feed_value,
feed_key + " (feed)",
np_printoptions,
print_all=parsed.print_all,
tensor_slicing=tensor_slicing,
highlight_options=cli_shared.parse_ranges_highlight(parsed.ranges),
include_numeric_summary=parsed.numeric_summary)
def _run_handler(self, args, screen_info=None):
"""Command handler for "run" command during on-run-start."""
del screen_info # Currently unused.
parsed = self._argparsers["run"].parse_args(args)
parsed.node_name_filter = parsed.node_name_filter or None
parsed.op_type_filter = parsed.op_type_filter or None
parsed.tensor_dtype_filter = parsed.tensor_dtype_filter or None
if parsed.profile:
raise debugger_cli_common.CommandLineExit(
exit_token=framework.OnRunStartResponse(
framework.OnRunStartAction.PROFILE_RUN, []))
self._skip_debug = parsed.no_debug
self._run_through_times = parsed.times
if parsed.times > 1 or parsed.no_debug:
# If requested -t times > 1, the very next run will be a non-debug run.
action = framework.OnRunStartAction.NON_DEBUG_RUN
debug_urls = []
else:
action = framework.OnRunStartAction.DEBUG_RUN
debug_urls = self._get_run_debug_urls()
run_start_response = framework.OnRunStartResponse(
action,
debug_urls,
node_name_regex_whitelist=parsed.node_name_filter,
op_type_regex_whitelist=parsed.op_type_filter,
tensor_dtype_regex_whitelist=parsed.tensor_dtype_filter)
if parsed.till_filter_pass:
# For the run-till-filter-pass (run -f) mode, use the DEBUG_RUN
# option to access the intermediate tensors, and set the corresponding
# state flag of the class itself to True.
if parsed.till_filter_pass in self._tensor_filters:
action = framework.OnRunStartAction.DEBUG_RUN
self._active_tensor_filter = parsed.till_filter_pass
self._active_tensor_filter_run_start_response = run_start_response
else:
# Handle invalid filter name.
return debugger_cli_common.RichTextLines(
["ERROR: tensor filter \"%s\" does not exist." %
parsed.till_filter_pass])
# Raise CommandLineExit exception to cause the CLI to exit.
raise debugger_cli_common.CommandLineExit(exit_token=run_start_response)
def _register_this_run_info(self, curses_cli):
curses_cli.register_command_handler(
"run",
self._run_handler,
self._argparsers["run"].format_help(),
prefix_aliases=["r"])
curses_cli.register_command_handler(
"invoke_stepper",
self._on_run_start_step_handler,
self._argparsers["invoke_stepper"].format_help(),
prefix_aliases=["s"])
curses_cli.register_command_handler(
"run_info",
self._run_info_handler,
self._argparsers["run_info"].format_help(),
prefix_aliases=["ri"])
curses_cli.register_command_handler(
"print_feed",
self._print_feed_handler,
self._argparsers["print_feed"].format_help(),
prefix_aliases=["pf"])
if self._tensor_filters:
# Register tab completion for the filter names.
curses_cli.register_tab_comp_context(["run", "r"],
list(self._tensor_filters.keys()))
if self._feed_dict:
# Register tab completion for feed_dict keys.
feed_keys = [common.get_graph_element_name(key)
for key in self._feed_dict.keys()]
curses_cli.register_tab_comp_context(["print_feed", "pf"], feed_keys)
def _on_run_start_step_handler(self, args, screen_info=None):
"""Command handler for "invoke_stepper" command during on-run-start."""
_ = screen_info # Currently unused.
# No parsing is currently necessary for invoke_stepper. This may change
# in the future when the command has arguments.
# Raise CommandLineExit exception to cause the CLI to exit.
raise debugger_cli_common.CommandLineExit(
exit_token=framework.OnRunStartResponse(
framework.OnRunStartAction.INVOKE_STEPPER, []))
def _get_run_debug_urls(self):
"""Get the debug_urls value for the current run() call.
Returns:
debug_urls: (list of str) Debug URLs for the current run() call.
Currently, the list consists of only one URL that is a file:// URL.
"""
return ["file://" + self._dump_root]
def _update_run_calls_state(self,
run_call_count,
fetches,
feed_dict,
is_callable_runner=False):
"""Update the internal state with regard to run() call history.
Args:
run_call_count: (int) Number of run() calls that have occurred.
fetches: a node/tensor or a list of node/tensor that are the fetches of
the run() call. This is the same as the fetches argument to the run()
call.
      feed_dict: None or a dict. This is the feed_dict argument to the run()
call.
is_callable_runner: (bool) whether a runner returned by
Session.make_callable is being run.
"""
self._run_call_count = run_call_count
self._feed_dict = feed_dict
self._run_description = cli_shared.get_run_short_description(
run_call_count,
fetches,
feed_dict,
is_callable_runner=is_callable_runner)
self._run_through_times -= 1
self._run_info = cli_shared.get_run_start_intro(
run_call_count,
fetches,
feed_dict,
self._tensor_filters,
is_callable_runner=is_callable_runner)
def invoke_node_stepper(self,
node_stepper,
restore_variable_values_on_exit=True):
"""Overrides method in base class to implement interactive node stepper.
Args:
node_stepper: (`stepper.NodeStepper`) The underlying NodeStepper API
object.
restore_variable_values_on_exit: (`bool`) Whether any variables whose
values have been altered during this node-stepper invocation should be
restored to their old values when this invocation ends.
Returns:
The same return values as the `Session.run()` call on the same fetches as
the NodeStepper.
"""
stepper = stepper_cli.NodeStepperCLI(node_stepper)
# On exiting the node-stepper CLI, the finalize method of the node_stepper
# object will be called, ensuring that the state of the graph will be the
# same as if the stepping did not happen.
# TODO(cais): Perhaps some users will want the effect of the interactive
# stepping and value injection to persist. When that happens, make the call
# to finalize optional.
stepper_ui = ui_factory.get_ui(
self._ui_type,
on_ui_exit=(node_stepper.restore_variable_values if
restore_variable_values_on_exit else None))
stepper_ui.register_command_handler(
"list_sorted_nodes",
stepper.list_sorted_nodes,
stepper.arg_parsers["list_sorted_nodes"].format_help(),
prefix_aliases=["lt", "lsn"])
stepper_ui.register_command_handler(
"cont",
stepper.cont,
stepper.arg_parsers["cont"].format_help(),
prefix_aliases=["ct", "c"])
stepper_ui.register_command_handler(
"step",
stepper.step,
stepper.arg_parsers["step"].format_help(),
prefix_aliases=["st", "s"])
stepper_ui.register_command_handler(
"print_tensor",
stepper.print_tensor,
stepper.arg_parsers["print_tensor"].format_help(),
prefix_aliases=["pt"])
stepper_ui.register_command_handler(
"inject_value",
stepper.inject_value,
stepper.arg_parsers["inject_value"].format_help(),
prefix_aliases=["inject", "override_value", "override"])
# Register tab completion candidates.
stepper_ui.register_tab_comp_context([
"cont", "ct", "c", "pt", "inject_value", "inject", "override_value",
"override"
], [str(elem) for elem in node_stepper.sorted_nodes()])
# TODO(cais): Tie up register_tab_comp_context to a single alias to shorten
# calls like this.
return stepper_ui.run_ui(
init_command="lt",
title="Node Stepper: " + self._run_description,
title_color="blue_on_white")
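# Hedged usage sketch (assumes TensorFlow 1.x with tf.Session available): the
# typical way a session is wrapped with the class above; the constant below is
# a made-up fetch, and running this drops into the interactive tfdbg CLI.
if __name__ == "__main__":
  import tensorflow as tf
  c = tf.constant([1.0, 2.0], name="c")
  with tf.Session() as sess:
    wrapped_sess = LocalCLIDebugWrapperSession(sess, ui_type="readline")
    print(wrapped_sess.run(c))  # the run-start CLI launches before execution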
|
rabipanda/tensorflow
|
tensorflow/python/debug/wrappers/local_cli_wrapper.py
|
Python
|
apache-2.0
| 25,959 | 0.003929 |
from django.test import TestCase
from store.forms import ReviewForm
from store.models import Review
from .factories import *
class ReviewFormTest(TestCase):
def test_form_validation_for_blank_items(self):
p1 = ProductFactory.create()
form = ReviewForm(
data={'name':'', 'text': '', 'product':p1.id})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['text'],["Please fill in the review"])
self.assertEqual(form.errors['rating'],["Please leave a rating"])
def test_form_validation_for_invalid_review(self):
p1 = ProductFactory.create()
form = ReviewForm(
data={'name':'', 'text': '', 'rating': 0, 'product':p1.id})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['text'],["Please fill in the review"])
self.assertEqual(form.errors['rating'],["Please leave a valid rating"])
def test_form_validation_for_required_name_field(self):
p1 = ProductFactory.create()
form = ReviewForm(
data={'name':'', 'text': 'Hello', 'rating': 2, 'product':p1.id})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['name'],['Please fill in your name'])
def test_form_save_handles_saving_product_reviews(self):
prod = ProductFactory.create()
form = ReviewForm(
data={'name':'Kevin', 'text': 'Review', 'rating': 3, 'product':prod.id})
new_review = form.save()
self.assertEqual(new_review, Review.objects.first())
self.assertEqual(new_review.name, 'Kevin')
self.assertEqual(new_review.product, prod)
|
kevgathuku/compshop
|
store/tests/test_forms.py
|
Python
|
bsd-3-clause
| 1,654 | 0.008464 |
{
"name" : "Add sales team to website leads (OBSOLETE)",
"version" : "0.1",
"author" : "IT-Projects LLC, Ivan Yelizariev",
'license': 'GPL-3',
"category" : "Website",
"website" : "https://yelizariev.github.io",
"depends" : ["website_crm"],
#"init_xml" : [],
#"update_xml" : [],
#"active": True,
"installable": True
}
|
veryberry/website-addons
|
website_crm_sales_team/__openerp__.py
|
Python
|
lgpl-3.0
| 361 | 0.024931 |
#-*- coding: utf-8 -*-
# Author : Jeonghoonkang, github.com/jeonghoonkang
import platform
import sys
import os
import time
import traceback
import requests
import RPi.GPIO as GPIO
from socket import gethostname
hostname = gethostname()
SERVER_ADDR = '211.184.76.80'
GPIO.setwarnings(False)
GPIO.cleanup()
GPIO.setmode(GPIO.BCM)
GPIO.setup(19,GPIO.OUT) # for LED indicating
GPIO.setup(26, GPIO.OUT) # for LED indicating
def query_last_data_point(bridge_id):
url = 'http://%s/api/raw_bridge_last/?bridge_id=%d' % (SERVER_ADDR, bridge_id)
try:
ret = requests.get(url, timeout=10)
if ret.ok:
ctx = ret.json()
if ctx['code'] == 0:
return ctx['result']['time'], ctx['result']['value']
except Exception:
#print Exception
pass
return None
bridge_id = int(hostname[5:10])
GPIO.output(26, True) # server connection is OK, showing through LED
ret = None  # avoid a NameError if the very first query raises
while True:
    try:
        ret = query_last_data_point(bridge_id)
    except Exception:  # a bare except would also swallow KeyboardInterrupt
        pass
if ret is not None:
t, v = ret
if t > time.time() - 30:
dt = time.time() - t
GPIO.output(19, True)
GPIO.output(26, False)
else:
GPIO.output(19, True)
GPIO.output(26, True)
else:
GPIO.output(19, False)
GPIO.output(26, True)
time.sleep(5.0)
|
jeonghoonkang/BerePi
|
apps/check/monitor.py
|
Python
|
bsd-2-clause
| 1,308 | 0.021407 |
import time
import os
import posixpath
import datetime
import math
import re
import logging
from django import template
from django.utils.encoding import smart_unicode
from django.utils.safestring import mark_safe
from forum.models import Question, Answer, QuestionRevision, AnswerRevision, NodeRevision
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from django.utils import simplejson
from forum import settings
from django.template.defaulttags import url as default_url
from forum import skins
from forum.utils import html
from extra_filters import decorated_int
from django.core.urlresolvers import reverse
register = template.Library()
GRAVATAR_TEMPLATE = ('<img class="gravatar" width="%(size)s" height="%(size)s" '
'src="http://www.gravatar.com/avatar/%(gravatar_hash)s'
'?s=%(size)s&d=%(default)s&r=%(rating)s" '
'alt="%(username)s\'s gravatar image" />')
@register.simple_tag
def gravatar(user, size):
try:
gravatar = user['gravatar']
username = user['username']
except (TypeError, AttributeError, KeyError):
gravatar = user.gravatar
username = user.username
return mark_safe(GRAVATAR_TEMPLATE % {
'size': size,
'gravatar_hash': gravatar,
'default': settings.GRAVATAR_DEFAULT_IMAGE,
'rating': settings.GRAVATAR_ALLOWED_RATING,
'username': template.defaultfilters.urlencode(username),
})
@register.simple_tag
def get_score_badge(user):
if user.is_suspended():
return _("(suspended)")
repstr = decorated_int(user.reputation, "")
BADGE_TEMPLATE = '<span class="score" title="%(reputation)s %(reputationword)s">%(repstr)s</span>'
if user.gold > 0 :
BADGE_TEMPLATE = '%s%s' % (BADGE_TEMPLATE, '<span title="%(gold)s %(badgesword)s">'
'<span class="badge1">●</span>'
'<span class="badgecount">%(gold)s</span>'
'</span>')
if user.silver > 0:
BADGE_TEMPLATE = '%s%s' % (BADGE_TEMPLATE, '<span title="%(silver)s %(badgesword)s">'
'<span class="silver">●</span>'
'<span class="badgecount">%(silver)s</span>'
'</span>')
if user.bronze > 0:
BADGE_TEMPLATE = '%s%s' % (BADGE_TEMPLATE, '<span title="%(bronze)s %(badgesword)s">'
'<span class="bronze">●</span>'
'<span class="badgecount">%(bronze)s</span>'
'</span>')
BADGE_TEMPLATE = smart_unicode(BADGE_TEMPLATE, encoding='utf-8', strings_only=False, errors='strict')
return mark_safe(BADGE_TEMPLATE % {
'reputation' : user.reputation,
'repstr': repstr,
'gold' : user.gold,
'silver' : user.silver,
'bronze' : user.bronze,
'badgesword' : _('badges'),
'reputationword' : _('reputation points'),
})
@register.simple_tag
def get_age(birthday):
current_time = datetime.datetime(*time.localtime()[0:6])
year = birthday.year
month = birthday.month
day = birthday.day
diff = current_time - datetime.datetime(year, month, day, 0, 0, 0)
return diff.days / 365
@register.simple_tag
def diff_date(date, limen=2):
if not date:
return _('unknown')
now = datetime.datetime.now()
diff = now - date
days = diff.days
hours = int(diff.seconds/3600)
minutes = int(diff.seconds/60)
if days > 2:
if date.year == now.year:
return date.strftime(_("%b %d at %H:%M").encode())
else:
return date.strftime(_("%b %d '%y at %H:%M").encode())
elif days == 2:
return _('2 days ago')
elif days == 1:
return _('yesterday')
elif minutes >= 60:
return ungettext('%(hr)d ' + _("hour ago"), '%(hr)d ' + _("hours ago"), hours) % {'hr':hours}
elif diff.seconds >= 60:
return ungettext('%(min)d ' + _("min ago"), '%(min)d ' + _("mins ago"), minutes) % {'min':minutes}
else:
return ungettext('%(sec)d ' + _("sec ago"), '%(sec)d ' + _("secs ago"), diff.seconds) % {'sec':diff.seconds}
@register.simple_tag
def media(url):
url = skins.find_media_source(url)
if url:
# Create the URL prefix.
url_prefix = settings.FORCE_SCRIPT_NAME + '/m/'
# Make sure any duplicate forward slashes are replaced with a single
# forward slash.
url_prefix = re.sub("/+", "/", url_prefix)
url = url_prefix + url
return url
class ItemSeparatorNode(template.Node):
def __init__(self, separator):
sep = separator.strip()
if sep[0] == sep[-1] and sep[0] in ('\'', '"'):
sep = sep[1:-1]
else:
raise template.TemplateSyntaxError('separator in joinitems tag must be quoted')
self.content = sep
def render(self, context):
return self.content
class BlockMediaUrlNode(template.Node):
def __init__(self, nodelist):
self.items = nodelist
def render(self, context):
prefix = settings.APP_URL + 'm/'
url = ''
if self.items:
url += '/'
for item in self.items:
url += item.render(context)
url = skins.find_media_source(url)
url = prefix + url
out = url
return out.replace(' ', '')
@register.tag(name='blockmedia')
def blockmedia(parser, token):
try:
tagname = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError("blockmedia tag does not use arguments")
nodelist = []
while True:
        nodelist.append(parser.parse(('endblockmedia',)))
next = parser.next_token()
if next.contents == 'endblockmedia':
break
return BlockMediaUrlNode(nodelist)
@register.simple_tag
def fullmedia(url):
domain = settings.APP_BASE_URL
#protocol = getattr(settings, "PROTOCOL", "http")
path = media(url)
return "%s%s" % (domain, path)
class SimpleVarNode(template.Node):
def __init__(self, name, value):
self.name = name
self.value = template.Variable(value)
def render(self, context):
context[self.name] = self.value.resolve(context)
return ''
class BlockVarNode(template.Node):
def __init__(self, name, block):
self.name = name
self.block = block
def render(self, context):
source = self.block.render(context)
context[self.name] = source.strip()
return ''
@register.tag(name='var')
def do_var(parser, token):
tokens = token.split_contents()[1:]
if not len(tokens) or not re.match('^\w+$', tokens[0]):
raise template.TemplateSyntaxError("Expected variable name")
if len(tokens) == 1:
nodelist = parser.parse(('endvar',))
parser.delete_first_token()
return BlockVarNode(tokens[0], nodelist)
elif len(tokens) == 3:
return SimpleVarNode(tokens[0], tokens[2])
raise template.TemplateSyntaxError("Invalid number of arguments")
class DeclareNode(template.Node):
dec_re = re.compile('^\s*(\w+)\s*(:?=)\s*(.*)$')
def __init__(self, block):
self.block = block
def render(self, context):
source = self.block.render(context)
for line in source.splitlines():
m = self.dec_re.search(line)
if m:
clist = list(context)
clist.reverse()
d = {}
d['_'] = _
d['os'] = os
d['html'] = html
d['reverse'] = reverse
for c in clist:
d.update(c)
try:
context[m.group(1).strip()] = eval(m.group(3).strip(), d)
except Exception, e:
logging.error("Error in declare tag, when evaluating: %s" % m.group(3).strip())
raise
return ''
@register.tag(name='declare')
def do_declare(parser, token):
nodelist = parser.parse(('enddeclare',))
parser.delete_first_token()
return DeclareNode(nodelist)
|
CLLKazan/iCQA
|
qa-engine/forum/templatetags/extra_tags.py
|
Python
|
gpl-3.0
| 7,925 | 0.007823 |
# Copyright (C) 2015 Eric Skoglund
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, see http://www.gnu.org/licenses/gpl-2.0.html
import requests
import sys
class NotSupported(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class PasteSite(object):
def __init__(self, url):
self.url = url
self.paste_url = None
self.data = None
@staticmethod
def siteFactory(site_name):
if site_name == 'slexy.org':
return Slexy()
elif site_name == 'pastebin.mozilla.org':
return Mozilla()
else:
raise NotSupported("This site is not supported")
def parse(self, args):
""" Internal method used by the PasteSite class.
Returns a dictionary of the parsed input arguments.
Parses the arguments given at the command line.
Many pastebin like sites use different arguments
for the paste so this method should be implemented
for each subclass of PasteSite.
See the slexy class for an example of how to implement
this method for subclasses.
"""
self.data = args
def paste(self):
"""Posts the data to the paste site.
This method tries to post the data to the paste site.
If the resulting request does not have a ok status the
program exits else we return the resulting paste url.
The method assumes that the data is in a dictionary.
"""
        if self.data is None:
print('You can only paste after a parse')
sys.exit(-1)
res = requests.post(self.url, self.data)
if not res.ok:
print('Bad response {0} {1}'.format(res.reason, res.status_code))
sys.exit(-1)
self.paste_url = res.url
class Slexy(PasteSite):
def __init__(self):
super(Slexy, self).__init__('http://slexy.org/submit')
def parse(self, args):
form_data = {}
arg_translation = {'text' : 'raw_paste',
'language' : 'language',
'expiration' : 'expire',
'comment' : 'comment',
'description' : 'descr',
'visibility' : 'permissions',
'linum' : 'linenumbers',
'author' : 'author'}
for k,v in args.items():
if arg_translation.get(k):
form_data[arg_translation[k]] = v
form_data['submit'] = 'Submit Paste'
self.data = form_data
class Mozilla(PasteSite):
def __init__(self):
super(Mozilla, self).__init__('https://pastebin.mozilla.org')
def parse(self, args):
form_data = {}
arg_translation = {'text' : 'code2',
'expiration' : 'expiry',
'syntax_highlight' : 'format',
'author' : 'poster'}
for k,v in args.items():
if arg_translation.get(k):
form_data[arg_translation[k]] = v
form_data['paste'] = 'Send'
form_data['parent_pid'] = ''
self.data = form_data
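# Hedged usage sketch (not part of the original module): driving PasteSite end
# to end; the argument names mirror Slexy.parse()'s translation table and the
# values are illustrative. The actual POST is left commented out because it
# needs network access.
if __name__ == "__main__":
    site = PasteSite.siteFactory("slexy.org")
    site.parse({"text": "print('hello')", "language": "Python",
                "expiration": "day", "author": "anonymous"})
    print(site.data)
    # site.paste() would POST the form data and set site.paste_url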
|
EricIO/pasteit
|
pasteit/PasteSites.py
|
Python
|
gpl-2.0
| 3,878 | 0.005931 |
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
import os,unittest
from pyasm.security import Batch
from pyasm.command import Command
from pyasm.prod.biz import Asset
from pyasm.prod.maya import *
from maya_checkin import *
class MayaCheckinTest(unittest.TestCase):
def setUp(my):
batch = Batch()
def test_all(my):
# create a scene that will be checked in
asset_code = "prp101"
sid = "12345"
# create an asset
mel('sphere -n sphere1')
mel('circle -n circle1')
mel('group -n |%s |circle1 |sphere1' % asset_code )
# convert node into a maya asset
node = MayaNode("|%s" % asset_code )
asset_node = MayaAssetNode.add_sid( node, sid )
# checkin the asset
checkin = MayaAssetNodeCheckin(asset_node)
Command.execute_cmd(checkin)
# create a file from this node
asset_node.export()
if __name__ == '__main__':
unittest.main()
|
sadanandb/pmt
|
src/pyasm/prod/checkin/maya_checkin_test.py
|
Python
|
epl-1.0
| 1,263 | 0.008709 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
PANDAS_DTYPES = {
'int8': 'int',
'int16': 'int',
'int32': 'int',
'int64': 'int',
'uint8': 'int',
'uint16': 'int',
'uint32': 'int',
'uint64': 'int',
'float16': 'float',
'float32': 'float',
'float64': 'float',
'bool': 'i'
}
def extract_pandas_data(data):
"""Extract data from pandas.DataFrame for predictors.
Given a DataFrame, will extract the values and cast them to float. The
DataFrame is expected to contain values of type int, float or bool.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values as floats.
Raises:
ValueError: if data contains types other than int, float or bool.
"""
if not isinstance(data, pd.DataFrame):
return data
bad_data = [column for column in data
if data[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return data.values.astype('float')
else:
error_report = [("'" + str(column) + "' type='" +
data[column].dtype.name + "'") for column in bad_data]
raise ValueError('Data types for extracting pandas data must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
def extract_pandas_matrix(data):
"""Extracts numpy matrix from pandas DataFrame.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values.
"""
if not isinstance(data, pd.DataFrame):
return data
return data.as_matrix()
def extract_pandas_labels(labels):
"""Extract data from pandas.DataFrame for labels.
Args:
labels: `pandas.DataFrame` or `pandas.Series` containing one column of
labels to be extracted.
Returns:
A numpy `ndarray` of labels from the DataFrame.
Raises:
ValueError: if more than one column is found or type is not int, float or
bool.
"""
if isinstance(labels,
pd.DataFrame): # pandas.Series also belongs to DataFrame
if len(labels.columns) > 1:
raise ValueError('Only one column for labels is allowed.')
bad_data = [column for column in labels
if labels[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return labels.values
else:
error_report = ["'" + str(column) + "' type="
+ str(labels[column].dtype.name) for column in bad_data]
raise ValueError('Data types for extracting labels must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
else:
return labels
def pandas_input_fn(x, y=None, batch_size=128, num_epochs=None, shuffle=True,
queue_capacity=1000, num_threads=1, target_column='target',
index_column='index'):
"""Returns input function that would feed pandas DataFrame into the model.
  Note: If y's index doesn't match x's index, an exception will be raised.
Args:
x: pandas `DataFrame` object.
y: pandas `Series` object.
batch_size: int, size of batches to return.
    num_epochs: int, number of epochs to iterate over data. If `None`, it will
      run indefinitely.
    shuffle: bool, whether to shuffle the queue. Please make sure you don't
      shuffle at prediction time.
queue_capacity: int, size of queue to accumulate.
num_threads: int, number of threads used for reading and enqueueing.
target_column: str, used to pack `y` into `x` DataFrame under this column.
index_column: str, name of the feature return with index.
Returns:
Function, that has signature of ()->(dict of `features`, `target`)
Raises:
ValueError: if `target_column` column is already in `x` DataFrame.
"""
def input_fn():
"""Pandas input function."""
if y is not None:
if target_column in x:
raise ValueError('Found already column \'%s\' in x, please change '
'target_column to something else. Current columns '
'in x: %s', target_column, x.columns)
if not np.array_equal(x.index, y.index):
        raise ValueError('Index for x and y are mismatched; this will lead '
'to missing values. Please make sure they match or '
'use .reset_index() method.\n'
'Index for x: %s\n'
'Index for y: %s\n', x.index, y.index)
x[target_column] = y
queue = feeding_functions.enqueue_data(
x, queue_capacity, shuffle=shuffle, num_threads=num_threads,
enqueue_size=batch_size, num_epochs=num_epochs)
if num_epochs is None:
features = queue.dequeue_many(batch_size)
else:
features = queue.dequeue_up_to(batch_size)
features = dict(zip([index_column] + list(x.columns), features))
if y is not None:
target = features.pop(target_column)
return features, target
return features
return input_fn
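# Hedged usage sketch (not from the TensorFlow docs): wiring a small DataFrame
# through pandas_input_fn. The returned values are queue-fed tensors, so a
# tf.Session plus queue runners would be needed to actually pull batches.
if __name__ == "__main__" and HAS_PANDAS:
  x = pd.DataFrame({'a': [1.0, 2.0, 3.0, 4.0]})
  y = pd.Series([0.0, 1.0, 0.0, 1.0])
  input_fn = pandas_input_fn(x, y, batch_size=2, num_epochs=1, shuffle=False)
  features, target = input_fn()
  print(sorted(features.keys()), target)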
|
sandeepdsouza93/TensorFlow-15712
|
tensorflow/contrib/learn/python/learn/learn_io/pandas_io.py
|
Python
|
apache-2.0
| 6,001 | 0.004999 |
from euler_functions import is_pandigital_set, number_digits
for x in range(9123, 9876): # much smaller range: http://www.mathblog.dk/project-euler-38-pandigital-multiplying-fixed-number/
products = []
n = 1
num_digits_in_products = 0
while num_digits_in_products < 9:
products.append(x * n)
n += 1
num_digits_in_products = 0
for p in products:
num_digits_in_products += number_digits(p)
if is_pandigital_set(*products):
print products
break
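# Hedged illustration (not part of the original script): the well-known case
# 192 * (1, 2, 3) -> 192384576, spelled out to show the nine-digit stopping
# condition and the pandigital test that the loop above relies on.
if __name__ == "__main__":
    sample = [192 * n for n in (1, 2, 3)]     # [192, 384, 576]
    digits = "".join(str(p) for p in sample)  # "192384576"
    print digits, sorted(digits) == list("123456789")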
|
aarestad/euler-solutions
|
euler_38.py
|
Python
|
gpl-2.0
| 471 | 0.029724 |
"""Test Yeelight."""
import asyncio
from datetime import timedelta
from unittest.mock import AsyncMock, patch
import pytest
from yeelight import BulbException, BulbType
from yeelight.aio import KEY_CONNECTED
from homeassistant.components.yeelight.const import (
CONF_DETECTED_MODEL,
CONF_NIGHTLIGHT_SWITCH,
CONF_NIGHTLIGHT_SWITCH_TYPE,
DOMAIN,
NIGHTLIGHT_SWITCH_TYPE_LIGHT,
STATE_CHANGE_TIME,
)
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import (
CONF_DEVICES,
CONF_HOST,
CONF_ID,
CONF_NAME,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from homeassistant.setup import async_setup_component
from homeassistant.util import dt as dt_util
from . import (
CONFIG_ENTRY_DATA,
ENTITY_AMBILIGHT,
ENTITY_BINARY_SENSOR,
ENTITY_BINARY_SENSOR_TEMPLATE,
ENTITY_LIGHT,
ENTITY_NIGHTLIGHT,
FAIL_TO_BIND_IP,
ID,
IP_ADDRESS,
MODEL,
MODULE,
SHORT_ID,
_mocked_bulb,
_patch_discovery,
_patch_discovery_interval,
_patch_discovery_timeout,
)
from tests.common import MockConfigEntry, async_fire_time_changed
async def test_ip_changes_fallback_discovery(hass: HomeAssistant):
"""Test Yeelight ip changes and we fallback to discovery."""
config_entry = MockConfigEntry(
domain=DOMAIN, data={CONF_ID: ID, CONF_HOST: "5.5.5.5"}, unique_id=ID
)
config_entry.add_to_hass(hass)
mocked_fail_bulb = _mocked_bulb(cannot_connect=True)
mocked_fail_bulb.bulb_type = BulbType.WhiteTempMood
with patch(
f"{MODULE}.AsyncBulb", return_value=mocked_fail_bulb
), _patch_discovery():
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.SETUP_RETRY
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(minutes=2))
await hass.async_block_till_done()
# The discovery should update the ip address
assert config_entry.data[CONF_HOST] == IP_ADDRESS
assert config_entry.state is ConfigEntryState.SETUP_RETRY
mocked_bulb = _mocked_bulb()
with patch(f"{MODULE}.AsyncBulb", return_value=mocked_bulb), _patch_discovery():
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(minutes=10))
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.LOADED
binary_sensor_entity_id = ENTITY_BINARY_SENSOR_TEMPLATE.format(
f"yeelight_color_{SHORT_ID}"
)
entity_registry = er.async_get(hass)
assert entity_registry.async_get(binary_sensor_entity_id) is not None
# Make sure we can still reload with the new ip right after we change it
with patch(f"{MODULE}.AsyncBulb", return_value=mocked_bulb), _patch_discovery():
await hass.config_entries.async_reload(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.LOADED
entity_registry = er.async_get(hass)
assert entity_registry.async_get(binary_sensor_entity_id) is not None
async def test_ip_changes_id_missing_cannot_fallback(hass: HomeAssistant):
"""Test Yeelight ip changes and we fallback to discovery."""
config_entry = MockConfigEntry(domain=DOMAIN, data={CONF_HOST: "5.5.5.5"})
config_entry.add_to_hass(hass)
mocked_bulb = _mocked_bulb(True)
mocked_bulb.bulb_type = BulbType.WhiteTempMood
mocked_bulb.async_listen = AsyncMock(side_effect=[BulbException, None, None, None])
with patch(f"{MODULE}.AsyncBulb", return_value=mocked_bulb):
assert not await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.SETUP_RETRY
async def test_setup_discovery(hass: HomeAssistant):
"""Test setting up Yeelight by discovery."""
config_entry = MockConfigEntry(
domain=DOMAIN, data={CONF_HOST: IP_ADDRESS, **CONFIG_ENTRY_DATA}
)
config_entry.add_to_hass(hass)
mocked_bulb = _mocked_bulb()
with _patch_discovery(), patch(f"{MODULE}.AsyncBulb", return_value=mocked_bulb):
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert hass.states.get(ENTITY_BINARY_SENSOR) is not None
assert hass.states.get(ENTITY_LIGHT) is not None
# Unload
assert await hass.config_entries.async_unload(config_entry.entry_id)
assert hass.states.get(ENTITY_BINARY_SENSOR).state == STATE_UNAVAILABLE
assert hass.states.get(ENTITY_LIGHT).state == STATE_UNAVAILABLE
# Remove
assert await hass.config_entries.async_remove(config_entry.entry_id)
await hass.async_block_till_done()
assert hass.states.get(ENTITY_BINARY_SENSOR) is None
assert hass.states.get(ENTITY_LIGHT) is None
_ADAPTERS_WITH_MANUAL_CONFIG = [
{
"auto": True,
"index": 2,
"default": False,
"enabled": True,
"ipv4": [{"address": "192.168.1.5", "network_prefix": 23}],
"ipv6": [],
"name": "eth1",
},
]
async def test_setup_discovery_with_manually_configured_network_adapter(
hass: HomeAssistant,
):
"""Test setting up Yeelight by discovery with a manually configured network adapter."""
config_entry = MockConfigEntry(
domain=DOMAIN, data={CONF_HOST: IP_ADDRESS, **CONFIG_ENTRY_DATA}
)
config_entry.add_to_hass(hass)
mocked_bulb = _mocked_bulb()
with _patch_discovery(), patch(
f"{MODULE}.AsyncBulb", return_value=mocked_bulb
), patch(
"homeassistant.components.zeroconf.network.async_get_adapters",
return_value=_ADAPTERS_WITH_MANUAL_CONFIG,
):
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert hass.states.get(ENTITY_BINARY_SENSOR) is not None
assert hass.states.get(ENTITY_LIGHT) is not None
# Unload
assert await hass.config_entries.async_unload(config_entry.entry_id)
assert hass.states.get(ENTITY_BINARY_SENSOR).state == STATE_UNAVAILABLE
assert hass.states.get(ENTITY_LIGHT).state == STATE_UNAVAILABLE
# Remove
assert await hass.config_entries.async_remove(config_entry.entry_id)
await hass.async_block_till_done()
assert hass.states.get(ENTITY_BINARY_SENSOR) is None
assert hass.states.get(ENTITY_LIGHT) is None
_ADAPTERS_WITH_MANUAL_CONFIG_ONE_FAILING = [
{
"auto": True,
"index": 1,
"default": False,
"enabled": True,
"ipv4": [{"address": FAIL_TO_BIND_IP, "network_prefix": 23}],
"ipv6": [],
"name": "eth0",
},
{
"auto": True,
"index": 2,
"default": False,
"enabled": True,
"ipv4": [{"address": "192.168.1.5", "network_prefix": 23}],
"ipv6": [],
"name": "eth1",
},
]
async def test_setup_discovery_with_manually_configured_network_adapter_one_fails(
hass: HomeAssistant, caplog
):
"""Test setting up Yeelight by discovery with a manually configured network adapter with one that fails to bind."""
config_entry = MockConfigEntry(
domain=DOMAIN, data={CONF_HOST: IP_ADDRESS, **CONFIG_ENTRY_DATA}
)
config_entry.add_to_hass(hass)
mocked_bulb = _mocked_bulb()
with _patch_discovery(), patch(
f"{MODULE}.AsyncBulb", return_value=mocked_bulb
), patch(
"homeassistant.components.zeroconf.network.async_get_adapters",
return_value=_ADAPTERS_WITH_MANUAL_CONFIG_ONE_FAILING,
):
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert hass.states.get(ENTITY_BINARY_SENSOR) is not None
assert hass.states.get(ENTITY_LIGHT) is not None
# Unload
assert await hass.config_entries.async_unload(config_entry.entry_id)
assert hass.states.get(ENTITY_BINARY_SENSOR).state == STATE_UNAVAILABLE
assert hass.states.get(ENTITY_LIGHT).state == STATE_UNAVAILABLE
# Remove
assert await hass.config_entries.async_remove(config_entry.entry_id)
await hass.async_block_till_done()
assert hass.states.get(ENTITY_BINARY_SENSOR) is None
assert hass.states.get(ENTITY_LIGHT) is None
assert f"Failed to setup listener for {FAIL_TO_BIND_IP}" in caplog.text
async def test_setup_import(hass: HomeAssistant):
"""Test import from yaml."""
mocked_bulb = _mocked_bulb()
name = "yeelight"
with patch(f"{MODULE}.AsyncBulb", return_value=mocked_bulb), _patch_discovery():
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_DEVICES: {
IP_ADDRESS: {
CONF_NAME: name,
CONF_NIGHTLIGHT_SWITCH_TYPE: NIGHTLIGHT_SWITCH_TYPE_LIGHT,
}
}
}
},
)
await hass.async_block_till_done()
assert hass.states.get(f"binary_sensor.{name}_nightlight") is not None
assert hass.states.get(f"light.{name}") is not None
assert hass.states.get(f"light.{name}_nightlight") is not None
entry = hass.config_entries.async_entries(DOMAIN)[0]
assert entry.unique_id == "0x000000000015243f"
assert entry.data[CONF_ID] == "0x000000000015243f"
async def test_unique_ids_device(hass: HomeAssistant):
"""Test Yeelight unique IDs from yeelight device IDs."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: IP_ADDRESS, **CONFIG_ENTRY_DATA, CONF_NIGHTLIGHT_SWITCH: True},
unique_id=ID,
)
config_entry.add_to_hass(hass)
mocked_bulb = _mocked_bulb()
mocked_bulb.bulb_type = BulbType.WhiteTempMood
with _patch_discovery(), patch(f"{MODULE}.AsyncBulb", return_value=mocked_bulb):
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
entity_registry = er.async_get(hass)
assert (
entity_registry.async_get(ENTITY_BINARY_SENSOR).unique_id
== f"{ID}-nightlight_sensor"
)
assert entity_registry.async_get(ENTITY_LIGHT).unique_id == ID
assert entity_registry.async_get(ENTITY_NIGHTLIGHT).unique_id == f"{ID}-nightlight"
assert entity_registry.async_get(ENTITY_AMBILIGHT).unique_id == f"{ID}-ambilight"
async def test_unique_ids_entry(hass: HomeAssistant):
"""Test Yeelight unique IDs from entry IDs."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: IP_ADDRESS, CONF_NIGHTLIGHT_SWITCH: True},
)
config_entry.add_to_hass(hass)
mocked_bulb = _mocked_bulb()
mocked_bulb.bulb_type = BulbType.WhiteTempMood
with _patch_discovery(), patch(f"{MODULE}.AsyncBulb", return_value=mocked_bulb):
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
entity_registry = er.async_get(hass)
assert (
entity_registry.async_get(ENTITY_BINARY_SENSOR).unique_id
== f"{config_entry.entry_id}-nightlight_sensor"
)
assert entity_registry.async_get(ENTITY_LIGHT).unique_id == config_entry.entry_id
assert (
entity_registry.async_get(ENTITY_NIGHTLIGHT).unique_id
== f"{config_entry.entry_id}-nightlight"
)
assert (
entity_registry.async_get(ENTITY_AMBILIGHT).unique_id
== f"{config_entry.entry_id}-ambilight"
)
async def test_bulb_off_while_adding_in_ha(hass: HomeAssistant):
"""Test Yeelight off while adding to ha, for example on HA start."""
config_entry = MockConfigEntry(
domain=DOMAIN, data={**CONFIG_ENTRY_DATA, CONF_HOST: IP_ADDRESS}, unique_id=ID
)
config_entry.add_to_hass(hass)
mocked_bulb = _mocked_bulb(cannot_connect=True)
mocked_bulb.bulb_type = BulbType.WhiteTempMood
with patch(f"{MODULE}.AsyncBulb", return_value=mocked_bulb), _patch_discovery(
no_device=True
), _patch_discovery_timeout(), _patch_discovery_interval():
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.SETUP_RETRY
with patch(f"{MODULE}.AsyncBulb", return_value=_mocked_bulb()), _patch_discovery(
no_device=True
), _patch_discovery_timeout(), _patch_discovery_interval():
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(minutes=2))
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.LOADED
async def test_async_listen_error_late_discovery(hass, caplog):
"""Test the async listen error."""
config_entry = MockConfigEntry(domain=DOMAIN, data=CONFIG_ENTRY_DATA)
config_entry.add_to_hass(hass)
mocked_bulb = _mocked_bulb(cannot_connect=True)
with _patch_discovery(), patch(f"{MODULE}.AsyncBulb", return_value=mocked_bulb):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.SETUP_RETRY
await hass.async_block_till_done()
assert "Waiting for 0x15243f to be discovered" in caplog.text
with _patch_discovery(), patch(f"{MODULE}.AsyncBulb", return_value=_mocked_bulb()):
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(minutes=5))
await hass.async_block_till_done()
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(minutes=10))
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.LOADED
assert config_entry.data[CONF_DETECTED_MODEL] == MODEL
async def test_fail_to_fetch_initial_state(hass, caplog):
"""Test failing to fetch initial state results in a retry."""
config_entry = MockConfigEntry(
domain=DOMAIN, data={CONF_HOST: IP_ADDRESS, **CONFIG_ENTRY_DATA}
)
config_entry.add_to_hass(hass)
mocked_bulb = _mocked_bulb()
del mocked_bulb.last_properties["power"]
del mocked_bulb.last_properties["main_power"]
with _patch_discovery(), patch(f"{MODULE}.AsyncBulb", return_value=mocked_bulb):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.SETUP_RETRY
await hass.async_block_till_done()
assert "Could not fetch initial state; try power cycling the device" in caplog.text
with _patch_discovery(), patch(f"{MODULE}.AsyncBulb", return_value=_mocked_bulb()):
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(minutes=5))
await hass.async_block_till_done()
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(minutes=10))
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.LOADED
async def test_unload_before_discovery(hass, caplog):
"""Test unloading before discovery."""
config_entry = MockConfigEntry(domain=DOMAIN, data=CONFIG_ENTRY_DATA)
config_entry.add_to_hass(hass)
mocked_bulb = _mocked_bulb(cannot_connect=True)
with _patch_discovery(no_device=True), patch(
f"{MODULE}.AsyncBulb", return_value=mocked_bulb
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.SETUP_RETRY
await hass.config_entries.async_unload(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.NOT_LOADED
async def test_async_listen_error_has_host_with_id(hass: HomeAssistant):
"""Test the async listen error."""
config_entry = MockConfigEntry(
domain=DOMAIN, data={CONF_ID: ID, CONF_HOST: "127.0.0.1"}
)
config_entry.add_to_hass(hass)
with _patch_discovery(
no_device=True
), _patch_discovery_timeout(), _patch_discovery_interval(), patch(
f"{MODULE}.AsyncBulb", return_value=_mocked_bulb(cannot_connect=True)
):
await hass.config_entries.async_setup(config_entry.entry_id)
assert config_entry.state is ConfigEntryState.SETUP_RETRY
async def test_async_listen_error_has_host_without_id(hass: HomeAssistant):
"""Test the async listen error but no id."""
config_entry = MockConfigEntry(domain=DOMAIN, data={CONF_HOST: "127.0.0.1"})
config_entry.add_to_hass(hass)
with _patch_discovery(
no_device=True
), _patch_discovery_timeout(), _patch_discovery_interval(), patch(
f"{MODULE}.AsyncBulb", return_value=_mocked_bulb(cannot_connect=True)
):
await hass.config_entries.async_setup(config_entry.entry_id)
assert config_entry.state is ConfigEntryState.SETUP_RETRY
async def test_async_setup_with_missing_id(hass: HomeAssistant):
"""Test that setting adds the missing CONF_ID from unique_id."""
config_entry = MockConfigEntry(
domain=DOMAIN,
unique_id=ID,
data={CONF_HOST: "127.0.0.1"},
options={CONF_NAME: "Test name"},
)
config_entry.add_to_hass(hass)
with _patch_discovery(), _patch_discovery_timeout(), _patch_discovery_interval(), patch(
f"{MODULE}.AsyncBulb", return_value=_mocked_bulb(cannot_connect=True)
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.SETUP_RETRY
assert config_entry.data[CONF_ID] == ID
with _patch_discovery(), _patch_discovery_timeout(), _patch_discovery_interval(), patch(
f"{MODULE}.AsyncBulb", return_value=_mocked_bulb()
):
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(minutes=2))
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.LOADED
async def test_async_setup_with_missing_unique_id(hass: HomeAssistant):
"""Test that setting adds the missing unique_id from CONF_ID."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: "127.0.0.1", CONF_ID: ID},
options={CONF_NAME: "Test name"},
)
config_entry.add_to_hass(hass)
with _patch_discovery(), _patch_discovery_timeout(), _patch_discovery_interval(), patch(
f"{MODULE}.AsyncBulb", return_value=_mocked_bulb(cannot_connect=True)
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.SETUP_RETRY
assert config_entry.unique_id == ID
with _patch_discovery(), _patch_discovery_timeout(), _patch_discovery_interval(), patch(
f"{MODULE}.AsyncBulb", return_value=_mocked_bulb()
):
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(minutes=2))
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.LOADED
async def test_connection_dropped_resyncs_properties(hass: HomeAssistant):
"""Test handling a connection drop results in a property resync."""
config_entry = MockConfigEntry(
domain=DOMAIN,
unique_id=ID,
data={CONF_HOST: "127.0.0.1"},
options={CONF_NAME: "Test name"},
)
config_entry.add_to_hass(hass)
mocked_bulb = _mocked_bulb()
with _patch_discovery(), _patch_discovery_timeout(), _patch_discovery_interval(), patch(
f"{MODULE}.AsyncBulb", return_value=mocked_bulb
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert len(mocked_bulb.async_get_properties.mock_calls) == 1
mocked_bulb._async_callback({KEY_CONNECTED: False})
await hass.async_block_till_done()
assert hass.states.get("light.test_name").state == STATE_UNAVAILABLE
assert len(mocked_bulb.async_get_properties.mock_calls) == 1
mocked_bulb._async_callback({KEY_CONNECTED: True})
async_fire_time_changed(
hass, dt_util.utcnow() + timedelta(seconds=STATE_CHANGE_TIME)
)
await hass.async_block_till_done()
assert hass.states.get("light.test_name").state == STATE_ON
assert len(mocked_bulb.async_get_properties.mock_calls) == 2
async def test_oserror_on_first_update_results_in_unavailable(hass: HomeAssistant):
"""Test that an OSError on first update results in unavailable."""
config_entry = MockConfigEntry(
domain=DOMAIN,
unique_id=ID,
data={CONF_HOST: "127.0.0.1"},
options={CONF_NAME: "Test name"},
)
config_entry.add_to_hass(hass)
mocked_bulb = _mocked_bulb()
mocked_bulb.async_get_properties = AsyncMock(side_effect=OSError)
with _patch_discovery(), _patch_discovery_timeout(), _patch_discovery_interval(), patch(
f"{MODULE}.AsyncBulb", return_value=mocked_bulb
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert hass.states.get("light.test_name").state == STATE_UNAVAILABLE
@pytest.mark.parametrize("exception", [BulbException, asyncio.TimeoutError])
async def test_non_oserror_exception_on_first_update(
hass: HomeAssistant, exception: Exception
):
"""Test that an exceptions other than OSError on first update do not result in unavailable.
The unavailable state will come as a push update in this case
"""
config_entry = MockConfigEntry(
domain=DOMAIN,
unique_id=ID,
data={CONF_HOST: "127.0.0.1"},
options={CONF_NAME: "Test name"},
)
config_entry.add_to_hass(hass)
mocked_bulb = _mocked_bulb()
mocked_bulb.async_get_properties = AsyncMock(side_effect=exception)
with _patch_discovery(), _patch_discovery_timeout(), _patch_discovery_interval(), patch(
f"{MODULE}.AsyncBulb", return_value=mocked_bulb
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert hass.states.get("light.test_name").state != STATE_UNAVAILABLE
async def test_async_setup_with_discovery_not_working(hass: HomeAssistant):
"""Test we can setup even if discovery is broken."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: "127.0.0.1", CONF_ID: ID},
options={},
unique_id=ID,
)
config_entry.add_to_hass(hass)
with _patch_discovery(
no_device=True
), _patch_discovery_timeout(), _patch_discovery_interval(), patch(
f"{MODULE}.AsyncBulb", return_value=_mocked_bulb()
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.LOADED
assert hass.states.get("light.yeelight_color_0x15243f").state == STATE_ON
| mezz64/home-assistant | tests/components/yeelight/test_init.py | Python | apache-2.0 | 22,950 | 0.001438 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""HierarchicalController Class.
The HierarchicalController encompasses the entire lifecycle of training the
device placement policy, including generating op embeddings, getting groups for
each op, placing those groups and running the predicted placements.
Different assignment models can inherit from this class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops as tf_ops
from tensorflow.python.grappler.controller import Controller
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.summary import summary
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import training_util
class PlacerParams(object):
"""Class to hold a set of placement parameters as name-value pairs.
A typical usage is as follows:
```python
# Create a PlacerParams object specifying names and values of the model
# parameters:
params = PlacerParams(hidden_size=128, decay_steps=50)
# The parameters are available as attributes of the PlacerParams object:
  params.hidden_size ==> 128
  params.decay_steps ==> 50
```
"""
def __init__(self, **kwargs):
"""Create an instance of `PlacerParams` from keyword arguments.
    The keyword arguments specify name-value pairs for the parameters.
The parameter types are inferred from the type of the values passed.
The parameter names are added as attributes of `PlacerParams` object,
and they can be accessed directly with the dot notation `params._name_`.
Example:
```python
# Define 1 parameter: 'hidden_size'
params = PlacerParams(hidden_size=128)
params.hidden_size ==> 128
```
Args:
**kwargs: Key-value pairs where the key is the parameter name and
the value is the value for the parameter.
"""
for name, value in six.iteritems(kwargs):
self.add_param(name, value)
def add_param(self, name, value):
"""Adds {name, value} pair to hyperparameters.
Args:
name: Name of the hyperparameter.
value: Value of the hyperparameter. Can be one of the following types:
int, float, string, int list, float list, or string list.
Raises:
ValueError: if one of the arguments is invalid.
"""
# Keys in kwargs are unique, but 'name' could be the name of a pre-existing
# attribute of this object. In that case we refuse to use it as a
# parameter name.
if getattr(self, name, None) is not None:
raise ValueError("Parameter name is reserved: %s" % name)
setattr(self, name, value)
def hierarchical_controller_hparams():
"""Hyperparameters for hierarchical planner."""
return PlacerParams(
hidden_size=512,
forget_bias_init=1.0,
temperature=1.0,
logits_std_noise=0.5,
stop_noise_step=750,
decay_steps=50,
max_num_outputs=5,
max_output_size=5,
tanh_constant=1.0,
adj_embed_dim=20,
grouping_hidden_size=64,
num_groups=None,
bi_lstm=True,
failing_signal=100,
stop_sampling=500,
start_with_failing_signal=True,
always_update_baseline=False,
bl_dec=0.9,
grad_bound=1.0,
lr=0.1,
lr_dec=0.95,
start_decay_step=400,
optimizer_type="adam",
stop_updating_after_steps=1000,
name="hierarchical_controller",
keep_prob=1.0,
reward_function="sqrt",
seed=1234,
# distributed training params
num_children=1)
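# A minimal usage sketch (added; not part of the original module, values are
# hypothetical): the defaults above are plain attributes on the returned
# PlacerParams object, so they can be adjusted per experiment:
#
#   hparams = hierarchical_controller_hparams()
#   hparams.lr = 0.05                     # override an existing parameter
#   hparams.add_param("log_every", 100)   # register an additional parameter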
class HierarchicalController(Controller):
"""HierarchicalController class."""
def __init__(self, hparams, item, cluster, controller_id=0):
"""HierarchicalController class initializer.
Args:
hparams: All hyper-parameters.
item: The metagraph to place.
cluster: The cluster of hardware devices to optimize for.
controller_id: the id of the controller in a multi-controller setup.
"""
super(HierarchicalController, self).__init__(item, cluster)
self.ctrl_id = controller_id
self.hparams = hparams
if self.hparams.num_groups is None:
self.num_groups = min(256, 20 * self.num_devices)
else:
self.num_groups = self.hparams.num_groups
# creates self.op_embeddings and self.type_dict
self.create_op_embeddings(verbose=False)
# TODO(azalia) clean up embedding/group_embedding_size names
self.group_emb_size = (
2 * self.num_groups + len(self.type_dict) +
self.hparams.max_num_outputs * self.hparams.max_output_size)
self.embedding_size = self.group_emb_size
self.initializer = init_ops.glorot_uniform_initializer(
seed=self.hparams.seed)
with variable_scope.variable_scope(
self.hparams.name,
initializer=self.initializer,
reuse=variable_scope.AUTO_REUSE):
# define parameters of feedforward
variable_scope.get_variable("w_grouping_ff", [
1 + self.hparams.max_num_outputs * self.hparams.max_output_size +
self.hparams.adj_embed_dim, self.hparams.grouping_hidden_size
])
variable_scope.get_variable(
"w_grouping_softmax",
[self.hparams.grouping_hidden_size, self.num_groups])
if self.hparams.bi_lstm:
variable_scope.get_variable("encoder_lstm_forward", [
self.embedding_size + self.hparams.hidden_size / 2,
2 * self.hparams.hidden_size
])
variable_scope.get_variable("encoder_lstm_backward", [
self.embedding_size + self.hparams.hidden_size / 2,
2 * self.hparams.hidden_size
])
variable_scope.get_variable(
"device_embeddings", [self.num_devices, self.hparams.hidden_size])
variable_scope.get_variable(
"decoder_lstm",
[2 * self.hparams.hidden_size, 4 * self.hparams.hidden_size])
variable_scope.get_variable(
"device_softmax", [2 * self.hparams.hidden_size, self.num_devices])
variable_scope.get_variable("device_go_embedding",
[1, self.hparams.hidden_size])
variable_scope.get_variable(
"encoder_forget_bias",
shape=1,
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(
self.hparams.forget_bias_init))
variable_scope.get_variable(
"decoder_forget_bias",
shape=1,
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(
self.hparams.forget_bias_init))
variable_scope.get_variable(
"attn_w_1", [self.hparams.hidden_size, self.hparams.hidden_size])
variable_scope.get_variable(
"attn_w_2", [self.hparams.hidden_size, self.hparams.hidden_size])
variable_scope.get_variable("attn_v", [self.hparams.hidden_size, 1])
else:
variable_scope.get_variable("encoder_lstm", [
self.embedding_size + self.hparams.hidden_size,
4 * self.hparams.hidden_size
])
variable_scope.get_variable(
"device_embeddings", [self.num_devices, self.hparams.hidden_size])
variable_scope.get_variable(
"decoder_lstm",
[2 * self.hparams.hidden_size, 4 * self.hparams.hidden_size])
variable_scope.get_variable(
"device_softmax", [2 * self.hparams.hidden_size, self.num_devices])
variable_scope.get_variable("device_go_embedding",
[1, self.hparams.hidden_size])
variable_scope.get_variable(
"encoder_forget_bias",
shape=1,
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(
self.hparams.forget_bias_init))
variable_scope.get_variable(
"decoder_forget_bias",
shape=1,
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(
self.hparams.forget_bias_init))
variable_scope.get_variable(
"attn_w_1", [self.hparams.hidden_size, self.hparams.hidden_size])
variable_scope.get_variable(
"attn_w_2", [self.hparams.hidden_size, self.hparams.hidden_size])
variable_scope.get_variable("attn_v", [self.hparams.hidden_size, 1])
seq2seq_input_layer = array_ops.placeholder_with_default(
array_ops.zeros([self.hparams.num_children,
self.num_groups,
self.group_emb_size],
dtypes.float32),
shape=(self.hparams.num_children, self.num_groups, self.group_emb_size))
self.seq2seq_input_layer = seq2seq_input_layer
def compute_reward(self, run_time):
if self.hparams.reward_function == "id":
reward = run_time
elif self.hparams.reward_function == "sqrt":
reward = math.sqrt(run_time)
elif self.hparams.reward_function == "log":
reward = math.log1p(run_time)
else:
raise NotImplementedError(
"Unrecognized reward function '%s', consider your "
"--reward_function flag value." % self.hparams.reward_function)
return reward
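  # Worked example (added; numbers hypothetical): with the default
  # reward_function="sqrt", a measured run_time of 400.0 gives a reward of
  # 20.0, while "log" gives math.log1p(400.0) ~= 5.99 and "id" returns the
  # run time unchanged. Lower rewards are better: best_reward below is
  # updated with a minimum over observed rewards.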
def build_controller(self):
"""RL optimization interface.
Returns:
ops: A dictionary holding handles of the model used for training.
"""
self._global_step = training_util.get_or_create_global_step()
ops = {}
ops["loss"] = 0
failing_signal = self.compute_reward(self.hparams.failing_signal)
ctr = {}
with tf_ops.name_scope("controller_{}".format(self.ctrl_id)):
with variable_scope.variable_scope("controller_{}".format(self.ctrl_id)):
ctr["reward"] = {"value": [], "ph": [], "update": []}
ctr["ready"] = {"value": [], "ph": [], "update": []}
ctr["best_reward"] = {"value": [], "update": []}
for i in range(self.hparams.num_children):
reward_value = variable_scope.get_local_variable(
"reward_{}".format(i),
initializer=0.0,
dtype=dtypes.float32,
trainable=False)
reward_ph = array_ops.placeholder(
dtypes.float32, shape=(), name="reward_ph_{}".format(i))
reward_update = state_ops.assign(
reward_value, reward_ph, use_locking=True)
ctr["reward"]["value"].append(reward_value)
ctr["reward"]["ph"].append(reward_ph)
ctr["reward"]["update"].append(reward_update)
best_reward = variable_scope.get_local_variable(
"best_reward_{}".format(i),
initializer=failing_signal,
dtype=dtypes.float32,
trainable=False)
ctr["best_reward"]["value"].append(best_reward)
ctr["best_reward"]["update"].append(
state_ops.assign(best_reward,
math_ops.minimum(best_reward, reward_update)))
ready_value = variable_scope.get_local_variable(
"ready_{}".format(i),
initializer=True,
dtype=dtypes.bool,
trainable=False)
ready_ph = array_ops.placeholder(
dtypes.bool, shape=(), name="ready_ph_{}".format(i))
ready_update = state_ops.assign(
ready_value, ready_ph, use_locking=True)
ctr["ready"]["value"].append(ready_value)
ctr["ready"]["ph"].append(ready_ph)
ctr["ready"]["update"].append(ready_update)
ctr["grouping_y_preds"], ctr["grouping_log_probs"] = self.get_groupings()
summary.histogram(
"grouping_actions",
array_ops.slice(ctr["grouping_y_preds"]["sample"], [0, 0],
[1, array_ops.shape(self.op_embeddings)[0]]))
with variable_scope.variable_scope("controller_{}".format(self.ctrl_id)):
ctr["baseline"] = variable_scope.get_local_variable(
"baseline",
initializer=failing_signal
if self.hparams.start_with_failing_signal else 0.0,
dtype=dtypes.float32,
trainable=False)
new_baseline = self.hparams.bl_dec * ctr["baseline"] + (
1 - self.hparams.bl_dec) * math_ops.reduce_mean(
ctr["reward"]["value"])
if not self.hparams.always_update_baseline:
baseline_mask = math_ops.less(ctr["reward"]["value"], failing_signal)
selected_reward = array_ops.boolean_mask(ctr["reward"]["value"],
baseline_mask)
selected_baseline = control_flow_ops.cond(
math_ops.reduce_any(baseline_mask),
lambda: math_ops.reduce_mean(selected_reward),
lambda: constant_op.constant(0, dtype=dtypes.float32))
ctr["pos_reward"] = selected_baseline
pos_ = math_ops.less(
constant_op.constant(0, dtype=dtypes.float32), selected_baseline)
selected_baseline = self.hparams.bl_dec * ctr["baseline"] + (
1 - self.hparams.bl_dec) * selected_baseline
selected_baseline = control_flow_ops.cond(
pos_, lambda: selected_baseline, lambda: ctr["baseline"])
new_baseline = control_flow_ops.cond(
math_ops.less(self.global_step,
self.hparams.stop_updating_after_steps),
lambda: new_baseline, lambda: selected_baseline)
ctr["baseline_update"] = state_ops.assign(
ctr["baseline"], new_baseline, use_locking=True)
ctr["y_preds"], ctr["log_probs"] = self.get_placements()
summary.histogram("actions", ctr["y_preds"]["sample"])
mask = math_ops.less(ctr["reward"]["value"], failing_signal)
ctr["loss"] = ctr["reward"]["value"] - ctr["baseline"]
ctr["loss"] *= (
ctr["log_probs"]["sample"] + ctr["grouping_log_probs"]["sample"])
selected_loss = array_ops.boolean_mask(ctr["loss"], mask)
selected_loss = control_flow_ops.cond(
math_ops.reduce_any(mask),
lambda: math_ops.reduce_mean(-selected_loss),
lambda: constant_op.constant(0, dtype=dtypes.float32))
ctr["loss"] = control_flow_ops.cond(
math_ops.less(self.global_step,
self.hparams.stop_updating_after_steps),
lambda: math_ops.reduce_mean(-ctr["loss"]), lambda: selected_loss)
ctr["reward_s"] = math_ops.reduce_mean(ctr["reward"]["value"])
summary.scalar("loss", ctr["loss"])
summary.scalar("avg_reward", ctr["reward_s"])
summary.scalar("best_reward_so_far", best_reward)
summary.scalar(
"advantage",
math_ops.reduce_mean(ctr["reward"]["value"] - ctr["baseline"]))
with variable_scope.variable_scope(
"optimizer", reuse=variable_scope.AUTO_REUSE):
(ctr["train_op"], ctr["lr"], ctr["grad_norm"],
ctr["grad_norms"]) = self._get_train_ops(
ctr["loss"],
tf_ops.get_collection(tf_ops.GraphKeys.TRAINABLE_VARIABLES),
self.global_step,
grad_bound=self.hparams.grad_bound,
lr_init=self.hparams.lr,
lr_dec=self.hparams.lr_dec,
start_decay_step=self.hparams.start_decay_step,
decay_steps=self.hparams.decay_steps,
optimizer_type=self.hparams.optimizer_type)
summary.scalar("gradnorm", ctr["grad_norm"])
summary.scalar("lr", ctr["lr"])
ctr["summary"] = summary.merge_all()
ops["controller"] = ctr
self.ops = ops
return ops
@property
def global_step(self):
return self._global_step
def create_op_embeddings(self, verbose=False):
if verbose:
print("process input graph for op embeddings")
self.num_ops = len(self.important_ops)
# topological sort of important nodes
topo_order = [op.name for op in self.important_ops]
# create index to name for topologicaly sorted important nodes
name_to_topo_order_index = {}
for idx, x in enumerate(topo_order):
name_to_topo_order_index[x] = idx
self.name_to_topo_order_index = name_to_topo_order_index
# create adj matrix
adj_dict = {}
for idx, op in enumerate(self.important_ops):
for output_op in self.get_node_fanout(op):
output_op_name = output_op.name
if output_op_name in self.important_op_names:
if name_to_topo_order_index[op.name] not in adj_dict:
adj_dict[name_to_topo_order_index[op.name]] = []
adj_dict[name_to_topo_order_index[op.name]].extend(
[name_to_topo_order_index[output_op_name], 1])
if output_op_name not in adj_dict:
adj_dict[name_to_topo_order_index[output_op_name]] = []
adj_dict[name_to_topo_order_index[output_op_name]].extend(
[name_to_topo_order_index[op.name], -1])
# get op_type op_output_shape, and adj info
output_embed_dim = (self.hparams.max_num_outputs *
self.hparams.max_output_size)
# TODO(bsteiner): don't filter based on used ops so that we can generalize
# to models that use other types of ops.
used_ops = set()
for node in self.important_ops:
op_type = str(node.op)
used_ops.add(op_type)
self.type_dict = {}
for op_type in self.cluster.ListAvailableOps():
if op_type in used_ops:
self.type_dict[op_type] = len(self.type_dict)
op_types = np.zeros([self.num_ops], dtype=np.int32)
op_output_shapes = np.full(
[self.num_ops, output_embed_dim], -1.0, dtype=np.float32)
for idx, node in enumerate(self.important_ops):
op_types[idx] = self.type_dict[node.op]
# output shape
op_name = node.name
for i, output_prop in enumerate(self.node_properties[op_name]):
if output_prop.shape.__str__() == "<unknown>":
continue
shape = output_prop.shape
for j, dim in enumerate(shape.dim):
if dim.size >= 0:
if i * self.hparams.max_output_size + j >= output_embed_dim:
break
op_output_shapes[idx,
i * self.hparams.max_output_size + j] = dim.size
# adj for padding
op_adj = np.full(
[self.num_ops, self.hparams.adj_embed_dim], 0, dtype=np.float32)
for idx in adj_dict:
neighbors = adj_dict[int(idx)]
min_dim = min(self.hparams.adj_embed_dim, len(neighbors))
padding_size = self.hparams.adj_embed_dim - min_dim
neighbors = neighbors[:min_dim] + [0] * padding_size
op_adj[int(idx)] = neighbors
# op_embedding starts here
op_embeddings = np.zeros(
[
self.num_ops,
1 + self.hparams.max_num_outputs * self.hparams.max_output_size +
self.hparams.adj_embed_dim
],
dtype=np.float32)
for idx, op_name in enumerate(topo_order):
op_embeddings[idx] = np.concatenate(
(np.array([op_types[idx]]), op_output_shapes[idx], op_adj[int(idx)]))
self.op_embeddings = constant_op.constant(
op_embeddings, dtype=dtypes.float32)
if verbose:
print("num_ops = {}".format(self.num_ops))
print("num_types = {}".format(len(self.type_dict)))
def get_groupings(self, *args, **kwargs):
num_children = self.hparams.num_children
with variable_scope.variable_scope("controller_{}".format(self.ctrl_id)):
grouping_actions_cache = variable_scope.get_local_variable(
"grouping_actions_cache",
initializer=init_ops.zeros_initializer,
dtype=dtypes.int32,
shape=[num_children, self.num_ops],
trainable=False)
input_layer = self.op_embeddings
input_layer = array_ops.expand_dims(input_layer, 0)
feed_ff_input_layer = array_ops.tile(input_layer, [num_children, 1, 1])
grouping_actions, grouping_log_probs = {}, {}
grouping_actions["sample"], grouping_log_probs[
"sample"] = self.make_grouping_predictions(feed_ff_input_layer)
grouping_actions["sample"] = state_ops.assign(grouping_actions_cache,
grouping_actions["sample"])
self.grouping_actions_cache = grouping_actions_cache
return grouping_actions, grouping_log_probs
def make_grouping_predictions(self, input_layer, reuse=None):
"""model that predicts grouping (grouping_actions).
Args:
input_layer: group_input_layer
reuse: reuse
Returns:
grouping_actions: actions
grouping_log_probs: log probabilities corresponding to actions
"""
with variable_scope.variable_scope(self.hparams.name, reuse=True):
# input_layer: tensor of size [1, num_ops, hidden_size]
w_grouping_ff = variable_scope.get_variable("w_grouping_ff")
w_grouping_softmax = variable_scope.get_variable("w_grouping_softmax")
batch_size = array_ops.shape(input_layer)[0]
embedding_dim = array_ops.shape(input_layer)[2]
reshaped = array_ops.reshape(input_layer,
[batch_size * self.num_ops, embedding_dim])
ff_output = math_ops.matmul(reshaped, w_grouping_ff)
logits = math_ops.matmul(ff_output, w_grouping_softmax)
if self.hparams.logits_std_noise > 0:
num_in_logits = math_ops.cast(
array_ops.size(logits), dtype=dtypes.float32)
avg_norm = math_ops.divide(
linalg_ops.norm(logits), math_ops.sqrt(num_in_logits))
logits_noise = random_ops.random_normal(
array_ops.shape(logits),
stddev=self.hparams.logits_std_noise * avg_norm)
logits = control_flow_ops.cond(
self.global_step > self.hparams.stop_noise_step, lambda: logits,
lambda: logits + logits_noise)
logits = array_ops.reshape(logits,
[batch_size * self.num_ops, self.num_groups])
actions = random_ops.multinomial(logits, 1, seed=self.hparams.seed)
actions = math_ops.to_int32(actions)
actions = array_ops.reshape(actions, [batch_size, self.num_ops])
action_label = array_ops.reshape(actions, [-1])
log_probs = nn_ops.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=action_label)
log_probs = array_ops.reshape(log_probs, [batch_size, -1])
log_probs = math_ops.reduce_sum(log_probs, 1)
grouping_actions = actions
grouping_log_probs = log_probs
return grouping_actions, grouping_log_probs
def create_group_embeddings(self, grouping_actions, verbose=False):
"""Approximating the blocks of a TF graph from a graph_def.
Args:
grouping_actions: grouping predictions.
      verbose: whether to print debug information.
Returns:
groups: list of groups.
"""
groups = [
self._create_group_embeddings(grouping_actions, i, verbose) for
i in range(self.hparams.num_children)
]
return np.stack(groups, axis=0)
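  # Shape note (added): the stacked result is a float array of shape
  # [num_children, num_groups, group_emb_size], matching the
  # placeholder_with_default declared for seq2seq_input_layer in __init__.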
def _create_group_embeddings(self, grouping_actions, child_id, verbose=False):
"""Approximating the blocks of a TF graph from a graph_def for each child.
Args:
grouping_actions: grouping predictions.
child_id: child_id for the group.
      verbose: whether to print debug information.
Returns:
groups: group embedding for the child_id.
"""
if verbose:
print("Processing input_graph")
# TODO(azalia): Build inter-adjacencies dag matrix.
# record dag_matrix
dag_matrix = np.zeros([self.num_groups, self.num_groups], dtype=np.float32)
for op in self.important_ops:
topo_op_index = self.name_to_topo_order_index[op.name]
group_index = grouping_actions[child_id][topo_op_index]
for output_op in self.get_node_fanout(op):
if output_op.name not in self.important_op_names:
continue
output_group_index = (
grouping_actions[child_id][self.name_to_topo_order_index[
output_op.name]])
dag_matrix[group_index, output_group_index] += 1.0
num_connections = np.sum(dag_matrix)
num_intra_group_connections = dag_matrix.trace()
num_inter_group_connections = num_connections - num_intra_group_connections
if verbose:
print("grouping evaluation metric")
print(("num_connections={} num_intra_group_connections={} "
"num_inter_group_connections={}").format(
num_connections, num_intra_group_connections,
num_inter_group_connections))
self.dag_matrix = dag_matrix
# output_shape
op_output_shapes = np.zeros(
[
len(self.important_ops),
self.hparams.max_num_outputs * self.hparams.max_output_size
],
dtype=np.float32)
for idx, op in enumerate(self.important_ops):
for i, output_properties in enumerate(self.node_properties[op.name]):
if output_properties.shape.__str__() == "<unknown>":
continue
if i > self.hparams.max_num_outputs:
break
shape = output_properties.shape
for j, dim in enumerate(shape.dim):
if dim.size > 0:
k = i * self.hparams.max_output_size + j
if k >= self.hparams.max_num_outputs * self.hparams.max_output_size:
break
op_output_shapes[idx, k] = dim.size
# group_embedding
group_embedding = np.zeros(
[
self.num_groups, len(self.type_dict) +
self.hparams.max_num_outputs * self.hparams.max_output_size
],
dtype=np.float32)
for op_index, op in enumerate(self.important_ops):
group_index = grouping_actions[child_id][
self.name_to_topo_order_index[op.name]]
type_name = str(op.op)
type_index = self.type_dict[type_name]
group_embedding[group_index, type_index] += 1
group_embedding[group_index, :self.hparams.max_num_outputs * self.hparams.
max_output_size] += (
op_output_shapes[op_index])
grouping_adjacencies = np.concatenate(
[dag_matrix, np.transpose(dag_matrix)], axis=1)
group_embedding = np.concatenate(
[grouping_adjacencies, group_embedding], axis=1)
group_normalizer = np.amax(group_embedding, axis=1, keepdims=True)
group_embedding /= (group_normalizer + 1.0)
if verbose:
print("Finished Processing Input Graph")
return group_embedding
def get_placements(self, *args, **kwargs):
num_children = self.hparams.num_children
with variable_scope.variable_scope("controller_{}".format(self.ctrl_id)):
actions_cache = variable_scope.get_local_variable(
"actions_cache",
initializer=init_ops.zeros_initializer,
dtype=dtypes.int32,
shape=[num_children, self.num_groups],
trainable=False)
x = self.seq2seq_input_layer
last_c, last_h, attn_mem = self.encode(x)
actions, log_probs = {}, {}
actions["sample"], log_probs["sample"] = (
self.decode(
x, last_c, last_h, attn_mem, mode="sample"))
actions["target"], log_probs["target"] = (
self.decode(
x,
last_c,
last_h,
attn_mem,
mode="target",
y=actions_cache))
actions["greedy"], log_probs["greedy"] = (
self.decode(
x, last_c, last_h, attn_mem, mode="greedy"))
actions["sample"] = control_flow_ops.cond(
self.global_step < self.hparams.stop_sampling,
lambda: state_ops.assign(actions_cache, actions["sample"]),
lambda: state_ops.assign(actions_cache, actions["target"]))
self.actions_cache = actions_cache
return actions, log_probs
def encode(self, x):
"""Encoder using LSTM.
Args:
x: tensor of size [num_children, num_groups, embedding_size]
Returns:
last_c, last_h: tensors of size [num_children, hidden_size], the final
LSTM states
      attn_mem: tensor of size [num_children, num_groups, hidden_size], the
        attention memory, i.e. the concatenation of all hidden states, linearly
        transformed by the attention matrix attn_w_1.
"""
if self.hparams.bi_lstm:
with variable_scope.variable_scope(self.hparams.name, reuse=True):
w_lstm_forward = variable_scope.get_variable("encoder_lstm_forward")
w_lstm_backward = variable_scope.get_variable("encoder_lstm_backward")
forget_bias = variable_scope.get_variable("encoder_forget_bias")
attn_w_1 = variable_scope.get_variable("attn_w_1")
else:
with variable_scope.variable_scope(self.hparams.name, reuse=True):
w_lstm = variable_scope.get_variable("encoder_lstm")
forget_bias = variable_scope.get_variable("encoder_forget_bias")
attn_w_1 = variable_scope.get_variable("attn_w_1")
embedding_size = array_ops.shape(x)[2]
signals = array_ops.split(x, self.num_groups, axis=1)
for i in range(len(signals)):
signals[i] = array_ops.reshape(
signals[i], [self.hparams.num_children, embedding_size])
if self.hparams.bi_lstm:
def body(i, prev_c_forward, prev_h_forward, prev_c_backward,
prev_h_backward):
"""while loop for LSTM."""
signal_forward = signals[i]
next_c_forward, next_h_forward = lstm(signal_forward, prev_c_forward,
prev_h_forward, w_lstm_forward,
forget_bias)
signal_backward = signals[self.num_groups - 1 - i]
next_c_backward, next_h_backward = lstm(
signal_backward, prev_c_backward, prev_h_backward, w_lstm_backward,
forget_bias)
next_h = array_ops.concat([next_h_forward, next_h_backward], axis=1)
all_h.append(next_h)
return (next_c_forward, next_h_forward, next_c_backward,
next_h_backward)
c_forward = array_ops.zeros(
[self.hparams.num_children, self.hparams.hidden_size / 2],
dtype=dtypes.float32)
h_forward = array_ops.zeros(
[self.hparams.num_children, self.hparams.hidden_size / 2],
dtype=dtypes.float32)
c_backward = array_ops.zeros(
[self.hparams.num_children, self.hparams.hidden_size / 2],
dtype=dtypes.float32)
h_backward = array_ops.zeros(
[self.hparams.num_children, self.hparams.hidden_size / 2],
dtype=dtypes.float32)
all_h = []
for i in range(0, self.num_groups):
c_forward, h_forward, c_backward, h_backward = body(
i, c_forward, h_forward, c_backward, h_backward)
last_c = array_ops.concat([c_forward, c_backward], axis=1)
last_h = array_ops.concat([h_forward, h_backward], axis=1)
attn_mem = array_ops.stack(all_h)
else:
def body(i, prev_c, prev_h):
signal = signals[i]
next_c, next_h = lstm(signal, prev_c, prev_h, w_lstm, forget_bias)
all_h.append(next_h)
return next_c, next_h
c = array_ops.zeros(
[self.hparams.num_children, self.hparams.hidden_size],
dtype=dtypes.float32)
h = array_ops.zeros(
[self.hparams.num_children, self.hparams.hidden_size],
dtype=dtypes.float32)
all_h = []
for i in range(0, self.num_groups):
c, h = body(i, c, h)
last_c = c
last_h = h
attn_mem = array_ops.stack(all_h)
attn_mem = array_ops.transpose(attn_mem, [1, 0, 2])
attn_mem = array_ops.reshape(
attn_mem,
[self.hparams.num_children * self.num_groups, self.hparams.hidden_size])
attn_mem = math_ops.matmul(attn_mem, attn_w_1)
attn_mem = array_ops.reshape(
attn_mem,
[self.hparams.num_children, self.num_groups, self.hparams.hidden_size])
return last_c, last_h, attn_mem
def decode(self,
x,
last_c,
last_h,
attn_mem,
mode="target",
y=None):
"""Decoder using LSTM.
Args:
x: tensor of size [num_children, num_groups, embedding_size].
last_c: tensor of size [num_children, hidden_size], the final LSTM states
computed by self.encoder.
last_h: same as last_c.
attn_mem: tensor of size [num_children, num_groups, hidden_size].
mode: "target" or "sample".
y: tensor of size [num_children, num_groups], the device placements.
Returns:
      actions: tensor of size [num_children, num_groups], the placements of
        devices.
      log_probs: tensor of size [num_children], the accumulated cross-entropy
        terms for the chosen device placements.
    """
with variable_scope.variable_scope(self.hparams.name, reuse=True):
w_lstm = variable_scope.get_variable("decoder_lstm")
forget_bias = variable_scope.get_variable("decoder_forget_bias")
device_embeddings = variable_scope.get_variable("device_embeddings")
device_softmax = variable_scope.get_variable("device_softmax")
device_go_embedding = variable_scope.get_variable("device_go_embedding")
attn_w_2 = variable_scope.get_variable("attn_w_2")
attn_v = variable_scope.get_variable("attn_v")
actions = tensor_array_ops.TensorArray(
dtypes.int32,
size=self.num_groups,
infer_shape=False,
clear_after_read=False)
# pylint: disable=unused-argument
def condition(i, *args):
return math_ops.less(i, self.num_groups)
# pylint: disable=missing-docstring
def body(i, prev_c, prev_h, actions, log_probs):
# pylint: disable=g-long-lambda
signal = control_flow_ops.cond(
math_ops.equal(i, 0),
lambda: array_ops.tile(device_go_embedding,
[self.hparams.num_children, 1]),
lambda: embedding_ops.embedding_lookup(device_embeddings,
actions.read(i - 1))
)
if self.hparams.keep_prob is not None:
signal = nn_ops.dropout(signal, self.hparams.keep_prob)
next_c, next_h = lstm(signal, prev_c, prev_h, w_lstm, forget_bias)
query = math_ops.matmul(next_h, attn_w_2)
query = array_ops.reshape(
query, [self.hparams.num_children, 1, self.hparams.hidden_size])
query = math_ops.tanh(query + attn_mem)
query = array_ops.reshape(query, [
self.hparams.num_children * self.num_groups, self.hparams.hidden_size
])
query = math_ops.matmul(query, attn_v)
query = array_ops.reshape(query,
[self.hparams.num_children, self.num_groups])
query = nn_ops.softmax(query)
query = array_ops.reshape(query,
[self.hparams.num_children, self.num_groups, 1])
query = math_ops.reduce_sum(attn_mem * query, axis=1)
query = array_ops.concat([next_h, query], axis=1)
logits = math_ops.matmul(query, device_softmax)
logits /= self.hparams.temperature
if self.hparams.tanh_constant > 0:
logits = math_ops.tanh(logits) * self.hparams.tanh_constant
if self.hparams.logits_std_noise > 0:
num_in_logits = math_ops.cast(
array_ops.size(logits), dtype=dtypes.float32)
avg_norm = math_ops.divide(
linalg_ops.norm(logits), math_ops.sqrt(num_in_logits))
logits_noise = random_ops.random_normal(
array_ops.shape(logits),
stddev=self.hparams.logits_std_noise * avg_norm)
logits = control_flow_ops.cond(
self.global_step > self.hparams.stop_noise_step, lambda: logits,
lambda: logits + logits_noise)
if mode == "sample":
next_y = random_ops.multinomial(logits, 1, seed=self.hparams.seed)
elif mode == "greedy":
next_y = math_ops.argmax(logits, 1)
elif mode == "target":
next_y = array_ops.slice(y, [0, i], [-1, 1])
else:
raise NotImplementedError
next_y = math_ops.to_int32(next_y)
next_y = array_ops.reshape(next_y, [self.hparams.num_children])
actions = actions.write(i, next_y)
log_probs += nn_ops.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=next_y)
return i + 1, next_c, next_h, actions, log_probs
loop_vars = [
constant_op.constant(0, dtype=dtypes.int32), last_c, last_h, actions,
array_ops.zeros([self.hparams.num_children], dtype=dtypes.float32)
]
loop_outputs = control_flow_ops.while_loop(condition, body, loop_vars)
last_c = loop_outputs[-4]
last_h = loop_outputs[-3]
actions = loop_outputs[-2].stack()
actions = array_ops.transpose(actions, [1, 0])
log_probs = loop_outputs[-1]
return actions, log_probs
def eval_placement(self,
sess,
child_id=0,
verbose=False):
grouping_actions, actions = sess.run([
self.grouping_actions_cache,
self.actions_cache
])
grouping_actions = grouping_actions[child_id]
actions = actions[child_id]
if verbose:
global_step = sess.run(self.global_step)
if global_step % 100 == 0:
log_string = "op group assignments: "
for a in grouping_actions:
log_string += "{} ".format(a)
print(log_string[:-1])
log_string = "group device assignments: "
for a in actions:
log_string += "{} ".format(a)
print(log_string[:-1])
for op in self.important_ops:
topo_order_index = self.name_to_topo_order_index[op.name]
group_index = grouping_actions[topo_order_index]
op.device = self.devices[actions[group_index]].name
try:
_, run_time, _ = self.cluster.MeasureCosts(self.item)
except errors.ResourceExhaustedError:
run_time = self.hparams.failing_signal
return run_time
def update_reward(self,
sess,
run_time,
child_id=0,
verbose=False):
reward = self.compute_reward(run_time)
controller_ops = self.ops["controller"]
_, best_reward = sess.run(
[
controller_ops["reward"]["update"][child_id],
controller_ops["best_reward"]["update"][child_id]
],
feed_dict={
controller_ops["reward"]["ph"][child_id]: reward,
})
if verbose:
print(("run_time={:<.5f} reward={:<.5f} "
"best_reward={:<.5f}").format(run_time, reward, best_reward))
# Reward is a double, best_reward a float: allow for some slack in the
# comparison.
updated = abs(best_reward - reward) < 1e-6
return updated
def generate_grouping(self, sess):
controller_ops = self.ops["controller"]
grouping_actions = sess.run(controller_ops["grouping_y_preds"]["sample"])
return grouping_actions
def generate_placement(self, grouping, sess):
controller_ops = self.ops["controller"]
feed_seq2seq_input_dict = {}
feed_seq2seq_input_dict[self.seq2seq_input_layer] = grouping
sess.run(
controller_ops["y_preds"]["sample"], feed_dict=feed_seq2seq_input_dict)
def process_reward(self, sess):
controller_ops = self.ops["controller"]
run_ops = [
controller_ops["loss"], controller_ops["lr"],
controller_ops["grad_norm"], controller_ops["grad_norms"],
controller_ops["train_op"]
]
sess.run(run_ops)
sess.run(controller_ops["baseline_update"])
def _get_train_ops(self,
loss,
tf_variables,
global_step,
grad_bound=1.25,
lr_init=1e-3,
lr_dec=0.9,
start_decay_step=10000,
decay_steps=100,
optimizer_type="adam"):
"""Loss optimizer.
Args:
loss: scalar tf tensor
tf_variables: list of training variables, typically
tf.trainable_variables()
global_step: global_step
grad_bound: max gradient norm
lr_init: initial learning rate
      lr_dec: learning rate decay coefficient
start_decay_step: start decaying learning rate after this many steps
decay_steps: apply decay rate factor at this step intervals
optimizer_type: optimizer type should be either adam or sgd
Returns:
train_op: training op
learning_rate: scalar learning rate tensor
grad_norm: l2 norm of the gradient vector
all_grad_norms: l2 norm of each component
"""
lr_gstep = global_step - start_decay_step
def f1():
return constant_op.constant(lr_init)
def f2():
return learning_rate_decay.exponential_decay(lr_init, lr_gstep,
decay_steps, lr_dec, True)
learning_rate = control_flow_ops.cond(
math_ops.less(global_step, start_decay_step),
f1,
f2,
name="learning_rate")
if optimizer_type == "adam":
opt = adam.AdamOptimizer(learning_rate)
elif optimizer_type == "sgd":
opt = gradient_descent.GradientDescentOptimizer(learning_rate)
grads_and_vars = opt.compute_gradients(loss, tf_variables)
grad_norm = clip_ops.global_norm([g for g, v in grads_and_vars])
all_grad_norms = {}
clipped_grads = []
clipped_rate = math_ops.maximum(grad_norm / grad_bound, 1.0)
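    # Clipping note (added; numbers hypothetical): gradients are rescaled by
    # max(grad_norm / grad_bound, 1.0). With grad_bound=1.0 and a global norm
    # of 4.0, every gradient is divided by 4.0, bringing the global norm back
    # to the bound; norms already at or below the bound are left untouched.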
for g, v in grads_and_vars:
if g is not None:
if isinstance(g, tf_ops.IndexedSlices):
clipped = g.values / clipped_rate
norm_square = math_ops.reduce_sum(clipped * clipped)
clipped = tf_ops.IndexedSlices(clipped, g.indices)
else:
clipped = g / clipped_rate
norm_square = math_ops.reduce_sum(clipped * clipped)
all_grad_norms[v.name] = math_ops.sqrt(norm_square)
clipped_grads.append((clipped, v))
train_op = opt.apply_gradients(clipped_grads, global_step)
return train_op, learning_rate, grad_norm, all_grad_norms
def lstm(x, prev_c, prev_h, w_lstm, forget_bias):
"""LSTM cell.
Args:
x: tensors of size [num_children, hidden_size].
prev_c: tensors of size [num_children, hidden_size].
prev_h: same as prev_c.
    w_lstm: weight matrix applied to the concatenated [x, prev_h] input.
    forget_bias: scalar bias added to the forget gate before the sigmoid.
  Returns:
    next_c: the next cell state, same size as prev_c.
    next_h: the next hidden state, same size as prev_h.
"""
ifog = math_ops.matmul(array_ops.concat([x, prev_h], axis=1), w_lstm)
i, f, o, g = array_ops.split(ifog, 4, axis=1)
i = math_ops.sigmoid(i)
f = math_ops.sigmoid(f + forget_bias)
o = math_ops.sigmoid(o)
g = math_ops.tanh(g)
next_c = i * g + f * prev_c
next_h = o * math_ops.tanh(next_c)
return next_c, next_h
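# Reference (added): the gates above implement the standard LSTM update with a
# single fused matmul, split into the four column blocks of w_lstm:
#   i = sigmoid(W_i [x; h_prev])        f = sigmoid(W_f [x; h_prev] + forget_bias)
#   o = sigmoid(W_o [x; h_prev])        g = tanh(W_g [x; h_prev])
#   next_c = i * g + f * prev_c         next_h = o * tanh(next_c)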
| nburn42/tensorflow | tensorflow/python/grappler/hierarchical_controller.py | Python | apache-2.0 | 43,598 | 0.005069 |
# -*- coding: utf-8 -*-
import json
import os
import random
import requests
import re
import subprocess
import string
from django import forms
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.core.mail import send_mail
from django.views.decorators.csrf import csrf_exempt
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from constance import config
from register.models import Registration, Batch, colleges
new_message = u"""نشكرك على التسجيل في السحابة الطبية
اسم المستخدم: %s
كلمة السر: %s
رابط السحابة: https://ksauhs-med.com/
آملين أن تجد فيها ما يفيد!
"""
forgotten_message = u"""هذه معلوماتك الجديدة للدخول إلى السحابة الطبية:
اسم المستخدم: %s
كلمة السر: %s
رابط السحابة: https://ksauhs-med.com/
آملين أن تجد فيها ما يفيد!
"""
class RegistrationForm(forms.ModelForm):
college = forms.CharField(label=u'الكلية',
max_length=1,
widget=forms.Select(choices=colleges))
number = forms.IntegerField(label=u"الدفعة", widget=forms.Select(choices=[(i, i) for i in range(1, 17)]))
def clean(self):
cleaned_data = super(RegistrationForm, self).clean()
batch_msg = u"الدفعة التي اخترت غير موجودة."
if 'college' in cleaned_data and 'number' in cleaned_data:
try:
Batch.objects.get(
college=cleaned_data['college'],
number=int(cleaned_data['number']))
except Batch.DoesNotExist:
self._errors['college'] = self.error_class([batch_msg])
self._errors['number'] = self.error_class([batch_msg])
del cleaned_data['college']
del cleaned_data['number']
return cleaned_data
def save(self):
new_registration = super(RegistrationForm, self).save()
batch = Batch.objects.get(
college=self.cleaned_data['college'],
number=int(self.cleaned_data['number']),
)
new_registration.group = batch
new_registration.save()
return new_registration
class Meta:
model = Registration
        fields = ['email', 'college', 'number', 'university_id']
widgets = {
'university_id': forms.TextInput(),
}
class ResetPasswordForm(forms.Form):
email = forms.EmailField(label=u'بريدك الجامعي', max_length=100)
@csrf_exempt
def register(request):
if request.method == 'POST':
password = generate_password()
initial_registration = Registration(password=password)
form = RegistrationForm(request.POST,
instance=initial_registration)
if form.is_valid():
email = form.cleaned_data['email']
if not email.endswith('ksau-hs.edu.sa'):
context = {'form': form, 'error_message': 'university_email'}
elif Registration.objects.filter(email__iexact=email, is_successful=True):
context = {'form': form, 'error_message': u'already_registered'}
else:
user = email.split('@')[0].lower()
registration = form.save()
group = str(registration.group)
if createuser(user, password, group):
registration.is_successful = True
registration.save()
send_mail(u'حسابك على السحابة الطبية', new_message %
(user, password), 'info@ksauhs-med.com',
[email], fail_silently=False)
return HttpResponseRedirect(reverse('register:thanks'))
else:
context = {'form': form, 'error_message': 'unknown'}
else:
context = {'form': form}
else:
form = RegistrationForm()
context = {'form': form}
return render(request, 'register/register.html', context)
@csrf_exempt
def forgotten(request):
if request.method == 'POST':
form = ResetPasswordForm(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
if not email.endswith('ksau-hs.edu.sa'):
context = {'form': form, 'error_message': 'university_email'}
else:
try:
previous_registration = Registration.objects.get(email__iexact=email,
is_successful=True)
except ObjectDoesNotExist:
previous_registration = None
context = {'form': form, 'error_message': 'not_registered'}
if previous_registration:
new_password = generate_password()
user = previous_registration.email.split('@')[0]
if reset_password(user, new_password):
previous_registration.password = new_password
previous_registration.forgotten_password = True
previous_registration.save()
send_mail(u'حسابك على السحابة الطبية', forgotten_message %
(user, new_password), 'info@ksauhs-med.com',
[email], fail_silently=False)
return HttpResponseRedirect(reverse('register:thanks'))
else:
context = {'form': form, 'error_message': 'unknown'}
else:
context = {'form': form}
else:
form = ResetPasswordForm()
context = {'form': form}
return render(request, 'register/reset.html', context)
def generate_password():
return ''.join(random.choice(string.ascii_uppercase) for i in range(6))
def login():
homepage_url = "https://www.ksauhs-med.com"
homepage = requests.get(homepage_url)
oc1d6beae686 = homepage.cookies['oc1d6beae686']
cookies = {'oc1d6beae686': oc1d6beae686}
login_requesttoken_regex = re.compile('data-requesttoken="(.+?)"', re.U)
login_requesttoken = re.findall(login_requesttoken_regex, homepage.content)[0]
login_data = {'user': config.OWNCLOUD_ADMIN_USERNAME,
'password': config.OWNCLOUD_ADMIN_PASSWORD,
'requesttoken': login_requesttoken,
'remember_login': '1',
'timezone-offset': 'Asia/Baghdad',
}
login_page = requests.post(homepage_url, data=login_data, cookies=cookies)
login_cookies = login_page.history[0].cookies
cookies = {#'oc_username': login_cookies['oc_username'],
#'oc_token': login_cookies['oc_token'],
#'oc_remember_login': login_cookies['oc_remember_login'],
'oc1d6beae686': login_cookies['oc1d6beae686'],
}
return cookies
def createuser(user, password, group):
os.environ['OC_PASS'] = password
command = "/usr/local/bin/php70 /home/medcloud/webapps/ownphp70/occ user:add {} --password-from-env -g {} -n".format(user, group)
output = subprocess.call(command, shell=True)
if output == 0:
return True
else:
return False
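    # Example of the expanded command (added; "jdoe", "STRONGPW" and the group
    # label "3M12" are placeholder values):
    #   OC_PASS=STRONGPW /usr/local/bin/php70 /home/medcloud/webapps/ownphp70/occ \
    #       user:add jdoe --password-from-env -g 3M12 -n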
# createuser_url = "https://www.ksauhs-med.com/index.php/settings/users/users"
# user_url = "https://www.ksauhs-med.com/index.php/settings/users"
# login_cookies = login()
# user_page = requests.post(user_url, cookies=login_cookies)
# regex = re.findall("data-requesttoken=\"([^\"]+)\"", user_page.text)
# requesttoken = regex[0]
# user_data = {'username': user,
# 'password': password,
# 'groups[]': group}
# headers = {'requesttoken': requesttoken}
# createuser_page = requests.post(createuser_url, data=user_data, cookies=login_cookies, headers=headers)
# json_object = json.loads(createuser_page.text)
# if createuser_page.status_code == 201:
# return True
# else:
# print json_object # REMOVE
def reset_password(user, password):
os.environ['OC_PASS'] = password
command = "/usr/local/bin/php70 /home/medcloud/webapps/ownphp70/occ user:resetpassword {} --password-from-env -n".format(user)
output = subprocess.call(command, shell=True)
if output == 0:
return True
else:
return False
# changepassword_url = "https://www.ksauhs-med.com/index.php/settings/users/changepassword"
# user_url = "https://www.ksauhs-med.com/index.php/settings/users"
# login_cookies = login()
# user_page = requests.post(user_url, cookies=login_cookies)
# regex = re.findall("data-requesttoken=\"([^\"]+)\"", user_page.text)
# requesttoken = regex[0]
# user_data = {'username': user,
# 'password': password}
# headers = {'requesttoken': requesttoken,
# 'X-Requested-With': 'XMLHttpRequest',
# 'Referer': user_url}
# changepassword_page = requests.post(changepassword_url,
# data=user_data,
# cookies=login_cookies,
# headers=headers)
# try:
# json_object = json.loads(changepassword_page.text)
# except ValueError:
# json_object = {}
# if json_object.get('status') == 'success':
# return True
| osamak/medcloud-registration | register/temp/views.py | Python | agpl-3.0 | 9,644 | 0.002771 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils import timezone
from django.db import models, migrations
def fill_tables(apps, schema_editor):
eventsforbusv2 = apps.get_model('AndroidRequests', 'EventForBusv2')
eventsforbusstop = apps.get_model('AndroidRequests', 'EventForBusStop')
hhperiods = apps.get_model('AndroidRequests', 'HalfHourPeriod')
for ev in eventsforbusv2.objects.all():
creationTime = timezone.localtime(ev.timeCreation).time().replace(microsecond=0)
hhperiod = hhperiods.objects.get(initial_time__lte = creationTime , end_time__gte = creationTime)
ev.halfHourPeriod = hhperiod
ev.save()
for ev in eventsforbusstop.objects.all():
creationTime = timezone.localtime(ev.timeCreation).time().replace(microsecond=0)
        hhperiod = hhperiods.objects.get(initial_time__lte=creationTime, end_time__gte=creationTime)
ev.halfHourPeriod = hhperiod
ev.save()
class Migration(migrations.Migration):
dependencies = [
('AndroidRequests', '0903_transformation_halfhourperiod'),
]
operations = [
migrations.AddField(
model_name='eventforbusv2',
name='halfHourPeriod',
field=models.ForeignKey(verbose_name=b'Half Hour Period', to='AndroidRequests.HalfHourPeriod', null=True),
preserve_default=False,
),
migrations.AddField(
model_name='eventforbusstop',
name='halfHourPeriod',
field=models.ForeignKey(verbose_name=b'Half Hour Period', to='AndroidRequests.HalfHourPeriod', null=True),
preserve_default=False,
),
migrations.RunPython(fill_tables, reverse_code=migrations.RunPython.noop),
migrations.AlterField(
model_name='eventforbusv2',
name='halfHourPeriod',
field=models.ForeignKey(verbose_name=b'Half Hour Period', to='AndroidRequests.HalfHourPeriod', null=False),
),
migrations.AlterField(
model_name='eventforbusstop',
name='halfHourPeriod',
field=models.ForeignKey(verbose_name=b'Half Hour Period', to='AndroidRequests.HalfHourPeriod', null=False),
),
]
|
InspectorIncognito/visualization
|
AndroidRequests/migrations/0904_transformation_eventforbusv2_half_hour_period.py
|
Python
|
gpl-3.0
| 2,254 | 0.00976 |
import exceptions
# thrown by anything which doesn't like what was passed to it
class DataError(exceptions.StandardError):
pass
# thrown by MojoMessage
class MojoMessageError(DataError):
pass
# thrown by DataTypes
class BadFormatError(DataError):
pass
# thrown by things which do block reassembly
class ReassemblyError(IOError):
pass
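# Illustrative sketch (not part of the original module): catching DataError covers
# both MojoMessageError and BadFormatError, while ReassemblyError derives from
# IOError and is handled on the I/O path instead.
if __name__ == "__main__":
    try:
        raise BadFormatError("unexpected field type")
    except DataError as e:
        print("rejected input: %s" % e)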
|
zooko/egtp
|
common/MojoErrors.py
|
Python
|
agpl-3.0
| 355 | 0.014085 |
from setuptools import setup
setup(
name = "zml",
packages = ["zml"],
version = "0.8.1",
description = "zero markup language",
author = "Christof Hagedorn",
author_email = "team@zml.org",
url = "http://www.zml.org/",
download_url = "https://pypi.python.org/pypi/zml",
keywords = ["zml", "zero", "markup", "language", "template", "templating"],
install_requires = ['pyparsing', 'html5lib', 'pyyaml', 'asteval' ],
classifiers = [
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Topic :: Internet",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Internet :: WWW/HTTP :: WSGI",
"Topic :: Software Development :: Libraries :: Python Modules",
],
long_description = """\
zml - zero markup language
-------------------------------------
Features
- zero markup templates
- clean syntax
- extensible
- components
- namespaces
- lean code
This version requires Python 3 or later.
"""
)
|
babadoo/zml
|
setup.py
|
Python
|
bsd-3-clause
| 1,343 | 0.018615 |
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import inspect
import json
from dataclasses import dataclass
from enum import Enum
from typing import Any, Callable, Dict, Generic, List, Optional, Tuple, Type, cast, get_type_hints
from pants.base import deprecated
from pants.engine.goal import GoalSubsystem
from pants.engine.target import (
AsyncFieldMixin,
BoolField,
DictStringToStringField,
DictStringToStringSequenceField,
Field,
FloatField,
IntField,
RegisteredTargetTypes,
ScalarField,
SequenceField,
StringField,
StringSequenceField,
Target,
)
from pants.engine.unions import UnionMembership
from pants.option.option_util import is_dict_option, is_list_option
from pants.option.options import Options
from pants.option.parser import OptionValueHistory, Parser
from pants.util.objects import get_docstring, get_docstring_summary, pretty_print_type_hint
from pants.util.strutil import first_paragraph
class HelpJSONEncoder(json.JSONEncoder):
"""Class for JSON-encoding help data (including option values).
Note that JSON-encoded data is not intended to be decoded back. It exists purely for terminal
and browser help display.
"""
def default(self, o):
if callable(o):
return o.__name__
if isinstance(o, type):
            return o.__name__
if isinstance(o, Enum):
return o.value
return super().default(o)
def to_help_str(val) -> str:
if isinstance(val, (list, dict)):
return json.dumps(val, sort_keys=True, indent=2, cls=HelpJSONEncoder)
if isinstance(val, Enum):
return str(val.value)
else:
return str(val)
@dataclass(frozen=True)
class OptionHelpInfo:
"""A container for help information for a single option.
display_args: Arg strings suitable for display in help text, including value examples
(e.g., [-f, --[no]-foo-bar, --baz=<metavar>].)
comma_separated_display_args: Display args as a comma-delimited string, used in
reference documentation.
scoped_cmd_line_args: The explicitly scoped raw flag names allowed anywhere on the cmd line,
(e.g., [--scope-baz, --no-scope-baz, --scope-qux])
unscoped_cmd_line_args: The unscoped raw flag names allowed on the cmd line in this option's
scope context (e.g., [--baz, --no-baz, --qux])
    env_var: The environment variable that sets the option.
config_key: The config key for this option (in the section named for its scope).
typ: The type of the option.
default: The value of this option if no flags are specified (derived from config and env vars).
help: The help message registered for this option.
deprecated_message: If deprecated: A message explaining that this option is deprecated at
removal_version.
removal_version: If deprecated: The version at which this option is to be removed.
removal_hint: If deprecated: The removal hint message registered for this option.
choices: If this option has a constrained set of choices, a tuple of the stringified choices.
"""
display_args: Tuple[str, ...]
comma_separated_display_args: str
scoped_cmd_line_args: Tuple[str, ...]
unscoped_cmd_line_args: Tuple[str, ...]
env_var: str
config_key: str
typ: Type
default: Any
help: str
deprecation_active: bool
deprecated_message: Optional[str]
removal_version: Optional[str]
removal_hint: Optional[str]
choices: Optional[Tuple[str, ...]]
comma_separated_choices: Optional[str]
value_history: Optional[OptionValueHistory]
@dataclass(frozen=True)
class OptionScopeHelpInfo:
"""A container for help information for a scope of options.
scope: The scope of the described options.
basic|advanced|deprecated: A list of OptionHelpInfo for the options in that group.
"""
scope: str
description: str
is_goal: bool # True iff the scope belongs to a GoalSubsystem.
basic: Tuple[OptionHelpInfo, ...]
advanced: Tuple[OptionHelpInfo, ...]
deprecated: Tuple[OptionHelpInfo, ...]
def collect_unscoped_flags(self) -> List[str]:
flags: List[str] = []
for options in (self.basic, self.advanced, self.deprecated):
for ohi in options:
flags.extend(ohi.unscoped_cmd_line_args)
return flags
def collect_scoped_flags(self) -> List[str]:
flags: List[str] = []
for options in (self.basic, self.advanced, self.deprecated):
for ohi in options:
flags.extend(ohi.scoped_cmd_line_args)
return flags
@dataclass(frozen=True)
class GoalHelpInfo:
"""A container for help information for a goal."""
name: str
description: str
is_implemented: bool # True iff all unions required by the goal are implemented.
consumed_scopes: Tuple[str, ...] # The scopes of subsystems consumed by this goal.
@dataclass(frozen=True)
class TargetFieldHelpInfo:
"""A container for help information for a field in a target type."""
alias: str
description: Optional[str]
type_hint: str
required: bool
default: Optional[str]
@classmethod
def create(cls, field: Type[Field]) -> TargetFieldHelpInfo:
description: Optional[str]
if hasattr(field, "help"):
description = field.help
else:
# NB: It is very common (and encouraged) to subclass Fields to give custom behavior, e.g.
# `PythonSources` subclassing `Sources`. Here, we set `fallback_to_ancestors=True` so that
# we can still generate meaningful documentation for all these custom fields without
# requiring the Field author to rewrite the docstring.
#
# However, if the original plugin author did not define docstring, then this means we
# would typically fall back to the docstring for `Field` or a template like `StringField`.
            # This is an awkward edge of our heuristic and it's not intentional since these core
# `Field` types have documentation oriented to the plugin author and not the end user
# filling in fields in a BUILD file.
description = get_docstring(
field,
flatten=True,
fallback_to_ancestors=True,
ignored_ancestors={
*Field.mro(),
AsyncFieldMixin,
BoolField,
DictStringToStringField,
DictStringToStringSequenceField,
FloatField,
Generic, # type: ignore[arg-type]
IntField,
ScalarField,
SequenceField,
StringField,
StringSequenceField,
},
)
raw_value_type = get_type_hints(field.compute_value)["raw_value"]
type_hint = pretty_print_type_hint(raw_value_type)
# Check if the field only allows for certain choices.
if issubclass(field, StringField) and field.valid_choices is not None:
valid_choices = sorted(
field.valid_choices
if isinstance(field.valid_choices, tuple)
else (choice.value for choice in field.valid_choices)
)
type_hint = " | ".join([*(repr(c) for c in valid_choices), "None"])
if field.required:
# We hackily remove `None` as a valid option for the field when it's required. This
# greatly simplifies Field definitions because it means that they don't need to
# override the type hints for `PrimitiveField.compute_value()` and
# `AsyncField.sanitize_raw_value()` to indicate that `None` is an invalid type.
type_hint = type_hint.replace(" | None", "")
return cls(
alias=field.alias,
description=description,
type_hint=type_hint,
required=field.required,
default=(
repr(field.default) if (not field.required and field.default is not None) else None
),
)
@dataclass(frozen=True)
class TargetTypeHelpInfo:
"""A container for help information for a target type."""
alias: str
summary: Optional[str]
description: Optional[str]
fields: Tuple[TargetFieldHelpInfo, ...]
@classmethod
def create(
cls, target_type: Type[Target], *, union_membership: UnionMembership
) -> TargetTypeHelpInfo:
description: Optional[str]
summary: Optional[str]
if hasattr(target_type, "help"):
description = target_type.help
summary = first_paragraph(description)
else:
description = get_docstring(target_type)
summary = get_docstring_summary(target_type)
return cls(
alias=target_type.alias,
summary=summary,
description=description,
fields=tuple(
TargetFieldHelpInfo.create(field)
for field in target_type.class_field_types(union_membership=union_membership)
if not field.alias.startswith("_") and field.deprecated_removal_version is None
),
)
@dataclass(frozen=True)
class AllHelpInfo:
"""All available help info."""
scope_to_help_info: Dict[str, OptionScopeHelpInfo]
name_to_goal_info: Dict[str, GoalHelpInfo]
name_to_target_type_info: Dict[str, TargetTypeHelpInfo]
ConsumedScopesMapper = Callable[[str], Tuple[str, ...]]
class HelpInfoExtracter:
"""Extracts information useful for displaying help from option registration args."""
@classmethod
def get_all_help_info(
cls,
options: Options,
union_membership: UnionMembership,
consumed_scopes_mapper: ConsumedScopesMapper,
registered_target_types: RegisteredTargetTypes,
) -> AllHelpInfo:
scope_to_help_info = {}
name_to_goal_info = {}
for scope_info in sorted(options.known_scope_to_info.values(), key=lambda x: x.scope):
options.for_scope(scope_info.scope) # Force parsing.
optionable_cls = scope_info.optionable_cls
if not scope_info.description:
cls_name = (
f"{optionable_cls.__module__}.{optionable_cls.__qualname__}"
if optionable_cls
else ""
)
raise ValueError(
f"Subsystem {cls_name} with scope `{scope_info.scope}` has no description. "
f"Add a class property `help`."
)
is_goal = optionable_cls is not None and issubclass(optionable_cls, GoalSubsystem)
oshi = HelpInfoExtracter(scope_info.scope).get_option_scope_help_info(
scope_info.description, options.get_parser(scope_info.scope), is_goal
)
scope_to_help_info[oshi.scope] = oshi
if is_goal:
goal_subsystem_cls = cast(Type[GoalSubsystem], optionable_cls)
is_implemented = union_membership.has_members_for_all(
goal_subsystem_cls.required_union_implementations
)
name_to_goal_info[scope_info.scope] = GoalHelpInfo(
goal_subsystem_cls.name,
scope_info.description,
is_implemented,
consumed_scopes_mapper(scope_info.scope),
)
name_to_target_type_info = {
alias: TargetTypeHelpInfo.create(target_type, union_membership=union_membership)
for alias, target_type in registered_target_types.aliases_to_types.items()
if not alias.startswith("_") and target_type.deprecated_removal_version is None
}
return AllHelpInfo(
scope_to_help_info=scope_to_help_info,
name_to_goal_info=name_to_goal_info,
name_to_target_type_info=name_to_target_type_info,
)
@staticmethod
def compute_default(**kwargs) -> Any:
"""Compute the default val for help display for an option registered with these kwargs.
Returns a pair (default, stringified default suitable for display).
"""
ranked_default = kwargs.get("default")
fallback: Any = None
if is_list_option(kwargs):
fallback = []
elif is_dict_option(kwargs):
fallback = {}
default = (
ranked_default.value
if ranked_default and ranked_default.value is not None
else fallback
)
return default
@staticmethod
def stringify_type(t: Type) -> str:
if t == dict:
return "{'key1': val1, 'key2': val2, ...}"
return f"<{t.__name__}>"
@staticmethod
def compute_metavar(kwargs):
"""Compute the metavar to display in help for an option registered with these kwargs."""
stringify = lambda t: HelpInfoExtracter.stringify_type(t)
metavar = kwargs.get("metavar")
if not metavar:
if is_list_option(kwargs):
member_typ = kwargs.get("member_type", str)
metavar = stringify(member_typ)
# In a cmd-line list literal, string members must be quoted.
if member_typ == str:
metavar = f"'{metavar}'"
elif is_dict_option(kwargs):
metavar = f'"{stringify(dict)}"'
else:
metavar = stringify(kwargs.get("type", str))
if is_list_option(kwargs):
# For lists, the metavar (either explicit or deduced) is the representation
# of a single list member, so we turn the help string into a list of those here.
return f'"[{metavar}, {metavar}, ...]"'
return metavar
@staticmethod
def compute_choices(kwargs) -> Optional[Tuple[str, ...]]:
"""Compute the option choices to display."""
typ = kwargs.get("type", [])
member_type = kwargs.get("member_type", str)
if typ == list and inspect.isclass(member_type) and issubclass(member_type, Enum):
return tuple(choice.value for choice in member_type)
elif inspect.isclass(typ) and issubclass(typ, Enum):
return tuple(choice.value for choice in typ)
elif "choices" in kwargs:
return tuple(str(choice) for choice in kwargs["choices"])
else:
return None
def __init__(self, scope: str):
self._scope = scope
self._scope_prefix = scope.replace(".", "-")
def get_option_scope_help_info(self, description: str, parser: Parser, is_goal: bool):
"""Returns an OptionScopeHelpInfo for the options parsed by the given parser."""
basic_options = []
advanced_options = []
deprecated_options = []
for args, kwargs in parser.option_registrations_iter():
history = parser.history(kwargs["dest"])
ohi = self.get_option_help_info(args, kwargs)
ohi = dataclasses.replace(ohi, value_history=history)
if ohi.deprecation_active:
deprecated_options.append(ohi)
elif kwargs.get("advanced") or (
kwargs.get("recursive") and not kwargs.get("recursive_root")
):
# In order to keep the regular help output uncluttered, we treat recursive
# options as advanced. The concept of recursive options is not widely used
# and not clear to the end user, so it's best not to expose it as a concept.
advanced_options.append(ohi)
else:
basic_options.append(ohi)
return OptionScopeHelpInfo(
scope=self._scope,
description=description,
is_goal=is_goal,
basic=tuple(basic_options),
advanced=tuple(advanced_options),
deprecated=tuple(deprecated_options),
)
def get_option_help_info(self, args, kwargs):
"""Returns an OptionHelpInfo for the option registered with the given (args, kwargs)."""
display_args = []
scoped_cmd_line_args = []
unscoped_cmd_line_args = []
for arg in args:
is_short_arg = len(arg) == 2
unscoped_cmd_line_args.append(arg)
if self._scope_prefix:
scoped_arg = f"--{self._scope_prefix}-{arg.lstrip('-')}"
else:
scoped_arg = arg
scoped_cmd_line_args.append(scoped_arg)
if kwargs.get("type") == bool:
if is_short_arg:
display_args.append(scoped_arg)
else:
unscoped_cmd_line_args.append(f"--no-{arg[2:]}")
sa_2 = scoped_arg[2:]
scoped_cmd_line_args.append(f"--no-{sa_2}")
display_args.append(f"--[no-]{sa_2}")
else:
metavar = self.compute_metavar(kwargs)
display_args.append(f"{scoped_arg}={metavar}")
if kwargs.get("passthrough"):
type_str = self.stringify_type(kwargs.get("member_type", str))
display_args.append(f"... -- [{type_str} [{type_str} [...]]]")
typ = kwargs.get("type", str)
default = self.compute_default(**kwargs)
help_msg = kwargs.get("help", "No help available.")
deprecation_start_version = kwargs.get("deprecation_start_version")
removal_version = kwargs.get("removal_version")
deprecation_active = removal_version is not None and deprecated.is_deprecation_active(
deprecation_start_version
)
deprecated_message = None
if removal_version:
deprecated_tense = deprecated.get_deprecated_tense(removal_version)
message_start = (
"Deprecated"
if deprecation_active
else f"Upcoming deprecation in version: {deprecation_start_version}"
)
deprecated_message = (
f"{message_start}, {deprecated_tense} removed in version: {removal_version}."
)
removal_hint = kwargs.get("removal_hint")
choices = self.compute_choices(kwargs)
dest = Parser.parse_dest(*args, **kwargs)
# Global options have three env var variants. The last one is the most human-friendly.
env_var = Parser.get_env_var_names(self._scope, dest)[-1]
ret = OptionHelpInfo(
display_args=tuple(display_args),
comma_separated_display_args=", ".join(display_args),
scoped_cmd_line_args=tuple(scoped_cmd_line_args),
unscoped_cmd_line_args=tuple(unscoped_cmd_line_args),
env_var=env_var,
config_key=dest,
typ=typ,
default=default,
help=help_msg,
deprecation_active=deprecation_active,
deprecated_message=deprecated_message,
removal_version=removal_version,
removal_hint=removal_hint,
choices=choices,
comma_separated_choices=None if choices is None else ", ".join(choices),
value_history=None,
)
return ret
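# Illustrative sketch (not part of the original module): exercising a few of the
# pure helpers above with hand-built registration kwargs. The kwargs are
# hypothetical and only carry the keys these helpers inspect (assuming, as in
# regular Pants option registration, that list options are declared with
# type=list and a member_type).
if __name__ == "__main__":

    class _Color(Enum):
        RED = "red"
        BLUE = "blue"

    # A list-of-str option's metavar is rendered as a quoted list literal.
    print(HelpInfoExtracter.compute_metavar({"type": list, "member_type": str}))
    # An enum-typed option exposes its values as help choices.
    print(HelpInfoExtracter.compute_choices({"type": _Color}))
    # Container values are JSON-pretty-printed for display.
    print(to_help_str({"backends": ["pants.backend.python"]}))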
|
jsirois/pants
|
src/python/pants/help/help_info_extracter.py
|
Python
|
apache-2.0
| 19,574 | 0.002963 |
#!/usr/bin/env python
# coding: utf-8
import os
import shutil
import tempfile
import unittest
from mkdocs import build, nav, config
from mkdocs.compat import zip
from mkdocs.exceptions import MarkdownNotFound
from mkdocs.tests.base import dedent
class BuildTests(unittest.TestCase):
def test_empty_document(self):
html, toc, meta = build.convert_markdown("")
self.assertEqual(html, '')
self.assertEqual(len(list(toc)), 0)
self.assertEqual(meta, {})
def test_convert_markdown(self):
"""
Ensure that basic Markdown -> HTML and TOC works.
"""
html, toc, meta = build.convert_markdown(dedent("""
page_title: custom title
# Heading 1
This is some text.
# Heading 2
And some more text.
"""))
expected_html = dedent("""
<h1 id="heading-1">Heading 1</h1>
<p>This is some text.</p>
<h1 id="heading-2">Heading 2</h1>
<p>And some more text.</p>
""")
expected_toc = dedent("""
Heading 1 - #heading-1
Heading 2 - #heading-2
""")
expected_meta = {'page_title': ['custom title']}
self.assertEqual(html.strip(), expected_html)
self.assertEqual(str(toc).strip(), expected_toc)
self.assertEqual(meta, expected_meta)
def test_convert_internal_link(self):
md_text = 'An [internal link](internal.md) to another document.'
expected = '<p>An <a href="internal/">internal link</a> to another document.</p>'
html, toc, meta = build.convert_markdown(md_text)
self.assertEqual(html.strip(), expected.strip())
def test_convert_multiple_internal_links(self):
md_text = '[First link](first.md) [second link](second.md).'
expected = '<p><a href="first/">First link</a> <a href="second/">second link</a>.</p>'
html, toc, meta = build.convert_markdown(md_text)
self.assertEqual(html.strip(), expected.strip())
def test_convert_internal_link_differing_directory(self):
md_text = 'An [internal link](../internal.md) to another document.'
expected = '<p>An <a href="../internal/">internal link</a> to another document.</p>'
html, toc, meta = build.convert_markdown(md_text)
self.assertEqual(html.strip(), expected.strip())
def test_convert_internal_link_with_anchor(self):
md_text = 'An [internal link](internal.md#section1.1) to another document.'
expected = '<p>An <a href="internal/#section1.1">internal link</a> to another document.</p>'
html, toc, meta = build.convert_markdown(md_text)
self.assertEqual(html.strip(), expected.strip())
def test_convert_internal_media(self):
"""Test relative image URL's are the same for different base_urls"""
pages = [
('index.md',),
('internal.md',),
('sub/internal.md')
]
site_navigation = nav.SiteNavigation(pages)
expected_results = (
'./img/initial-layout.png',
'../img/initial-layout.png',
'../img/initial-layout.png',
)
template = '<p><img alt="The initial MkDocs layout" src="%s" /></p>'
for (page, expected) in zip(site_navigation.walk_pages(), expected_results):
            md_text = '![The initial MkDocs layout](img/initial-layout.png)'
html, _, _ = build.convert_markdown(md_text, site_navigation=site_navigation)
self.assertEqual(html, template % expected)
    def test_convert_internal_absolute_media(self):
        """Test absolute image URLs are correct for different base_urls"""
pages = [
('index.md',),
('internal.md',),
('sub/internal.md')
]
site_navigation = nav.SiteNavigation(pages)
expected_results = (
'./img/initial-layout.png',
'../img/initial-layout.png',
'../../img/initial-layout.png',
)
template = '<p><img alt="The initial MkDocs layout" src="%s" /></p>'
for (page, expected) in zip(site_navigation.walk_pages(), expected_results):
            md_text = '![The initial MkDocs layout](img/initial-layout.png)'
html, _, _ = build.convert_markdown(md_text, site_navigation=site_navigation)
self.assertEqual(html, template % expected)
def test_dont_convert_code_block_urls(self):
pages = [
('index.md',),
('internal.md',),
('sub/internal.md')
]
site_navigation = nav.SiteNavigation(pages)
expected = dedent("""
<p>An HTML Anchor::</p>
<pre><code><a href="index.md">My example link</a>
</code></pre>
""")
for page in site_navigation.walk_pages():
markdown = 'An HTML Anchor::\n\n <a href="index.md">My example link</a>\n'
html, _, _ = build.convert_markdown(markdown, site_navigation=site_navigation)
self.assertEqual(dedent(html), expected)
def test_anchor_only_link(self):
pages = [
('index.md',),
('internal.md',),
('sub/internal.md')
]
site_navigation = nav.SiteNavigation(pages)
for page in site_navigation.walk_pages():
markdown = '[test](#test)'
html, _, _ = build.convert_markdown(markdown, site_navigation=site_navigation)
self.assertEqual(html, '<p><a href="#test">test</a></p>')
def test_ignore_external_link(self):
md_text = 'An [external link](http://example.com/external.md).'
expected = '<p>An <a href="http://example.com/external.md">external link</a>.</p>'
html, toc, meta = build.convert_markdown(md_text)
self.assertEqual(html.strip(), expected.strip())
def test_not_use_directory_urls(self):
md_text = 'An [internal link](internal.md) to another document.'
expected = '<p>An <a href="internal/index.html">internal link</a> to another document.</p>'
pages = [
('internal.md',)
]
site_navigation = nav.SiteNavigation(pages, use_directory_urls=False)
html, toc, meta = build.convert_markdown(md_text, site_navigation=site_navigation)
self.assertEqual(html.strip(), expected.strip())
def test_markdown_table_extension(self):
"""
Ensure that the table extension is supported.
"""
html, toc, meta = build.convert_markdown(dedent("""
First Header | Second Header
-------------- | --------------
Content Cell 1 | Content Cell 2
Content Cell 3 | Content Cell 4
"""))
expected_html = dedent("""
<table>
<thead>
<tr>
<th>First Header</th>
<th>Second Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>Content Cell 1</td>
<td>Content Cell 2</td>
</tr>
<tr>
<td>Content Cell 3</td>
<td>Content Cell 4</td>
</tr>
</tbody>
</table>
""")
self.assertEqual(html.strip(), expected_html)
def test_markdown_fenced_code_extension(self):
"""
Ensure that the fenced code extension is supported.
"""
html, toc, meta = build.convert_markdown(dedent("""
```
print 'foo'
```
"""))
expected_html = dedent("""
<pre><code>print 'foo'\n</code></pre>
""")
self.assertEqual(html.strip(), expected_html)
def test_markdown_custom_extension(self):
"""
Check that an extension applies when requested in the arguments to
`convert_markdown`.
"""
md_input = "foo__bar__baz"
# Check that the plugin is not active when not requested.
expected_without_smartstrong = "<p>foo<strong>bar</strong>baz</p>"
html_base, _, _ = build.convert_markdown(md_input)
self.assertEqual(html_base.strip(), expected_without_smartstrong)
# Check that the plugin is active when requested.
expected_with_smartstrong = "<p>foo__bar__baz</p>"
html_ext, _, _ = build.convert_markdown(md_input, extensions=['smart_strong'])
self.assertEqual(html_ext.strip(), expected_with_smartstrong)
def test_markdown_duplicate_custom_extension(self):
"""
Duplicated extension names should not cause problems.
"""
md_input = "foo"
html_ext, _, _ = build.convert_markdown(md_input, ['toc'])
self.assertEqual(html_ext.strip(), '<p>foo</p>')
def test_copying_media(self):
docs_dir = tempfile.mkdtemp()
site_dir = tempfile.mkdtemp()
try:
# Create a non-empty markdown file, image, dot file and dot directory.
f = open(os.path.join(docs_dir, 'index.md'), 'w')
f.write(dedent("""
page_title: custom title
# Heading 1
This is some text.
# Heading 2
And some more text.
"""))
f.close()
open(os.path.join(docs_dir, 'img.jpg'), 'w').close()
open(os.path.join(docs_dir, '.hidden'), 'w').close()
os.mkdir(os.path.join(docs_dir, '.git'))
open(os.path.join(docs_dir, '.git/hidden'), 'w').close()
conf = config.validate_config({
'site_name': 'Example',
'docs_dir': docs_dir,
'site_dir': site_dir
})
build.build(conf)
            # Verify only the markdown (converted to HTML) and the image are copied.
self.assertTrue(os.path.isfile(os.path.join(site_dir, 'index.html')))
self.assertTrue(os.path.isfile(os.path.join(site_dir, 'img.jpg')))
self.assertFalse(os.path.isfile(os.path.join(site_dir, '.hidden')))
self.assertFalse(os.path.isfile(os.path.join(site_dir, '.git/hidden')))
finally:
shutil.rmtree(docs_dir)
shutil.rmtree(site_dir)
def test_strict_mode_valid(self):
pages = [
('index.md',),
('internal.md',),
('sub/internal.md')
]
site_nav = nav.SiteNavigation(pages)
valid = "[test](internal.md)"
build.convert_markdown(valid, site_nav, strict=False)
build.convert_markdown(valid, site_nav, strict=True)
def test_strict_mode_invalid(self):
pages = [
('index.md',),
('internal.md',),
('sub/internal.md')
]
site_nav = nav.SiteNavigation(pages)
invalid = "[test](bad_link.md)"
build.convert_markdown(invalid, site_nav, strict=False)
self.assertRaises(
MarkdownNotFound,
build.convert_markdown, invalid, site_nav, strict=True)
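# Not part of the original module: a small convenience hook so this test file can
# also be run directly; the usual entry point is the project's test runner.
if __name__ == '__main__':
    unittest.main()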
|
xeechou/mkblogs
|
mkblogs/tests/build_tests.py
|
Python
|
bsd-2-clause
| 10,943 | 0.001828 |
def func():
value = "not-none"
# Is none
<caret>if value is None:
print("None")
else:
print("Not none")
|
siosio/intellij-community
|
python/testData/intentions/PyInvertIfConditionIntentionTest/commentsIf.py
|
Python
|
apache-2.0
| 136 | 0.014706 |
from geojson_rewind import rewind
from geomet import wkt
import decimal
import statistics
def wkt_rewind(x, digits=None):
"""
reverse WKT winding order
:param x: [str] WKT string
:param digits: [int] number of digits after decimal to use for the return string.
by default, we use the mean number of digits in your string.
:return: a string
Usage::
from pygbif import wkt_rewind
x = 'POLYGON((144.6 13.2, 144.6 13.6, 144.9 13.6, 144.9 13.2, 144.6 13.2))'
wkt_rewind(x)
wkt_rewind(x, digits = 0)
wkt_rewind(x, digits = 3)
wkt_rewind(x, digits = 7)
"""
z = wkt.loads(x)
if digits is None:
coords = z["coordinates"]
nums = __flatten(coords)
dec_n = [decimal.Decimal(str(w)).as_tuple().exponent for w in nums]
digits = abs(statistics.mean(dec_n))
else:
if not isinstance(digits, int):
raise TypeError("'digits' must be an int")
wound = rewind(z)
back_to_wkt = wkt.dumps(wound, decimals=digits)
return back_to_wkt
# from https://stackoverflow.com/a/12472564/1091766
def __flatten(S):
if S == []:
return S
if isinstance(S[0], list):
return __flatten(S[0]) + __flatten(S[1:])
return S[:1] + __flatten(S[1:])
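# Illustrative sketch (not part of the original module): the recursive helper
# flattens arbitrarily nested coordinate lists before digits are inferred, and
# wkt_rewind reverses the winding order of the docstring's example polygon.
if __name__ == "__main__":
    print(__flatten([[144.6, 13.2], [[144.9, 13.6]]]))  # [144.6, 13.2, 144.9, 13.6]
    print(wkt_rewind("POLYGON((144.6 13.2, 144.6 13.6, 144.9 13.6, 144.9 13.2, 144.6 13.2))"))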
|
sckott/pygbif
|
pygbif/utils/wkt_rewind.py
|
Python
|
mit
| 1,305 | 0.003065 |
from ..broker import Broker
class DeviceServiceServiceBroker(Broker):
controller = "device_service_services"
def show(self, **kwargs):
"""Shows the details for the specified device service service.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceServiceServiceID: The internal NetMRI identifier of this usage relationship between service objects.
:type DeviceServiceServiceID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device service service methods. The listed methods will be called on each device service service returned and included in the output. Available methods are: parent_device_service, child_device_service, data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: parent_device_service, child_device_service, data_source, device.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_service_service: The device service service identified by the specified DeviceServiceServiceID.
:rtype device_service_service: DeviceServiceService
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def index(self, **kwargs):
"""Lists the available device service services. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device to which belongs this services.
:type DeviceID: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceServiceServiceID: The internal NetMRI identifier of this usage relationship between service objects.
:type DeviceServiceServiceID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the device service services as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device service service methods. The listed methods will be called on each device service service returned and included in the output. Available methods are: parent_device_service, child_device_service, data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: parent_device_service, child_device_service, data_source, device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceServiceServiceID
:param sort: The data field(s) to use for sorting the output. Default is DeviceServiceServiceID. Valid values are DeviceServiceServiceID, DeviceID, DataSourceID, ParentDeviceServiceID, ChildDeviceServiceID, SvsvFirstSeenTime, SvsvStartTime, SvsvEndTime, SvsvTimestamp, SvsvChangedCols, SvsvUsage, SvsvProvisionData.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceServiceService. Valid values are DeviceServiceServiceID, DeviceID, DataSourceID, ParentDeviceServiceID, ChildDeviceServiceID, SvsvFirstSeenTime, SvsvStartTime, SvsvEndTime, SvsvTimestamp, SvsvChangedCols, SvsvUsage, SvsvProvisionData. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_service_services: An array of the DeviceServiceService objects that match the specified input criteria.
:rtype device_service_services: Array of DeviceServiceService
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
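    # Illustrative usage sketch (not part of the generated broker; client setup is
    # hypothetical): with an authenticated NetMRI API client exposing brokers, a
    # typical query narrows by DeviceID and selects a few columns, e.g.
    #
    #   broker = client.get_broker("DeviceServiceService")
    #   rows = broker.index(DeviceID=[1234], limit=100,
    #                       select=["DeviceServiceServiceID", "SvsvUsage"])
    #   detail = broker.show(DeviceServiceServiceID=rows[0].DeviceServiceServiceID)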
def search(self, **kwargs):
"""Lists the available device service services matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ChildDeviceServiceID: The internal NetMRI identifier of the child service (the used service).
:type ChildDeviceServiceID: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device to which belongs this services.
:type DeviceID: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceServiceServiceID: The internal NetMRI identifier of this usage relationship between service objects.
:type DeviceServiceServiceID: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ParentDeviceServiceID: The internal NetMRI identifier of the parent service (the user).
:type ParentDeviceServiceID: Array of Integer
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SvsvChangedCols: The fields that changed between this revision of the record and the previous revision.
:type SvsvChangedCols: Array of String
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SvsvEndTime: The ending effective time of this record, or empty if still in effect.
:type SvsvEndTime: Array of DateTime
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SvsvFirstSeenTime: The timestamp of when NetMRI saw for the first time this relationship.
:type SvsvFirstSeenTime: Array of DateTime
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SvsvProvisionData: Internal data - do not modify, may change without warning.
:type SvsvProvisionData: Array of String
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SvsvStartTime: The starting effective time of this record.
:type SvsvStartTime: Array of DateTime
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SvsvTimestamp: The date and time this record was collected or calculated.
:type SvsvTimestamp: Array of DateTime
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SvsvUsage: An indicator of the kind of relationship. One of : child, protID, srcPrtID, dstPrtID, protDstID. The regular indicator is 'child'.
:type SvsvUsage: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the device service services as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device service service methods. The listed methods will be called on each device service service returned and included in the output. Available methods are: parent_device_service, child_device_service, data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: parent_device_service, child_device_service, data_source, device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceServiceServiceID
:param sort: The data field(s) to use for sorting the output. Default is DeviceServiceServiceID. Valid values are DeviceServiceServiceID, DeviceID, DataSourceID, ParentDeviceServiceID, ChildDeviceServiceID, SvsvFirstSeenTime, SvsvStartTime, SvsvEndTime, SvsvTimestamp, SvsvChangedCols, SvsvUsage, SvsvProvisionData.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceServiceService. Valid values are DeviceServiceServiceID, DeviceID, DataSourceID, ParentDeviceServiceID, ChildDeviceServiceID, SvsvFirstSeenTime, SvsvStartTime, SvsvEndTime, SvsvTimestamp, SvsvChangedCols, SvsvUsage, SvsvProvisionData. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against device service services, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: ChildDeviceServiceID, DataSourceID, DeviceID, DeviceServiceServiceID, ParentDeviceServiceID, SvsvChangedCols, SvsvEndTime, SvsvFirstSeenTime, SvsvProvisionData, SvsvStartTime, SvsvTimestamp, SvsvUsage.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
             :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_service_services: An array of the DeviceServiceService objects that match the specified input criteria.
:rtype device_service_services: Array of DeviceServiceService
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""Lists the available device service services matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: ChildDeviceServiceID, DataSourceID, DeviceID, DeviceServiceServiceID, ParentDeviceServiceID, SvsvChangedCols, SvsvEndTime, SvsvFirstSeenTime, SvsvProvisionData, SvsvStartTime, SvsvTimestamp, SvsvUsage.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ChildDeviceServiceID: The operator to apply to the field ChildDeviceServiceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ChildDeviceServiceID: The internal NetMRI identifier of the child service (the used service). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ChildDeviceServiceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ChildDeviceServiceID: If op_ChildDeviceServiceID is specified, the field named in this input will be compared to the value in ChildDeviceServiceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ChildDeviceServiceID must be specified if op_ChildDeviceServiceID is specified.
:type val_f_ChildDeviceServiceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ChildDeviceServiceID: If op_ChildDeviceServiceID is specified, this value will be compared to the value in ChildDeviceServiceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ChildDeviceServiceID must be specified if op_ChildDeviceServiceID is specified.
:type val_c_ChildDeviceServiceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified.
:type val_f_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified.
:type val_c_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: The internal NetMRI identifier for the device to which belongs this services. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified.
:type val_f_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified.
:type val_c_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceServiceServiceID: The operator to apply to the field DeviceServiceServiceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceServiceServiceID: The internal NetMRI identifier of this usage relationship between service objects. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceServiceServiceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceServiceServiceID: If op_DeviceServiceServiceID is specified, the field named in this input will be compared to the value in DeviceServiceServiceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceServiceServiceID must be specified if op_DeviceServiceServiceID is specified.
:type val_f_DeviceServiceServiceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceServiceServiceID: If op_DeviceServiceServiceID is specified, this value will be compared to the value in DeviceServiceServiceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceServiceServiceID must be specified if op_DeviceServiceServiceID is specified.
:type val_c_DeviceServiceServiceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ParentDeviceServiceID: The operator to apply to the field ParentDeviceServiceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ParentDeviceServiceID: The internal NetMRI identifier of the parent service (the user). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ParentDeviceServiceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ParentDeviceServiceID: If op_ParentDeviceServiceID is specified, the field named in this input will be compared to the value in ParentDeviceServiceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ParentDeviceServiceID must be specified if op_ParentDeviceServiceID is specified.
:type val_f_ParentDeviceServiceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ParentDeviceServiceID: If op_ParentDeviceServiceID is specified, this value will be compared to the value in ParentDeviceServiceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ParentDeviceServiceID must be specified if op_ParentDeviceServiceID is specified.
:type val_c_ParentDeviceServiceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SvsvChangedCols: The operator to apply to the field SvsvChangedCols. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SvsvChangedCols: The fields that changed between this revision of the record and the previous revision. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SvsvChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SvsvChangedCols: If op_SvsvChangedCols is specified, the field named in this input will be compared to the value in SvsvChangedCols using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SvsvChangedCols must be specified if op_SvsvChangedCols is specified.
:type val_f_SvsvChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SvsvChangedCols: If op_SvsvChangedCols is specified, this value will be compared to the value in SvsvChangedCols using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SvsvChangedCols must be specified if op_SvsvChangedCols is specified.
:type val_c_SvsvChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SvsvEndTime: The operator to apply to the field SvsvEndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SvsvEndTime: The ending effective time of this record, or empty if still in effect. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SvsvEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SvsvEndTime: If op_SvsvEndTime is specified, the field named in this input will be compared to the value in SvsvEndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SvsvEndTime must be specified if op_SvsvEndTime is specified.
:type val_f_SvsvEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SvsvEndTime: If op_SvsvEndTime is specified, this value will be compared to the value in SvsvEndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SvsvEndTime must be specified if op_SvsvEndTime is specified.
:type val_c_SvsvEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SvsvFirstSeenTime: The operator to apply to the field SvsvFirstSeenTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SvsvFirstSeenTime: The timestamp of when NetMRI saw for the first time this relationship. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SvsvFirstSeenTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SvsvFirstSeenTime: If op_SvsvFirstSeenTime is specified, the field named in this input will be compared to the value in SvsvFirstSeenTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SvsvFirstSeenTime must be specified if op_SvsvFirstSeenTime is specified.
:type val_f_SvsvFirstSeenTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SvsvFirstSeenTime: If op_SvsvFirstSeenTime is specified, this value will be compared to the value in SvsvFirstSeenTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SvsvFirstSeenTime must be specified if op_SvsvFirstSeenTime is specified.
:type val_c_SvsvFirstSeenTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SvsvProvisionData: The operator to apply to the field SvsvProvisionData. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SvsvProvisionData: Internal data - do not modify, may change without warning. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SvsvProvisionData: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SvsvProvisionData: If op_SvsvProvisionData is specified, the field named in this input will be compared to the value in SvsvProvisionData using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SvsvProvisionData must be specified if op_SvsvProvisionData is specified.
:type val_f_SvsvProvisionData: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SvsvProvisionData: If op_SvsvProvisionData is specified, this value will be compared to the value in SvsvProvisionData using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SvsvProvisionData must be specified if op_SvsvProvisionData is specified.
:type val_c_SvsvProvisionData: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SvsvStartTime: The operator to apply to the field SvsvStartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SvsvStartTime: The starting effective time of this record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SvsvStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SvsvStartTime: If op_SvsvStartTime is specified, the field named in this input will be compared to the value in SvsvStartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SvsvStartTime must be specified if op_SvsvStartTime is specified.
:type val_f_SvsvStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SvsvStartTime: If op_SvsvStartTime is specified, this value will be compared to the value in SvsvStartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SvsvStartTime must be specified if op_SvsvStartTime is specified.
:type val_c_SvsvStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SvsvTimestamp: The operator to apply to the field SvsvTimestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SvsvTimestamp: The date and time this record was collected or calculated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SvsvTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SvsvTimestamp: If op_SvsvTimestamp is specified, the field named in this input will be compared to the value in SvsvTimestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SvsvTimestamp must be specified if op_SvsvTimestamp is specified.
:type val_f_SvsvTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SvsvTimestamp: If op_SvsvTimestamp is specified, this value will be compared to the value in SvsvTimestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SvsvTimestamp must be specified if op_SvsvTimestamp is specified.
:type val_c_SvsvTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param op_SvsvUsage: The operator to apply to the field SvsvUsage. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SvsvUsage: An indicator of the kind of relationship. One of: child, protID, srcPrtID, dstPrtID, protDstID. The regular indicator is 'child'. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SvsvUsage: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SvsvUsage: If op_SvsvUsage is specified, the field named in this input will be compared to the value in SvsvUsage using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SvsvUsage must be specified if op_SvsvUsage is specified.
:type val_f_SvsvUsage: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SvsvUsage: If op_SvsvUsage is specified, this value will be compared to the value in SvsvUsage using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SvsvUsage must be specified if op_SvsvUsage is specified.
:type val_c_SvsvUsage: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the device service services as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device service service methods. The listed methods will be called on each device service service returned and included in the output. Available methods are: parent_device_service, child_device_service, data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: parent_device_service, child_device_service, data_source, device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
            :param start: The record number to return in the selected page of data. It will always appear in the page, although it may not be the first record. See :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceServiceServiceID
:param sort: The data field(s) to use for sorting the output. Default is DeviceServiceServiceID. Valid values are DeviceServiceServiceID, DeviceID, DataSourceID, ParentDeviceServiceID, ChildDeviceServiceID, SvsvFirstSeenTime, SvsvStartTime, SvsvEndTime, SvsvTimestamp, SvsvChangedCols, SvsvUsage, SvsvProvisionData.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceServiceService. Valid values are DeviceServiceServiceID, DeviceID, DataSourceID, ParentDeviceServiceID, ChildDeviceServiceID, SvsvFirstSeenTime, SvsvStartTime, SvsvEndTime, SvsvTimestamp, SvsvChangedCols, SvsvUsage, SvsvProvisionData. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_service_services: An array of the DeviceServiceService objects that match the specified input criteria.
:rtype device_service_services: Array of DeviceServiceService
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
def parent_device_service(self, **kwargs):
"""The parent service object of this relationship.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceServiceServiceID: The internal NetMRI identifier of this usage relationship between service objects.
:type DeviceServiceServiceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The parent service object of this relationship.
:rtype : DeviceService
"""
return self.api_request(self._get_method_fullname("parent_device_service"), kwargs)
def child_device_service(self, **kwargs):
"""The child service object of this relationship.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceServiceServiceID: The internal NetMRI identifier of this usage relationship between service objects.
:type DeviceServiceServiceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The child service object of this relationship.
:rtype : DeviceService
"""
return self.api_request(self._get_method_fullname("child_device_service"), kwargs)
def data_source(self, **kwargs):
"""The collector NetMRI that collected this data record.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceServiceServiceID: The internal NetMRI identifier of this usage relationship between service objects.
:type DeviceServiceServiceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The collector NetMRI that collected this data record.
:rtype : DataSource
"""
return self.api_request(self._get_method_fullname("data_source"), kwargs)
def device(self, **kwargs):
"""The device from which this data was collected.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceServiceServiceID: The internal NetMRI identifier of this usage relationship between service objects.
:type DeviceServiceServiceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The device from which this data was collected.
:rtype : Device
"""
return self.api_request(self._get_method_fullname("device"), kwargs)
|
infobloxopen/infoblox-netmri
|
infoblox_netmri/api/broker/v3_8_0/device_service_service_broker.py
|
Python
|
apache-2.0
| 49,305 | 0.002109 |
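# A minimal usage sketch for the find() call documented above. The client class,
# its constructor arguments and get_broker() follow the infoblox-netmri package's
# public interface; the host name and credentials are placeholders, so treat this
# as an illustration rather than a verified recipe.
from infoblox_netmri.client import InfobloxNetMRI

client = InfobloxNetMRI(host="netmri.example.com",   # placeholder appliance
                        username="admin",            # placeholder credentials
                        password="secret")

# Brokers are looked up by API object name. The op_*/val_c_* pairs express a
# field filter, while start/limit/sort/dir drive pagination and ordering, as
# described in the docstring above.
broker = client.get_broker("DeviceServiceService")
rows = broker.find(op_SvsvUsage="=",
                   val_c_SvsvUsage="child",
                   start=0,
                   limit=100,
                   sort=["DeviceServiceServiceID"],
                   dir=["asc"])
for row in rows:
    print(row)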
"""
Library for autotest-remote usage.
"""
import sys, os, re, traceback, signal, time, logging, getpass
try:
import autotest.common as common
except ImportError:
import common
from autotest.client.shared.global_config import global_config
require_atfork = global_config.get_config_value(
'AUTOSERV', 'require_atfork_module', type=bool, default=True)
try:
import atfork
atfork.monkeypatch_os_fork_functions()
import atfork.stdlib_fixer
# Fix the Python standard library for threading+fork safety with its
# internal locks. http://code.google.com/p/python-atfork/
import warnings
warnings.filterwarnings('ignore', 'logging module already imported')
atfork.stdlib_fixer.fix_logging_module()
except ImportError, e:
from autotest.client.shared import global_config
if global_config.global_config.get_config_value(
'AUTOSERV', 'require_atfork_module', type=bool, default=False):
print >>sys.stderr, 'Please run utils/build_externals.py'
print e
sys.exit(1)
from autotest.server import server_logging_config
from autotest.server import server_job, autoserv_parser
from autotest.server import autotest_remote
from autotest.client.shared import pidfile, logging_manager
def run_autoserv(pid_file_manager, results, parser):
# send stdin to /dev/null
dev_null = os.open(os.devnull, os.O_RDONLY)
os.dup2(dev_null, sys.stdin.fileno())
os.close(dev_null)
# Create separate process group
os.setpgrp()
# Implement SIGTERM handler
def handle_sigterm(signum, frame):
if pid_file_manager:
pid_file_manager.close_file(1, signal.SIGTERM)
os.killpg(os.getpgrp(), signal.SIGKILL)
# Set signal handler
signal.signal(signal.SIGTERM, handle_sigterm)
    # Ignore SIGTTOUs generated by output from forked children.
signal.signal(signal.SIGTTOU, signal.SIG_IGN)
# Server side tests that call shell scripts often depend on $USER being set
# but depending on how you launch your autotest scheduler it may not be set.
os.environ['USER'] = getpass.getuser()
if parser.options.machines:
machines = parser.options.machines.replace(',', ' ').strip().split()
else:
machines = []
machines_file = parser.options.machines_file
label = parser.options.label
group_name = parser.options.group_name
user = parser.options.user
client = parser.options.client
server = parser.options.server
install_before = parser.options.install_before
install_after = parser.options.install_after
verify = parser.options.verify
repair = parser.options.repair
cleanup = parser.options.cleanup
no_tee = parser.options.no_tee
parse_job = parser.options.parse_job
execution_tag = parser.options.execution_tag
if not execution_tag:
execution_tag = parse_job
host_protection = parser.options.host_protection
ssh_user = parser.options.ssh_user
ssh_port = parser.options.ssh_port
ssh_pass = parser.options.ssh_pass
collect_crashinfo = parser.options.collect_crashinfo
control_filename = parser.options.control_filename
# can't be both a client and a server side test
if client and server:
parser.parser.error("Can not specify a test as both server and client!")
if len(parser.args) < 1 and not (verify or repair or cleanup
or collect_crashinfo):
parser.parser.error("Missing argument: control file")
# We have a control file unless it's just a verify/repair/cleanup job
if len(parser.args) > 0:
control = parser.args[0]
else:
control = None
if machines_file:
machines = []
for m in open(machines_file, 'r').readlines():
# remove comments, spaces
m = re.sub('#.*', '', m).strip()
if m:
machines.append(m)
print "Read list of machines from file: %s" % machines_file
print ','.join(machines)
if machines:
for machine in machines:
if not machine or re.search('\s', machine):
parser.parser.error("Invalid machine: %s" % str(machine))
machines = list(set(machines))
machines.sort()
if group_name and len(machines) < 2:
parser.parser.error("-G %r may only be supplied with more than one machine."
% group_name)
kwargs = {'group_name': group_name, 'tag': execution_tag}
if control_filename:
kwargs['control_filename'] = control_filename
job = server_job.server_job(control, parser.args[1:], results, label,
user, machines, client, parse_job,
ssh_user, ssh_port, ssh_pass, **kwargs)
job.logging.start_logging()
job.init_parser()
# perform checks
job.precheck()
# run the job
exit_code = 0
try:
try:
if repair:
job.repair(host_protection)
elif verify:
job.verify()
else:
job.run(cleanup, install_before, install_after,
only_collect_crashinfo=collect_crashinfo)
finally:
while job.hosts:
host = job.hosts.pop()
host.close()
except:
exit_code = 1
traceback.print_exc()
if pid_file_manager:
pid_file_manager.num_tests_failed = job.num_tests_failed
pid_file_manager.close_file(exit_code)
job.cleanup_parser()
sys.exit(exit_code)
def main():
# grab the parser
parser = autoserv_parser.autoserv_parser
parser.parse_args()
if len(sys.argv) == 1:
parser.parser.print_help()
sys.exit(1)
if parser.options.no_logging:
results = None
else:
output_dir = global_config.get_config_value('COMMON',
'test_output_dir',
default="")
results = parser.options.results
if not results:
results = 'results.' + time.strftime('%Y-%m-%d-%H.%M.%S')
if output_dir:
results = os.path.join(output_dir, results)
results = os.path.abspath(results)
resultdir_exists = False
for filename in ('control.srv', 'status.log', '.autoserv_execute'):
if os.path.exists(os.path.join(results, filename)):
resultdir_exists = True
if not parser.options.use_existing_results and resultdir_exists:
error = "Error: results directory already exists: %s\n" % results
sys.stderr.write(error)
sys.exit(1)
        # Now that we've verified that there's no leftover results dir from
        # previous jobs, let's create the results dir, since the logging
        # system needs to create its log file in there.
if not os.path.isdir(results):
os.makedirs(results)
logging_manager.configure_logging(
server_logging_config.ServerLoggingConfig(), results_dir=results,
use_console=not parser.options.no_tee,
verbose=parser.options.verbose,
no_console_prefix=parser.options.no_console_prefix)
if results:
logging.info("Results placed in %s" % results)
    # wait until now to perform this check, so it gets properly logged
if parser.options.use_existing_results and not resultdir_exists:
logging.error("No existing results directory found: %s", results)
sys.exit(1)
if parser.options.write_pidfile:
pid_file_manager = pidfile.PidFileManager(parser.options.pidfile_label,
results)
pid_file_manager.open_file()
else:
pid_file_manager = None
autotest_remote.BaseAutotest.set_install_in_tmpdir(
parser.options.install_in_tmpdir)
exit_code = 0
try:
try:
run_autoserv(pid_file_manager, results, parser)
except SystemExit, e:
exit_code = e.code
except:
traceback.print_exc()
# If we don't know what happened, we'll classify it as
# an 'abort' and return 1.
exit_code = 1
finally:
if pid_file_manager:
pid_file_manager.close_file(exit_code)
sys.exit(exit_code)
|
ColinIanKing/autotest
|
server/autoserv.py
|
Python
|
gpl-2.0
| 8,389 | 0.001788 |
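# A distilled, standalone sketch of the process setup used by run_autoserv()
# above: stdin is redirected to /dev/null, the process becomes its own
# process-group leader, and a SIGTERM handler tears down the whole group.
# Only the standard library is used (POSIX only); the autotest pid-file
# bookkeeping and job plumbing are deliberately left out.
import os
import signal
import sys
import time


def setup_process_group():
    # Send stdin to /dev/null so forked children cannot block on interactive input.
    dev_null = os.open(os.devnull, os.O_RDONLY)
    os.dup2(dev_null, sys.stdin.fileno())
    os.close(dev_null)

    # Become a process-group leader so the whole job tree can be signalled at once.
    os.setpgrp()

    def handle_sigterm(signum, frame):
        # Killing the group (ourselves included) mirrors the hard-stop
        # behaviour of the original handler.
        os.killpg(os.getpgrp(), signal.SIGKILL)

    signal.signal(signal.SIGTERM, handle_sigterm)
    # Ignore SIGTTOUs raised when background children write to the terminal.
    signal.signal(signal.SIGTTOU, signal.SIG_IGN)


if __name__ == "__main__":
    setup_process_group()
    time.sleep(60)  # stand-in for the real job; `kill -TERM <pid>` stops the whole group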
import copy
import logging
import os
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
import numpy as np
from pimp.importance.importance import Importance
from smac.runhistory.runhistory import RunHistory, DataOrigin
from smac.utils.io.input_reader import InputReader
from smac.utils.validate import Validator
from smac import __version__ as smac_version
from cave.reader.smac2_reader import SMAC2Reader
from cave.reader.smac3_reader import SMAC3Reader
from cave.utils.helpers import scenario_sanity_check
from cave.utils.timing import timing
class ConfiguratorRun(object):
"""
ConfiguratorRuns load and maintain information about individual configurator
    runs. Several formats are supported: BOHB, SMAC3, SMAC2 and CSV.
This class is responsible for providing a scenario, a runhistory and a
trajectory and handling original/validated data appropriately.
To create a ConfiguratorRun from a folder, use Configurator.from_folder()
"""
def __init__(self,
scenario,
original_runhistory,
validated_runhistory,
trajectory,
options,
path_to_folder=None,
ta_exec_dir=None,
file_format=None,
validation_format=None,
reduced_to_budgets=None,
output_dir=None,
):
"""
Parameters
----------
scenario: Scenario
scenario
original_runhistory, validated_runhistory: RunHistory
            runhistories containing only the original evaluated data (during the optimization process) or the validated data
where points of interest are reevaluated after the optimization process
trajectory: List[dict]
a trajectory of the best performing configurations at each point in time
options: dict
options can define a number of custom settings
path_to_folder: str
path to the physical folder containing the data
ta_exec_dir: str
path to the target-algorithm-execution-directory. This is only important for SMAC-optimized data
file_format, validation_format: str
will be autodetected some point soon, until then, specify the file-format (SMAC2, SMAC3, BOHB, etc...)
        reduced_to_budgets: List of str, int or float
            budgets with which this ConfiguratorRun is associated
output_dir: str
where to save analysis-data for this cr
"""
self.logger = logging.getLogger("cave.ConfiguratorRun.{}".format(path_to_folder))
self.rng = np.random.RandomState(42)
self.options = options
self.path_to_folder = path_to_folder
self.reduced_to_budgets = [None] if reduced_to_budgets is None else reduced_to_budgets
self.scenario = scenario
self.original_runhistory = original_runhistory
self.validated_runhistory = validated_runhistory
self.trajectory = trajectory
self.ta_exec_dir = ta_exec_dir
self.file_format = file_format
self.validation_format = validation_format
if not output_dir:
self.logger.debug("New outputdir")
output_dir = tempfile.mkdtemp()
self.output_dir = os.path.join(output_dir, 'analysis_data', self.get_identifier())
os.makedirs(self.output_dir, exist_ok=True)
self.default = self.scenario.cs.get_default_configuration()
self.incumbent = self.trajectory[-1]['incumbent'] if self.trajectory else None
self.feature_names = self._get_feature_names()
# Create combined runhistory to collect all "real" runs
self.combined_runhistory = RunHistory()
self.combined_runhistory.update(self.original_runhistory, origin=DataOrigin.INTERNAL)
if self.validated_runhistory is not None:
self.combined_runhistory.update(self.validated_runhistory, origin=DataOrigin.EXTERNAL_SAME_INSTANCES)
# Create runhistory with estimated runs (create Importance-object of pimp and use epm-model for validation)
self.epm_runhistory = RunHistory()
self.epm_runhistory.update(self.combined_runhistory)
# Initialize importance and validator
self._init_pimp_and_validator()
try:
self._validate_default_and_incumbents("epm", self.ta_exec_dir)
except KeyError as err:
self.logger.debug(err, exc_info=True)
msg = "Validation of default and incumbent failed. SMAC (v: {}) does not support validation of budgets+ins"\
"tances yet, if you use budgets but no instances ignore this warning.".format(str(smac_version))
if self.feature_names:
self.logger.warning(msg)
else:
self.logger.debug(msg)
# Set during execution, to share information between Analyzers
self.share_information = {'parameter_importance': OrderedDict(),
'feature_importance': OrderedDict(),
'evaluators': OrderedDict(),
'validator': None,
'hpbandster_result': None, # Only for file-format BOHB
}
def get_identifier(self):
return self.identify(self.path_to_folder, self.reduced_to_budgets)
@classmethod
def identify(cls, path, budget):
path = path if path is not None else "all_folders"
budget = str(budget) if budget is not None else "all_budgets"
res = "_".join([path, budget]).replace('/', '_')
if len(res) > len(str(hash(res))):
res = str(hash(res))
return res
def get_budgets(self):
return set([k.budget for k in self.original_runhistory.data.keys()])
@classmethod
def from_folder(cls,
folder: str,
ta_exec_dir: str,
options,
file_format: str='SMAC3',
validation_format: str='NONE',
output_dir=None,
):
"""Initialize scenario, runhistory and incumbent from folder
Parameters
----------
folder: string
output-dir of this configurator-run -> this is also the 'id' for a single run in parallel optimization
ta_exec_dir: string
if the execution directory for the SMAC-run differs from the cwd,
there might be problems loading instance-, feature- or PCS-files
in the scenario-object. since instance- and PCS-files are necessary,
specify the path to the execution-dir of SMAC here
file_format: string
from [SMAC2, SMAC3, BOHB, APT, CSV]
validation_format: string
from [SMAC2, SMAC3, APT, CSV, NONE], in which format to look for validated data
"""
logger = logging.getLogger("cave.ConfiguratorRun.{}".format(folder))
logger.debug("Loading from \'%s\' with ta_exec_dir \'%s\' with file-format '%s' and validation-format %s. ",
folder, ta_exec_dir, file_format, validation_format)
if file_format == 'BOHB' or file_format == "APT":
logger.debug("File format is BOHB or APT, assmuming data was converted to SMAC3-format using "
"HpBandSter2SMAC from cave.reader.converter.hpbandster2smac.")
validation_format = validation_format if validation_format != 'NONE' else None
# Read in data (scenario, runhistory & trajectory)
reader = cls.get_reader(file_format, folder, ta_exec_dir)
scenario = reader.get_scenario()
scenario_sanity_check(scenario, logger)
original_runhistory = reader.get_runhistory(scenario.cs)
validated_runhistory = None
if validation_format == "NONE" or validation_format is None:
validation_format = None
else:
logger.debug('Using format %s for validation', validation_format)
vali_reader = cls.get_reader(validation_format, folder, ta_exec_dir)
vali_reader.scen = scenario
validated_runhistory = vali_reader.get_validated_runhistory(scenario.cs)
#self._check_rh_for_inc_and_def(self.validated_runhistory, 'validated runhistory')
logger.info("Found validated runhistory for \"%s\" and using "
"it for evaluation. #configs in validated rh: %d",
folder, len(validated_runhistory.config_ids))
trajectory = reader.get_trajectory(scenario.cs)
return cls(scenario,
original_runhistory,
validated_runhistory,
trajectory,
options,
path_to_folder=folder,
ta_exec_dir=ta_exec_dir,
file_format=file_format,
validation_format=validation_format,
output_dir=output_dir,
)
def get_incumbent(self):
return self.incumbent
def _init_pimp_and_validator(self,
alternative_output_dir=None,
):
"""
Create ParameterImportance-object and use it's trained model for validation and further predictions.
We pass a combined (original + validated) runhistory, so that the returned model will be based on as much
information as possible
Parameters
----------
alternative_output_dir: str
e.g. for budgets we want pimp to use an alternative output-dir (subfolders per budget)
"""
self.logger.debug("Using '%s' as output for pimp", alternative_output_dir if alternative_output_dir else
self.output_dir)
self.pimp = Importance(scenario=copy.deepcopy(self.scenario),
runhistory=self.combined_runhistory,
incumbent=self.incumbent if self.incumbent else self.default,
save_folder=alternative_output_dir if alternative_output_dir is not None else self.output_dir,
seed=self.rng.randint(1, 100000),
max_sample_size=self.options['fANOVA'].getint("pimp_max_samples"),
fANOVA_pairwise=self.options['fANOVA'].getboolean("fanova_pairwise"),
preprocess=False,
verbose=False, # disable progressbars in pimp...
)
# Validator (initialize without trajectory)
self.validator = Validator(self.scenario, None, None)
self.validator.epm = self.pimp.model
@timing
def _validate_default_and_incumbents(self,
method,
ta_exec_dir,
):
"""Validate default and incumbent configurations on all instances possible.
        Either use validation (physically execute the target algorithm) or EPM-estimate, and update the corresponding runhistory
(validation -> self.global_validated_rh; epm -> self.global_epm_rh).
Parameters
----------
method: str
epm or validation
ta_exec_dir: str
path from where the target algorithm can be executed as found in scenario (only used for actual validation)
"""
# TODO maybe just validate whole trajectory?
self.logger.debug("Validating %s using %s!", self.get_identifier(), method)
self.validator.traj = self.trajectory
if method == "validation":
with _changedir(ta_exec_dir):
# TODO determine # repetitions
new_rh = self.validator.validate('def+inc', 'train+test', 1, -1, runhistory=self.combined_runhistory)
self.validated_runhistory.update(new_rh)
            self.combined_runhistory.update(new_rh)
elif method == "epm":
# Only do test-instances if features for test-instances are available
instance_mode = 'train+test'
if (any([i not in self.scenario.feature_dict for i in self.scenario.test_insts]) and
any([i in self.scenario.feature_dict for i in self.scenario.train_insts])): # noqa
self.logger.debug("No features provided for test-instances (but for train!). Cannot validate on \"epm\".")
self.logger.warning("Features detected for train-instances, but not for test-instances. This is "
"unintended usage and may lead to errors for some analysis-methods.")
instance_mode = 'train'
new_rh = self.validator.validate_epm('def+inc', instance_mode, 1, runhistory=self.combined_runhistory)
self.epm_runhistory.update(new_rh)
else:
raise ValueError("Missing data method illegal (%s)", method)
self.validator.traj = None # Avoid usage-mistakes
def _get_feature_names(self):
if not self.scenario.feature_dict:
self.logger.info("No features available. Skipping feature analysis.")
return
feat_fn = self.scenario.feature_fn
if not self.scenario.feature_names:
self.logger.debug("`scenario.feature_names` is not set. Loading from '%s'", feat_fn)
with _changedir(self.ta_exec_dir if self.ta_exec_dir else '.'):
if not feat_fn or not os.path.exists(feat_fn):
self.logger.warning("Feature names are missing. Either provide valid feature_file in scenario "
"(currently %s) or set `scenario.feature_names` manually." % feat_fn)
self.logger.error("Skipping Feature Analysis.")
return
else:
# Feature names are contained in feature-file and retrieved
feat_names = InputReader().read_instance_features_file(feat_fn)[0]
else:
feat_names = copy.deepcopy(self.scenario.feature_names)
return feat_names
def _check_rh_for_inc_and_def(self, rh, name=''):
"""
Check if default and incumbent are evaluated on all instances in this rh
Parameters
----------
rh: RunHistory
runhistory to be checked
name: str
name for logging-purposes
Returns
-------
return_value: bool
False if either inc or def was not evaluated on all
train/test-instances
"""
return_value = True
for c_name, c in [("default", self.default), ("inc", self.incumbent)]:
runs = rh.get_runs_for_config(c, only_max_observed_budget=False)
evaluated = set([inst for inst, seed in runs])
for i_name, i in [("train", self.train_inst),
("test", self.test_inst)]:
not_evaluated = set(i) - evaluated
if len(not_evaluated) > 0:
self.logger.debug("RunHistory %s only evaluated on %d/%d %s-insts for %s in folder %s",
                                      name, len(i) - len(not_evaluated), len(i), i_name, c_name, self.path_to_folder)
return_value = False
return return_value
@classmethod
def get_reader(cls, name, folder, ta_exec_dir):
""" Returns an appropriate reader for the specified format. """
# TODO make autodetect format (here? where?)
if name == 'SMAC3':
return SMAC3Reader(folder, ta_exec_dir)
elif name == 'BOHB':
return SMAC3Reader(folder, ta_exec_dir)
elif name == 'APT':
return SMAC3Reader(folder, ta_exec_dir)
elif name == 'SMAC2':
return SMAC2Reader(folder, ta_exec_dir)
elif name == 'CSV':
return SMAC3Reader(folder, ta_exec_dir)
else:
raise ValueError("%s not supported as file-format" % name)
@contextmanager
def _changedir(newdir):
""" Helper function to change directory, for example to create a scenario from file, where paths to the instance-
and feature-files are relative to the original SMAC-execution-directory. Same with target algorithms that need
be executed for validation. """
olddir = os.getcwd()
os.chdir(os.path.expanduser(newdir))
try:
yield
finally:
os.chdir(olddir)
|
automl/SpySMAC
|
cave/reader/configurator_run.py
|
Python
|
bsd-3-clause
| 16,612 | 0.003792 |
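# A hedged usage sketch for ConfiguratorRun.from_folder() shown above. The
# folder paths are placeholders, and the options object is approximated with a
# plain ConfigParser because the class reads settings such as
# options['fANOVA'].getint('pimp_max_samples'); in CAVE this object normally
# comes from the tool's own default configuration, so this illustrates the call
# signature rather than a verified end-to-end run.
from configparser import ConfigParser

from cave.reader.configurator_run import ConfiguratorRun

options = ConfigParser()
options.read_dict({
    "fANOVA": {"pimp_max_samples": "-1", "fanova_pairwise": "True"},
})

cr = ConfiguratorRun.from_folder(
    folder="smac3_output/run_1",      # placeholder SMAC3 output directory
    ta_exec_dir=".",                  # where the target algorithm was executed
    options=options,
    file_format="SMAC3",
    validation_format="NONE",
    output_dir="cave_analysis",
)
print(cr.get_identifier(), cr.get_budgets())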
from django.contrib import admin
from workflow.models import State, StateLog, NextState, Project, Location
from workflow.activities import StateActivity
class NextStateInline(admin.StackedInline):
model = NextState
fk_name = 'current_state'
extra = 0
class StateAdmin(admin.ModelAdmin):
inlines = [NextStateInline, ]
list_display = ('name', 'is_work_state',)
class StateLogAdmin(admin.ModelAdmin):
readonly_fields = ['start', 'end', 'state', 'user']
list_display = ('user', 'state', 'project', 'location', 'start', 'end',)
admin.site.register(State, StateAdmin)
admin.site.register(StateLog, StateLogAdmin)
admin.site.register(Project)
admin.site.register(Location)
|
django-stars/dash2011
|
presence/apps/workflow/admin.py
|
Python
|
bsd-3-clause
| 703 | 0 |
import unittest
import instruction_set
class TestInstructionSet(unittest.TestCase):
def test_generate(self):
self.assertIsInstance(instruction_set.generate(), list)
self.assertEqual(len(instruction_set.generate()), 64)
self.assertEqual(len(instruction_set.generate(32)), 32)
inset = instruction_set.generate()
for instruction in inset:
self.assertGreaterEqual(instruction, 0)
self.assertLess(instruction, 256)
def test_crossover(self):
parent1 = instruction_set.generate()
parent2 = instruction_set.generate()
children = instruction_set.crossover(parent1, parent2)
random_children = instruction_set.crossover(parent1, parent2, take_random=True)
self.assertIsInstance(children, tuple)
self.assertIsInstance(children[0], list)
self.assertIsInstance(children[1], list)
self.assertEqual(len(children[0]), len(parent1))
self.assertEqual(len(children[1]), len(parent1))
for i, _ in enumerate(parent1):
self.assertTrue(
(children[0][i] in parent1 and children[1][i] in parent2) or
(children[0][i] in parent2 and children[1][i] in parent1)
)
self.assertTrue(
(random_children[0][i] in parent1 and random_children[1][i] in parent2) or
(random_children[0][i] in parent2 and random_children[1][i] in parent1)
)
def test_mutate_bits(self):
inset = instruction_set.generate()
self.assertEqual(len(inset), len(instruction_set.mutate_bits(inset)))
self.assertEqual(inset, instruction_set.mutate_bits(inset, mutation_chance=0))
self.assertNotEqual(inset, instruction_set.mutate_bits(inset, mutation_chance=100))
for instruction in instruction_set.mutate_bits(inset):
self.assertGreaterEqual(instruction, 0)
self.assertLess(instruction, 256)
def test_mutate_bytes(self):
inset = instruction_set.generate()
self.assertEqual(len(inset), len(instruction_set.mutate_bytes(inset)))
self.assertEqual(inset, instruction_set.mutate_bytes(inset, mutation_chance=0))
self.assertNotEqual(inset, instruction_set.mutate_bytes(inset, mutation_chance=100))
for instruction in instruction_set.mutate_bytes(inset):
self.assertGreaterEqual(instruction, 0)
self.assertLess(instruction, 256)
def test_mutate_combined(self):
inset = instruction_set.generate()
self.assertEqual(len(inset), len(instruction_set.mutate_combined(inset)))
for instruction in instruction_set.mutate_combined(inset):
self.assertGreaterEqual(instruction, 0)
self.assertLess(instruction, 256)
if __name__ == '__main__':
unittest.main()
|
chuckeles/genetic-treasures
|
test_instruction_set.py
|
Python
|
mit
| 2,849 | 0.002808 |
import json
import time
from cStringIO import StringIO
import pytest
from Crypt import CryptBitcoin
from Content.ContentManager import VerifyError, SignError
from util.SafeRe import UnsafePatternError
@pytest.mark.usefixtures("resetSettings")
class TestContent:
privatekey = "5KUh3PvNm5HUWoCfSUfcYvfQ2g3PrRNJWr6Q9eqdBGu23mtMntv"
def testInclude(self, site):
# Rules defined in parent content.json
rules = site.content_manager.getRules("data/test_include/content.json")
assert rules["signers"] == ["15ik6LeBWnACWfaika1xqGapRZ1zh3JpCo"] # Valid signer
assert rules["user_name"] == "test" # Extra data
assert rules["max_size"] == 20000 # Max size of files
assert not rules["includes_allowed"] # Don't allow more includes
assert rules["files_allowed"] == "data.json" # Allowed file pattern
# Valid signers for "data/test_include/content.json"
valid_signers = site.content_manager.getValidSigners("data/test_include/content.json")
assert "15ik6LeBWnACWfaika1xqGapRZ1zh3JpCo" in valid_signers # Extra valid signer defined in parent content.json
assert "1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT" in valid_signers # The site itself
assert len(valid_signers) == 2 # No more
# Valid signers for "data/users/content.json"
valid_signers = site.content_manager.getValidSigners("data/users/content.json")
assert "1LSxsKfC9S9TVXGGNSM3vPHjyW82jgCX5f" in valid_signers # Extra valid signer defined in parent content.json
assert "1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT" in valid_signers # The site itself
assert len(valid_signers) == 2
# Valid signers for root content.json
assert site.content_manager.getValidSigners("content.json") == ["1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT"]
    def testIncludeLimits(self, site):
# Data validation
data_dict = {
"files": {
"data.json": {
"sha512": "369d4e780cc80504285f13774ca327fe725eed2d813aad229e62356b07365906",
"size": 505
}
},
"modified": time.time()
}
# Normal data
data_dict["signs"] = {"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(data_dict), self.privatekey)}
data = StringIO(json.dumps(data_dict))
assert site.content_manager.verifyFile("data/test_include/content.json", data, ignore_same=False)
# Reset
del data_dict["signs"]
# Too large
data_dict["files"]["data.json"]["size"] = 200000 # Emulate 2MB sized data.json
data_dict["signs"] = {"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(data_dict), self.privatekey)}
data = StringIO(json.dumps(data_dict))
with pytest.raises(VerifyError) as err:
site.content_manager.verifyFile("data/test_include/content.json", data, ignore_same=False)
assert "Include too large" in str(err)
# Reset
data_dict["files"]["data.json"]["size"] = 505
del data_dict["signs"]
# Not allowed file
data_dict["files"]["notallowed.exe"] = data_dict["files"]["data.json"]
data_dict["signs"] = {"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(data_dict), self.privatekey)}
data = StringIO(json.dumps(data_dict))
with pytest.raises(VerifyError) as err:
site.content_manager.verifyFile("data/test_include/content.json", data, ignore_same=False)
assert "File not allowed" in str(err)
# Reset
del data_dict["files"]["notallowed.exe"]
del data_dict["signs"]
# Should work again
data_dict["signs"] = {"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(data_dict), self.privatekey)}
data = StringIO(json.dumps(data_dict))
assert site.content_manager.verifyFile("data/test_include/content.json", data, ignore_same=False)
@pytest.mark.parametrize("inner_path", ["content.json", "data/test_include/content.json", "data/users/content.json"])
def testSign(self, site, inner_path):
# Bad privatekey
with pytest.raises(SignError) as err:
site.content_manager.sign(inner_path, privatekey="5aaa3PvNm5HUWoCfSUfcYvfQ2g3PrRNJWr6Q9eqdBGu23mtMnaa", filewrite=False)
assert "Private key invalid" in str(err)
# Good privatekey
content = site.content_manager.sign(inner_path, privatekey=self.privatekey, filewrite=False)
content_old = site.content_manager.contents[inner_path] # Content before the sign
assert not content_old == content # Timestamp changed
assert site.address in content["signs"] # Used the site's private key to sign
if inner_path == "content.json":
assert len(content["files"]) == 17
elif inner_path == "data/test-include/content.json":
assert len(content["files"]) == 1
elif inner_path == "data/users/content.json":
assert len(content["files"]) == 0
# Everything should be same as before except the modified timestamp and the signs
assert (
{key: val for key, val in content_old.items() if key not in ["modified", "signs", "sign", "zeronet_version"]}
==
{key: val for key, val in content.items() if key not in ["modified", "signs", "sign", "zeronet_version"]}
)
def testSignOptionalFiles(self, site):
for hash in list(site.content_manager.hashfield):
site.content_manager.hashfield.remove(hash)
assert len(site.content_manager.hashfield) == 0
site.content_manager.contents["content.json"]["optional"] = "((data/img/zero.*))"
content_optional = site.content_manager.sign(privatekey=self.privatekey, filewrite=False, remove_missing_optional=True)
del site.content_manager.contents["content.json"]["optional"]
content_nooptional = site.content_manager.sign(privatekey=self.privatekey, filewrite=False, remove_missing_optional=True)
assert len(content_nooptional.get("files_optional", {})) == 0 # No optional files if no pattern
assert len(content_optional["files_optional"]) > 0
assert len(site.content_manager.hashfield) == len(content_optional["files_optional"]) # Hashed optional files should be added to hashfield
assert len(content_nooptional["files"]) > len(content_optional["files"])
def testFileInfo(self, site):
assert "sha512" in site.content_manager.getFileInfo("index.html")
assert site.content_manager.getFileInfo("data/img/domain.png")["content_inner_path"] == "content.json"
assert site.content_manager.getFileInfo("data/users/hello.png")["content_inner_path"] == "data/users/content.json"
assert site.content_manager.getFileInfo("data/users/content.json")["content_inner_path"] == "data/users/content.json"
assert not site.content_manager.getFileInfo("notexist")
# Optional file
file_info_optional = site.content_manager.getFileInfo("data/optional.txt")
assert "sha512" in file_info_optional
assert file_info_optional["optional"] is True
# Not exists yet user content.json
assert "cert_signers" in site.content_manager.getFileInfo("data/users/unknown/content.json")
# Optional user file
file_info_optional = site.content_manager.getFileInfo("data/users/1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9/peanut-butter-jelly-time.gif")
assert "sha512" in file_info_optional
assert file_info_optional["optional"] is True
def testVerify(self, site):
inner_path = "data/test_include/content.json"
data_dict = site.storage.loadJson(inner_path)
data = StringIO(json.dumps(data_dict))
# Re-sign
data_dict["signs"] = {
"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(data_dict, sort_keys=True), self.privatekey)
}
assert site.content_manager.verifyFile(inner_path, data, ignore_same=False)
# Wrong address
data_dict["address"] = "Othersite"
del data_dict["signs"]
data_dict["signs"] = {
"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(data_dict, sort_keys=True), self.privatekey)
}
data = StringIO(json.dumps(data_dict))
with pytest.raises(VerifyError) as err:
site.content_manager.verifyFile(inner_path, data, ignore_same=False)
assert "Wrong site address" in str(err)
# Wrong inner_path
data_dict["address"] = "1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT"
data_dict["inner_path"] = "content.json"
del data_dict["signs"]
data_dict["signs"] = {
"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(data_dict, sort_keys=True), self.privatekey)
}
data = StringIO(json.dumps(data_dict))
with pytest.raises(VerifyError) as err:
site.content_manager.verifyFile(inner_path, data, ignore_same=False)
assert "Wrong inner_path" in str(err)
# Everything right again
data_dict["address"] = "1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT"
data_dict["inner_path"] = inner_path
del data_dict["signs"]
data_dict["signs"] = {
"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(data_dict, sort_keys=True), self.privatekey)
}
data = StringIO(json.dumps(data_dict))
assert site.content_manager.verifyFile(inner_path, data, ignore_same=False)
def testVerifyInnerPath(self, site):
inner_path = "content.json"
data_dict = site.storage.loadJson(inner_path)
for good_relative_path in ["data.json", "out/data.json", "Any File [by none] (1).jpg"]:
data_dict["files"] = {good_relative_path: {"sha512": "369d4e780cc80504285f13774ca327fe725eed2d813aad229e62356b07365906", "size": 505}}
if "sign" in data_dict:
del data_dict["sign"]
del data_dict["signs"]
data_dict["signs"] = {
"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(data_dict, sort_keys=True), self.privatekey)
}
data = StringIO(json.dumps(data_dict))
assert site.content_manager.verifyFile(inner_path, data, ignore_same=False)
for bad_relative_path in ["../data.json", "data/" * 100, "invalid|file.jpg"]:
data_dict["files"] = {bad_relative_path: {"sha512": "369d4e780cc80504285f13774ca327fe725eed2d813aad229e62356b07365906", "size": 505}}
if "sign" in data_dict:
del data_dict["sign"]
del data_dict["signs"]
data_dict["signs"] = {
"1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT": CryptBitcoin.sign(json.dumps(data_dict, sort_keys=True), self.privatekey)
}
data = StringIO(json.dumps(data_dict))
with pytest.raises(VerifyError) as err:
site.content_manager.verifyFile(inner_path, data, ignore_same=False)
assert "Invalid relative path" in str(err)
@pytest.mark.parametrize("key", ["ignore", "optional"])
def testSignUnsafePattern(self, site, key):
site.content_manager.contents["content.json"][key] = "([a-zA-Z]+)*"
with pytest.raises(UnsafePatternError) as err:
site.content_manager.sign("content.json", privatekey=self.privatekey, filewrite=False)
assert "Potentially unsafe" in str(err)
def testVerifyUnsafePattern(self, site):
site.content_manager.contents["content.json"]["includes"]["data/test_include/content.json"]["files_allowed"] = "([a-zA-Z]+)*"
with pytest.raises(UnsafePatternError) as err:
with site.storage.open("data/test_include/content.json") as data:
site.content_manager.verifyFile("data/test_include/content.json", data, ignore_same=False)
assert "Potentially unsafe" in str(err)
site.content_manager.contents["data/users/content.json"]["user_contents"]["permission_rules"]["([a-zA-Z]+)*"] = {"max_size": 0}
with pytest.raises(UnsafePatternError) as err:
with site.storage.open("data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q/content.json") as data:
site.content_manager.verifyFile("data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q/content.json", data, ignore_same=False)
assert "Potentially unsafe" in str(err)
|
OliverCole/ZeroNet
|
src/Test/TestContent.py
|
Python
|
gpl-2.0
| 12,511 | 0.004876 |
# pylint: disable=missing-module-docstring, missing-class-docstring
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('rdrhc_calendar', '0013_auto_20171016_1915'),
]
operations = [
migrations.RenameField(
model_name='shift',
old_name='user',
new_name='sb_user',
),
migrations.RenameField(
model_name='shiftcode',
old_name='user',
new_name='sb_user',
),
migrations.AlterUniqueTogether(
name='shiftcode',
unique_together=set([('code', 'sb_user', 'role')]),
),
]
|
studybuffalo/studybuffalo
|
study_buffalo/rdrhc_calendar/migrations/0014_auto_20171016_1922.py
|
Python
|
gpl-3.0
| 813 | 0 |
from sdoc.sdoc1.data_type.DataType import DataType
class StringDataType(DataType):
"""
Class for string data types.
"""
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, value: str):
"""
Object constructor.
:param str value: The value of this string constant.
"""
self._value: str = value
"""
        The value of this string constant.
"""
# ------------------------------------------------------------------------------------------------------------------
def debug(self, indent: int = 0) -> str:
"""
Returns a string for debugging.
:param int indent: Unused.
"""
return "'" + self._value + "'"
# ------------------------------------------------------------------------------------------------------------------
def dereference(self):
"""
Returns a clone of this string.
:rtype: sdoc.sdoc1.data_type.StringDataType.StringDataType
"""
return StringDataType(self._value)
# ------------------------------------------------------------------------------------------------------------------
def get_value(self) -> str:
"""
        Returns the underlying value of this data type.
"""
return self._value
# ------------------------------------------------------------------------------------------------------------------
def get_type_id(self) -> int:
"""
Returns the ID of this data type.
"""
return DataType.STRING
# ------------------------------------------------------------------------------------------------------------------
def is_constant(self) -> bool:
"""
Returns False always.
"""
return False
# ------------------------------------------------------------------------------------------------------------------
def is_defined(self) -> bool:
"""
Returns True always.
"""
return True
# ------------------------------------------------------------------------------------------------------------------
def is_scalar(self) -> bool:
"""
Returns True always.
"""
return True
# ------------------------------------------------------------------------------------------------------------------
def is_true(self) -> bool:
"""
Returns True if this string is not empty. Returns False otherwise.
"""
return self._value != ''
# ------------------------------------------------------------------------------------------------------------------
def __str__(self) -> str:
"""
Returns the string representation of the string constant.
"""
return self._value
# ----------------------------------------------------------------------------------------------------------------------
|
SDoc/py-sdoc
|
sdoc/sdoc1/data_type/StringDataType.py
|
Python
|
mit
| 3,015 | 0 |
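# A small sketch exercising the StringDataType class above; both import paths
# mirror this file's location in the py-sdoc package.
from sdoc.sdoc1.data_type.DataType import DataType
from sdoc.sdoc1.data_type.StringDataType import StringDataType

name = StringDataType('hello')
clone = name.dereference()                     # independent copy of the constant
print(str(name))                               # hello
print(name.debug())                            # 'hello'
print(name.is_true())                          # True: the string is non-empty
print(StringDataType('').is_true())            # False: empty strings count as false
print(name.get_type_id() == DataType.STRING)   # True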
import pytest
from mitmproxy.addons import intercept
from mitmproxy import exceptions
from mitmproxy.test import taddons
from mitmproxy.test import tflow
@pytest.mark.asyncio
async def test_simple():
r = intercept.Intercept()
with taddons.context(r) as tctx:
assert not r.filt
tctx.configure(r, intercept="~q")
assert r.filt
assert tctx.options.intercept_active
with pytest.raises(exceptions.OptionsError):
tctx.configure(r, intercept="~~")
tctx.configure(r, intercept=None)
assert not r.filt
assert not tctx.options.intercept_active
tctx.configure(r, intercept="~s")
f = tflow.tflow(resp=True)
await tctx.cycle(r, f)
assert f.intercepted
f = tflow.tflow(resp=False)
await tctx.cycle(r, f)
assert not f.intercepted
f = tflow.tflow(resp=True)
r.response(f)
assert f.intercepted
tctx.configure(r, intercept_active=False)
f = tflow.tflow(resp=True)
await tctx.cycle(r, f)
assert not f.intercepted
tctx.configure(r, intercept_active=True)
f = tflow.tflow(resp=True)
await tctx.cycle(r, f)
assert f.intercepted
@pytest.mark.asyncio
async def test_tcp():
r = intercept.Intercept()
with taddons.context(r) as tctx:
tctx.configure(r, intercept="~tcp")
f = tflow.ttcpflow()
await tctx.cycle(r, f)
assert f.intercepted
tctx.configure(r, intercept_active=False)
f = tflow.ttcpflow()
await tctx.cycle(r, f)
assert not f.intercepted
|
mitmproxy/mitmproxy
|
test/mitmproxy/addons/test_intercept.py
|
Python
|
mit
| 1,632 | 0 |
"""
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is a XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC(gamma='auto')
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
linestyles='dashed')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired,
edgecolors='k')
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
|
glemaitre/scikit-learn
|
examples/svm/plot_svm_nonlinear.py
|
Python
|
bsd-3-clause
| 1,136 | 0.002641 |
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from urlparse import urlparse
import httplib2
import urllib
import logging
from datetime import datetime
from lxml import etree
from django.conf import settings
from django.db import models
from django.db.models import signals
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext, ugettext_lazy as _
from django.core.urlresolvers import reverse
from geonode import GeoNodeException
from geonode.base.models import ResourceBase, ResourceBaseManager, Link, \
resourcebase_post_save, resourcebase_post_delete
from geonode.utils import _user, _password, get_wms
from geonode.utils import http_client
from geonode.geoserver.helpers import cascading_delete
from geonode.people.models import Profile
from geonode.security.enumerations import AUTHENTICATED_USERS, ANONYMOUS_USERS
from geonode.layers.ows import wcs_links, wfs_links, wms_links, \
wps_execute_layer_attribute_statistics
from geonode.layers.enumerations import LAYER_ATTRIBUTE_NUMERIC_DATA_TYPES
from geonode.utils import ogc_server_settings
from geoserver.catalog import Catalog, FailedRequestError
from agon_ratings.models import OverallRating
logger = logging.getLogger("geonode.layers.models")
class Style(models.Model):
"""Model for storing styles.
"""
name = models.CharField(_('style name'), max_length=255, unique=True)
sld_title = models.CharField(max_length=255, null=True, blank=True)
sld_body = models.TextField(_('sld text'), null=True, blank=True)
sld_version = models.CharField(_('sld version'), max_length=12, null=True, blank=True)
    sld_url = models.CharField(_('sld url'), null=True, max_length=1000)
workspace = models.CharField(max_length=255, null=True, blank=True)
def __str__(self):
return "%s" % self.name.encode('utf-8')
class LayerManager(ResourceBaseManager):
def __init__(self):
models.Manager.__init__(self)
url = ogc_server_settings.rest
self.gs_catalog = Catalog(url, _user, _password)
def add_bbox_query(q, bbox):
'''modify the queryset q to limit to the provided bbox
bbox - 4 tuple of floats representing x0,x1,y0,y1
returns the modified query
'''
bbox = map(str, bbox) # 2.6 compat - float to decimal conversion
q = q.filter(bbox_x0__gte=bbox[0])
q = q.filter(bbox_x1__lte=bbox[1])
q = q.filter(bbox_y0__gte=bbox[2])
return q.filter(bbox_y1__lte=bbox[3])
class Layer(ResourceBase):
"""
Layer (inherits ResourceBase fields)
"""
# internal fields
objects = LayerManager()
workspace = models.CharField(max_length=128)
store = models.CharField(max_length=128)
storeType = models.CharField(max_length=128)
name = models.CharField(max_length=128)
typename = models.CharField(max_length=128, unique=True)
popular_count = models.IntegerField(default=0)
share_count = models.IntegerField(default=0)
default_style = models.ForeignKey(Style, related_name='layer_default_style', null=True, blank=True)
styles = models.ManyToManyField(Style, related_name='layer_styles')
def update_thumbnail(self, save=True):
try:
self.save_thumbnail(self._thumbnail_url(width=200, height=150), save)
except RuntimeError, e:
logger.warn('Could not create thumbnail for %s' % self, e)
def _render_thumbnail(self, spec):
resp, content = http_client.request(spec)
if 'ServiceException' in content or resp.status < 200 or resp.status > 299:
msg = 'Unable to obtain thumbnail: %s' % content
raise RuntimeError(msg)
return content
def _thumbnail_url(self, width=20, height=None):
""" Generate a URL representing thumbnail of the layer """
params = {
'layers': self.typename.encode('utf-8'),
'format': 'image/png8',
'width': width,
}
if height is not None:
params['height'] = height
# Avoid using urllib.urlencode here because it breaks the url.
# commas and slashes in values get encoded and then cause trouble
# with the WMS parser.
p = "&".join("%s=%s"%item for item in params.items())
return ogc_server_settings.LOCATION + "wms/reflect?" + p
def verify(self):
"""Makes sure the state of the layer is consistent in GeoServer and Catalogue.
"""
# Check the layer is in the wms get capabilities record
# FIXME: Implement caching of capabilities record site wide
_local_wms = get_wms()
record = _local_wms.contents.get(self.typename)
if record is None:
msg = "WMS Record missing for layer [%s]" % self.typename.encode('utf-8')
raise GeoNodeException(msg)
@property
def display_type(self):
return ({
"dataStore" : "Vector Data",
"coverageStore": "Raster Data",
}).get(self.storeType, "Data")
@property
def store_type(self):
cat = Layer.objects.gs_catalog
res = cat.get_resource(self.name)
res.store.fetch()
return res.store.dom.find('type').text
@property
def service_type(self):
if self.storeType == 'coverageStore':
return "WCS"
if self.storeType == 'dataStore':
return "WFS"
def get_absolute_url(self):
return reverse('layer_detail', args=(self.typename,))
def attribute_config(self):
#Get custom attribute sort order and labels if any
cfg = {}
visible_attributes = self.attribute_set.visible()
if (visible_attributes.count() > 0):
cfg["getFeatureInfo"] = {
"fields": [l.attribute for l in visible_attributes],
"propertyNames": dict([(l.attribute,l.attribute_label) for l in visible_attributes])
}
return cfg
def __str__(self):
return "%s Layer" % self.typename.encode('utf-8')
class Meta:
# custom permissions,
# change and delete are standard in django
permissions = (('view_layer', 'Can view'),
('change_layer_permissions', "Can change permissions"), )
# Permission Level Constants
# LEVEL_NONE inherited
LEVEL_READ = 'layer_readonly'
LEVEL_WRITE = 'layer_readwrite'
LEVEL_ADMIN = 'layer_admin'
def set_default_permissions(self):
self.set_gen_level(ANONYMOUS_USERS, self.LEVEL_READ)
self.set_gen_level(AUTHENTICATED_USERS, self.LEVEL_READ)
# remove specific user permissions
current_perms = self.get_all_level_info()
for username in current_perms['users'].keys():
user = User.objects.get(username=username)
self.set_user_level(user, self.LEVEL_NONE)
# assign owner admin privileges
if self.owner:
self.set_user_level(self.owner, self.LEVEL_ADMIN)
def tiles_url(self):
return self.link_set.get(name='Tiles').url
def maps(self):
from geonode.maps.models import MapLayer
return MapLayer.objects.filter(name=self.typename)
@property
def class_name(self):
return self.__class__.__name__
class Layer_Styles(models.Model):
layer = models.ForeignKey(Layer)
style = models.ForeignKey(Style)
class AttributeManager(models.Manager):
"""Helper class to access filtered attributes
"""
def visible(self):
return self.get_query_set().filter(visible=True).order_by('display_order')
class Attribute(models.Model):
"""
Auxiliary model for storing layer attributes.
This helps reduce the need for runtime lookups
to GeoServer, and lets users customize attribute titles,
sort order, and visibility.
"""
layer = models.ForeignKey(Layer, blank=False, null=False, unique=False, related_name='attribute_set')
attribute = models.CharField(_('attribute name'), help_text=_('name of attribute as stored in shapefile/spatial database'), max_length=255, blank=False, null=True, unique=False)
description = models.CharField(_('attribute description'), help_text=_('description of attribute to be used in metadata'), max_length=255, blank=True, null=True)
attribute_label = models.CharField(_('attribute label'), help_text=_('title of attribute as displayed in GeoNode'), max_length=255, blank=False, null=True, unique=False)
attribute_type = models.CharField(_('attribute type'), help_text=_('the data type of the attribute (integer, string, geometry, etc)'), max_length=50, blank=False, null=False, default='xsd:string', unique=False)
visible = models.BooleanField(_('visible?'), help_text=_('specifies if the attribute should be displayed in identify results'), default=True)
display_order = models.IntegerField(_('display order'), help_text=_('specifies the order in which attribute should be displayed in identify results'), default=1)
# statistical derivations
count = models.IntegerField(_('count'), help_text=_('count value for this field'), default=1)
min = models.CharField(_('min'), help_text=_('minimum value for this field'), max_length=255, blank=False, null=True, unique=False, default='NA')
max = models.CharField(_('max'), help_text=_('maximum value for this field'), max_length=255, blank=False, null=True, unique=False, default='NA')
average = models.CharField(_('average'), help_text=_('average value for this field'), max_length=255, blank=False, null=True, unique=False, default='NA')
median = models.CharField(_('median'), help_text=_('median value for this field'), max_length=255, blank=False, null=True, unique=False, default='NA')
stddev = models.CharField(_('standard deviation'), help_text=_('standard deviation for this field'), max_length=255, blank=False, null=True, unique=False, default='NA')
sum = models.CharField(_('sum'), help_text=_('sum value for this field'), max_length=255, blank=False, null=True, unique=False, default='NA')
unique_values = models.TextField(_('unique values for this field'), null=True, blank=True, default='NA')
    last_stats_updated = models.DateTimeField(_('last modified'), default=datetime.now, help_text=_('date when attribute statistics were last updated'))  # passing the method itself, not its result, so the default is evaluated at save time
objects = AttributeManager()
def __str__(self):
return "%s" % self.attribute_label.encode("utf-8") if self.attribute_label else self.attribute.encode("utf-8")
def unique_values_as_list(self):
return self.unique_values.split(',')
def geoserver_pre_delete(instance, sender, **kwargs):
"""Removes the layer from GeoServer
"""
ct = ContentType.objects.get_for_model(instance)
    OverallRating.objects.filter(content_type=ct, object_id=instance.id).delete()
#cascading_delete should only be called if ogc_server_settings.BACKEND_WRITE_ENABLED == True
if getattr(ogc_server_settings,"BACKEND_WRITE_ENABLED", True):
cascading_delete(Layer.objects.gs_catalog, instance.typename)
def pre_save_layer(instance, sender, **kwargs):
if kwargs.get('raw', False):
instance.owner = instance.resourcebase_ptr.owner
instance.uuid = instance.resourcebase_ptr.uuid
instance.bbox_x0 = instance.resourcebase_ptr.bbox_x0
instance.bbox_x1 = instance.resourcebase_ptr.bbox_x1
instance.bbox_y0 = instance.resourcebase_ptr.bbox_y0
instance.bbox_y1 = instance.resourcebase_ptr.bbox_y1
if instance.abstract == '' or instance.abstract is None:
instance.abstract = 'No abstract provided'
if instance.title == '' or instance.title is None:
instance.title = instance.name
def pre_delete_layer(instance, sender, **kwargs):
"""
    Remove any style associated with the layer, if it is not used by other layers.
    The default style is deleted in post_delete_layer.
"""
logger.debug("Going to delete the styles associated for [%s]", instance.typename.encode('utf-8'))
default_style = instance.default_style
for style in instance.styles.all():
        if style.layer_styles.all().count() == 1:
if style != default_style:
style.delete()
def post_delete_layer(instance, sender, **kwargs):
"""
    Remove the layer from any associated maps, if any.
    Remove the layer's default style.
"""
from geonode.maps.models import MapLayer
logger.debug("Going to delete associated maplayers for [%s]", instance.typename.encode('utf-8'))
MapLayer.objects.filter(name=instance.typename).delete()
logger.debug("Going to delete the default style for [%s]", instance.typename.encode('utf-8'))
if instance.default_style and Layer.objects.filter(default_style__id=instance.default_style.id).count() == 0:
instance.default_style.delete()
def geoserver_pre_save(instance, sender, **kwargs):
"""Send information to geoserver.
The attributes sent include:
* Title
* Abstract
* Name
* Keywords
* Metadata Links,
* Point of Contact name and url
"""
url = ogc_server_settings.internal_rest
try:
gs_catalog = Catalog(url, _user, _password)
gs_resource = gs_catalog.get_resource(instance.name)
except (EnvironmentError, FailedRequestError) as e:
gs_resource = None
msg = ('Could not connect to geoserver at "%s"'
'to save information for layer "%s"' % (
ogc_server_settings.LOCATION, instance.name.encode('utf-8'))
)
logger.warn(msg, e)
# If geoserver is not online, there is no need to continue
return
# If there is no resource returned it could mean one of two things:
# a) There is a synchronization problem in geoserver
# b) The unit tests are running and another geoserver is running in the
# background.
# For both cases it is sensible to stop processing the layer
if gs_resource is None:
logger.warn('Could not get geoserver resource for %s' % instance)
return
gs_resource.title = instance.title
gs_resource.abstract = instance.abstract
    gs_resource.name = instance.name
# Get metadata links
metadata_links = []
for link in instance.link_set.metadata():
metadata_links.append((link.name, link.mime, link.url))
gs_resource.metadata_links = metadata_links
#gs_resource should only be called if ogc_server_settings.BACKEND_WRITE_ENABLED == True
if getattr(ogc_server_settings,"BACKEND_WRITE_ENABLED", True):
gs_catalog.save(gs_resource)
gs_layer = gs_catalog.get_layer(instance.name)
if instance.poc and instance.poc.user:
gs_layer.attribution = str(instance.poc.user)
profile = Profile.objects.get(user=instance.poc.user)
gs_layer.attribution_link = settings.SITEURL[:-1] + profile.get_absolute_url()
#gs_layer should only be called if ogc_server_settings.BACKEND_WRITE_ENABLED == True
if getattr(ogc_server_settings,"BACKEND_WRITE_ENABLED", True):
gs_catalog.save(gs_layer)
"""Get information from geoserver.
The attributes retrieved include:
* Bounding Box
* SRID
* Download links (WMS, WCS or WFS and KML)
* Styles (SLD)
"""
gs_resource = gs_catalog.get_resource(instance.name)
bbox = gs_resource.latlon_bbox
#FIXME(Ariel): Correct srid setting below
#self.srid = gs_resource.src
# Set bounding box values
instance.bbox_x0 = bbox[0]
instance.bbox_x1 = bbox[1]
instance.bbox_y0 = bbox[2]
instance.bbox_y1 = bbox[3]
instance.update_thumbnail(save=False)
def geoserver_post_save(instance, sender, **kwargs):
"""Save keywords to GeoServer
The way keywords are implemented requires the layer
to be saved to the database before accessing them.
"""
url = ogc_server_settings.internal_rest
try:
gs_catalog = Catalog(url, _user, _password)
gs_resource = gs_catalog.get_resource(instance.name)
except (FailedRequestError, EnvironmentError) as e:
msg = ('Could not connect to geoserver at "%s"'
'to save information for layer "%s"' % (
ogc_server_settings.LOCATION, instance.name.encode('utf-8'))
)
logger.warn(msg, e)
# If geoserver is not online, there is no need to continue
return
# If there is no resource returned it could mean one of two things:
# a) There is a synchronization problem in geoserver
# b) The unit tests are running and another geoserver is running in the
# background.
# For both cases it is sensible to stop processing the layer
if gs_resource is None:
logger.warn('Could not get geoserver resource for %s' % instance)
return
gs_resource.keywords = instance.keyword_list()
#gs_resource should only be called if ogc_server_settings.BACKEND_WRITE_ENABLED == True
if getattr(ogc_server_settings,"BACKEND_WRITE_ENABLED", True):
gs_catalog.save(gs_resource)
bbox = gs_resource.latlon_bbox
dx = float(bbox[1]) - float(bbox[0])
dy = float(bbox[3]) - float(bbox[2])
dataAspect = 1 if dy == 0 else dx / dy
height = 550
width = int(height * dataAspect)
# Set download links for WMS, WCS or WFS and KML
links = wms_links(ogc_server_settings.public_url + 'wms?',
instance.typename.encode('utf-8'), instance.bbox_string,
instance.srid, height, width)
for ext, name, mime, wms_url in links:
Link.objects.get_or_create(resource= instance.resourcebase_ptr,
name=ugettext(name),
defaults=dict(
extension=ext,
url=wms_url,
mime=mime,
link_type='image',
)
)
if instance.storeType == "dataStore":
links = wfs_links(ogc_server_settings.public_url + 'wfs?', instance.typename.encode('utf-8'))
for ext, name, mime, wfs_url in links:
Link.objects.get_or_create(resource= instance.resourcebase_ptr,
url=wfs_url,
defaults=dict(
extension=ext,
name=name,
mime=mime,
url=wfs_url,
link_type='data',
)
)
elif instance.storeType == 'coverageStore':
#FIXME(Ariel): This works for public layers, does it work for restricted too?
# would those end up with no geotiff links, like, forever?
permissions = {}
permissions['anonymous'] = instance.get_gen_level(ANONYMOUS_USERS)
permissions['authenticated'] = instance.get_gen_level(AUTHENTICATED_USERS)
instance.set_gen_level(ANONYMOUS_USERS,'layer_readonly')
links = wcs_links(ogc_server_settings.public_url + 'wcs?', instance.typename.encode('utf-8'),
bbox=instance.bbox[:-1], crs=instance.bbox[-1], height=height, width=width)
for ext, name, mime, wcs_url in links:
Link.objects.get_or_create(resource= instance.resourcebase_ptr,
url=wcs_url,
defaults=dict(
extension=ext,
name=name,
mime=mime,
link_type='data',
)
)
instance.set_gen_level(ANONYMOUS_USERS,permissions['anonymous'])
instance.set_gen_level(AUTHENTICATED_USERS,permissions['authenticated'])
kml_reflector_link_download = ogc_server_settings.public_url + "wms/kml?" + urllib.urlencode({
'layers': instance.typename.encode('utf-8'),
'mode': "download"
})
Link.objects.get_or_create(resource= instance.resourcebase_ptr,
url=kml_reflector_link_download,
defaults=dict(
extension='kml',
name=_("KML"),
mime='text/xml',
link_type='data',
)
)
kml_reflector_link_view = ogc_server_settings.public_url + "wms/kml?" + urllib.urlencode({
'layers': instance.typename.encode('utf-8'),
'mode': "refresh"
})
Link.objects.get_or_create(resource= instance.resourcebase_ptr,
url=kml_reflector_link_view,
defaults=dict(
extension='kml',
name=_("View in Google Earth"),
mime='text/xml',
link_type='data',
)
)
tile_url = ('%sgwc/service/gmaps?' % ogc_server_settings.public_url +
'layers=%s' % instance.typename.encode('utf-8') +
'&zoom={z}&x={x}&y={y}' +
'&format=image/png8'
)
Link.objects.get_or_create(resource= instance.resourcebase_ptr,
url=tile_url,
defaults=dict(
extension='tiles',
name=_("Tiles"),
mime='image/png',
link_type='image',
)
)
html_link_url = '%s%s' % (settings.SITEURL[:-1], instance.get_absolute_url())
Link.objects.get_or_create(resource= instance.resourcebase_ptr,
url=html_link_url,
defaults=dict(
extension='html',
name=instance.typename,
mime='text/html',
link_type='html',
)
)
    # remove links that belong to an old address
for link in instance.link_set.all():
if not urlparse(settings.SITEURL).hostname == urlparse(link.url).hostname and not \
urlparse(ogc_server_settings.public_url).hostname == urlparse(link.url).hostname:
link.delete()
#Save layer attributes
set_attributes(instance)
#Save layer styles
set_styles(instance, gs_catalog)
def set_styles(layer, gs_catalog):
style_set = []
gs_layer = gs_catalog.get_layer(layer.name)
default_style = gs_layer.default_style
layer.default_style = save_style(default_style)
style_set.append(layer.default_style)
alt_styles = gs_layer.styles
for alt_style in alt_styles:
style_set.append(save_style(alt_style))
layer.styles = style_set
return layer
def save_style(gs_style):
style, created = Style.objects.get_or_create(name = gs_style.sld_name)
style.sld_title = gs_style.sld_title
style.sld_body = gs_style.sld_body
style.sld_url = gs_style.body_href()
style.save()
return style
def is_layer_attribute_aggregable(store_type, field_name, field_type):
"""
Decipher whether layer attribute is suitable for statistical derivation
"""
# must be vector layer
if store_type != 'dataStore':
return False
# must be a numeric data type
if field_type not in LAYER_ATTRIBUTE_NUMERIC_DATA_TYPES:
return False
# must not be an identifier type field
if field_name.lower() in ['id', 'identifier']:
return False
return True
def get_attribute_statistics(layer_name, field):
"""
Generate statistics (range, mean, median, standard deviation, unique values)
for layer attribute
"""
logger.debug('Deriving aggregate statistics for attribute %s', field)
if not ogc_server_settings.WPS_ENABLED:
return None
try:
return wps_execute_layer_attribute_statistics(layer_name, field)
except Exception:
logger.exception('Error generating layer aggregate statistics')
def set_attributes(layer, overwrite=False):
"""
Retrieve layer attribute names & types from Geoserver,
then store in GeoNode database using Attribute model
"""
#Appending authorizations seems necessary to avoid 'layer not found' from GeoServer
http = httplib2.Http()
http.add_credentials(_user, _password)
_netloc = urlparse(ogc_server_settings.LOCATION).netloc
http.authorizations.append(
httplib2.BasicAuthentication(
(_user, _password),
_netloc,
ogc_server_settings.LOCATION,
{},
None,
None,
http
)
)
attribute_map = []
if layer.storeType == "dataStore":
dft_url = ogc_server_settings.LOCATION + "wfs?" + urllib.urlencode({
"service": "wfs",
"version": "1.0.0",
"request": "DescribeFeatureType",
"typename": layer.typename.encode('utf-8'),
})
try:
body = http.request(dft_url)[1]
doc = etree.fromstring(body)
path = ".//{xsd}extension/{xsd}sequence/{xsd}element".format(xsd="{http://www.w3.org/2001/XMLSchema}")
attribute_map = [[n.attrib["name"],n.attrib["type"]] for n in doc.findall(path)]
except Exception:
attribute_map = []
elif layer.storeType == "coverageStore":
dc_url = ogc_server_settings.LOCATION + "wcs?" + urllib.urlencode({
"service": "wcs",
"version": "1.1.0",
"request": "DescribeCoverage",
"identifiers": layer.typename.encode('utf-8')
})
try:
response, body = http.request(dc_url)
doc = etree.fromstring(body)
path = ".//{wcs}Axis/{wcs}AvailableKeys/{wcs}Key".format(wcs="{http://www.opengis.net/wcs/1.1.1}")
attribute_map = [[n.text,"raster"] for n in doc.findall(path)]
except Exception:
attribute_map = []
attributes = layer.attribute_set.all()
# Delete existing attributes if they no longer exist in an updated layer
for la in attributes:
lafound = False
for field, ftype in attribute_map:
if field == la.attribute:
lafound = True
if overwrite or not lafound:
logger.debug("Going to delete [%s] for [%s]", la.attribute, layer.name.encode('utf-8'))
la.delete()
# Add new layer attributes if they don't already exist
if attribute_map is not None:
iter = len(Attribute.objects.filter(layer=layer)) + 1
for field, ftype in attribute_map:
if field is not None:
la, created = Attribute.objects.get_or_create(layer=layer, attribute=field, attribute_type=ftype)
if created:
if is_layer_attribute_aggregable(layer.storeType, field, ftype):
logger.debug("Generating layer attribute statistics")
result = get_attribute_statistics(layer.name, field)
if result is not None:
la.count = result['Count']
la.min = result['Min']
la.max = result['Max']
la.average = result['Average']
la.median = result['Median']
la.stddev = result['StandardDeviation']
la.sum = result['Sum']
la.unique_values = result['unique_values']
la.last_stats_updated = datetime.now()
la.attribute_label = field.title()
la.visible = ftype.find("gml:") != 0
la.display_order = iter
la.save()
iter += 1
logger.debug("Created [%s] attribute for [%s]", field, layer.name.encode('utf-8'))
else:
logger.debug("No attributes found")
signals.pre_save.connect(pre_save_layer, sender=Layer)
signals.pre_save.connect(geoserver_pre_save, sender=Layer)
signals.pre_delete.connect(geoserver_pre_delete, sender=Layer)
signals.post_save.connect(geoserver_post_save, sender=Layer)
signals.pre_delete.connect(pre_delete_layer, sender=Layer)
signals.post_delete.connect(post_delete_layer, sender=Layer)
signals.post_save.connect(resourcebase_post_save, sender=Layer)
signals.post_delete.connect(resourcebase_post_delete, sender=Layer)
|
AnnalisaS/migration_geonode
|
geonode/layers/models.py
|
Python
|
gpl-3.0
| 29,589 | 0.004968 |
__author__ = 'tauren'
from flask import abort
from flask_restful import Resource, fields, marshal, reqparse
from flask_login import current_user
from models import User, db
user_fields = {
'username': fields.String,
'id': fields.Integer,
'uri': fields.Url('user')
}
class UserApi(Resource):
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('username', type=str, required=True,
help='No username provided', location='json')
self.reqparse.add_argument('password', type=str, required=True,
help='No password provided', location='json')
super(UserApi, self).__init__()
def post(self):
args = self.reqparse.parse_args()
new_user = User(args['username'], args['password'])
db.session.add(new_user)
db.session.commit()
        # return the created user (201 Created) instead of a bare integer body
        return marshal(new_user, user_fields), 201
def get(self):
user = User.query.filter_by(id=current_user.id).all()
if not user:
return abort(404)
return {'results': marshal(user, user_fields)}
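# Illustrative request sketch (assuming the resource is registered under an endpoint
# named 'user', as required by fields.Url('user') above):
#   POST {"username": "alice", "password": "s3cret"}  -> marshalled user, HTTP 201
#   GET  (authenticated)                              -> {"results": [...]} or HTTP 404
# Missing fields are rejected by reqparse with the help messages defined above.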
|
taurenk/Flask-Angular-TaskList
|
backend/app/user_api.py
|
Python
|
mit
| 1,159 | 0.001726 |
from flask_wtf import Form
from wtforms import StringField, BooleanField, PasswordField, SelectField, DateTimeField, TextAreaField
|
MansoorMajeed/encrypted-notes
|
app/forms.py
|
Python
|
gpl-2.0
| 138 | 0.014493 |
#!/usr/bin/python
'''
Wrapper for the SRTM module (srtm.py)
It will grab the altitude of a long,lat pair from the SRTM database
Created by Stephen Dade (stephen_dade@hotmail.com)
'''
import os
import sys
import time
import numpy
from MAVProxy.modules.mavproxy_map import srtm
class ElevationModel():
'''Elevation Model. Only SRTM for now'''
def __init__(self, database='srtm', offline=0):
'''Use offline=1 to disable any downloading of tiles, regardless of whether the
tile exists'''
self.database = database
if self.database == 'srtm':
self.downloader = srtm.SRTMDownloader(offline=offline)
self.downloader.loadFileList()
self.tileDict = dict()
'''Use the Geoscience Australia database instead - watch for the correct database path'''
if self.database == 'geoscience':
from MAVProxy.modules.mavproxy_map import GAreader
self.mappy = GAreader.ERMap()
self.mappy.read_ermapper(os.path.join(os.environ['HOME'], './Documents/Elevation/Canberra/GSNSW_P756demg'))
def GetElevation(self, latitude, longitude, timeout=0):
'''Returns the altitude (m ASL) of a given lat/long pair, or None if unknown'''
if self.database == 'srtm':
TileID = (numpy.floor(latitude), numpy.floor(longitude))
if TileID in self.tileDict:
alt = self.tileDict[TileID].getAltitudeFromLatLon(latitude, longitude)
else:
tile = self.downloader.getTile(numpy.floor(latitude), numpy.floor(longitude))
if tile == 0:
if timeout > 0:
t0 = time.time()
while time.time() < t0+timeout and tile == 0:
tile = self.downloader.getTile(numpy.floor(latitude), numpy.floor(longitude))
if tile == 0:
time.sleep(0.1)
if tile == 0:
return None
self.tileDict[TileID] = tile
alt = tile.getAltitudeFromLatLon(latitude, longitude)
if self.database == 'geoscience':
alt = self.mappy.getAltitudeAtPoint(latitude, longitude)
return alt
if __name__ == "__main__":
from optparse import OptionParser
parser = OptionParser("mp_elevation.py [options]")
parser.add_option("--lat", type='float', default=-35.052544, help="start latitude")
parser.add_option("--lon", type='float', default=149.509165, help="start longitude")
parser.add_option("--database", type='string', default='srtm', help="elevation database")
(opts, args) = parser.parse_args()
EleModel = ElevationModel(opts.database)
lat = opts.lat
lon = opts.lon
'''Do a few lat/long pairs to demonstrate the caching
Note the +0.000001 to the time. On faster PCs, the two time periods
may in fact be equal, so we add a little extra time on the end to account for this'''
t0 = time.time()
alt = EleModel.GetElevation(lat, lon, timeout=10)
if alt is None:
print("Tile not available")
sys.exit(1)
t1 = time.time()+.000001
print("Altitude at (%.6f, %.6f) is %u m. Pulled at %.1f FPS" % (lat, lon, alt, 1/(t1-t0)))
lat = opts.lat+0.001
lon = opts.lon+0.001
t0 = time.time()
alt = EleModel.GetElevation(lat, lon, timeout=10)
t1 = time.time()+.000001
print("Altitude at (%.6f, %.6f) is %u m. Pulled at %.1f FPS" % (lat, lon, alt, 1/(t1-t0)))
lat = opts.lat-0.001
lon = opts.lon-0.001
t0 = time.time()
alt = EleModel.GetElevation(lat, lon, timeout=10)
t1 = time.time()+.000001
print("Altitude at (%.6f, %.6f) is %u m. Pulled at %.1f FPS" % (lat, lon, alt, 1/(t1-t0)))
|
bugobliterator/MAVProxy
|
MAVProxy/modules/mavproxy_map/mp_elevation.py
|
Python
|
gpl-3.0
| 3,785 | 0.004756 |
# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from base import *
class Quagga(Container):
CONTAINER_NAME = None
GUEST_DIR = '/root/config'
def __init__(self, host_dir, conf, image='bgperf/quagga'):
super(Quagga, self).__init__(self.CONTAINER_NAME, image, host_dir, self.GUEST_DIR, conf)
@classmethod
def build_image(cls, force=False, tag='bgperf/quagga', checkout='HEAD', nocache=False):
cls.dockerfile = '''
FROM ubuntu:latest
WORKDIR /root
RUN useradd -M quagga
RUN mkdir /var/log/quagga && chown quagga:quagga /var/log/quagga
RUN mkdir /var/run/quagga && chown quagga:quagga /var/run/quagga
RUN apt-get update && apt-get install -qy git autoconf libtool gawk make telnet libreadline6-dev
RUN git clone git://git.sv.gnu.org/quagga.git quagga
RUN cd quagga && git checkout {0} && ./bootstrap.sh && \
./configure --disable-doc --localstatedir=/var/run/quagga && make && make install
RUN ldconfig
'''.format(checkout)
super(Quagga, cls).build_image(force, tag, nocache)
class QuaggaTarget(Quagga, Target):
CONTAINER_NAME = 'bgperf_quagga_target'
CONFIG_FILE_NAME = 'bgpd.conf'
def write_config(self, scenario_global_conf):
config = """hostname bgpd
password zebra
router bgp {0}
bgp router-id {1}
""".format(self.conf['as'], self.conf['router-id'])
def gen_neighbor_config(n):
local_addr = n['local-address']
c = """neighbor {0} remote-as {1}
neighbor {0} advertisement-interval 1
neighbor {0} route-server-client
neighbor {0} timers 30 90
""".format(local_addr, n['as'])
if 'filter' in n:
for p in (n['filter']['in'] if 'in' in n['filter'] else []):
c += 'neighbor {0} route-map {1} export\n'.format(local_addr, p)
return c
with open('{0}/{1}'.format(self.host_dir, self.CONFIG_FILE_NAME), 'w') as f:
f.write(config)
for n in list(flatten(t.get('neighbors', {}).values() for t in scenario_global_conf['testers'])) + [scenario_global_conf['monitor']]:
f.write(gen_neighbor_config(n))
if 'policy' in scenario_global_conf:
seq = 10
for k, v in scenario_global_conf['policy'].iteritems():
match_info = []
for i, match in enumerate(v['match']):
n = '{0}_match_{1}'.format(k, i)
if match['type'] == 'prefix':
f.write(''.join('ip prefix-list {0} deny {1}\n'.format(n, p) for p in match['value']))
f.write('ip prefix-list {0} permit any\n'.format(n))
elif match['type'] == 'as-path':
f.write(''.join('ip as-path access-list {0} deny _{1}_\n'.format(n, p) for p in match['value']))
f.write('ip as-path access-list {0} permit .*\n'.format(n))
elif match['type'] == 'community':
f.write(''.join('ip community-list standard {0} permit {1}\n'.format(n, p) for p in match['value']))
f.write('ip community-list standard {0} permit\n'.format(n))
elif match['type'] == 'ext-community':
f.write(''.join('ip extcommunity-list standard {0} permit {1} {2}\n'.format(n, *p.split(':', 1)) for p in match['value']))
f.write('ip extcommunity-list standard {0} permit\n'.format(n))
match_info.append((match['type'], n))
f.write('route-map {0} permit {1}\n'.format(k, seq))
for info in match_info:
if info[0] == 'prefix':
f.write('match ip address prefix-list {0}\n'.format(info[1]))
elif info[0] == 'as-path':
f.write('match as-path {0}\n'.format(info[1]))
elif info[0] == 'community':
f.write('match community {0}\n'.format(info[1]))
elif info[0] == 'ext-community':
f.write('match extcommunity {0}\n'.format(info[1]))
seq += 10
def get_startup_cmd(self):
return '\n'.join(
['#!/bin/bash',
'ulimit -n 65536',
'bgpd -u root -f {guest_dir}/{config_file_name}']
).format(
guest_dir=self.guest_dir,
config_file_name=self.CONFIG_FILE_NAME)
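# Illustrative rendering of get_startup_cmd(), assuming the Container base class
# exposes GUEST_DIR as self.guest_dir (it is passed to super().__init__ above):
#
#   #!/bin/bash
#   ulimit -n 65536
#   bgpd -u root -f /root/config/bgpd.conf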
|
osrg/bgperf
|
quagga.py
|
Python
|
apache-2.0
| 5,099 | 0.003334 |
#!/usr/bin/env python3
# Copyright (c) 2009-2019 The Bitcoin Core developers
# Copyright (c) 2014-2019 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Check RPC argument consistency."""
from collections import defaultdict
import os
import re
import sys
# Source files (relative to root) to scan for dispatch tables
SOURCES = [
"src/rpc/server.cpp",
"src/rpc/blockchain.cpp",
"src/rpc/mining.cpp",
"src/rpc/misc.cpp",
"src/rpc/net.cpp",
"src/rpc/rawtransaction.cpp",
"src/wallet/rpcwallet.cpp",
]
# Source file (relative to root) containing conversion mapping
SOURCE_CLIENT = 'src/rpc/client.cpp'
# Argument names that should be ignored in consistency checks
IGNORE_DUMMY_ARGS = {'dummy', 'arg0', 'arg1', 'arg2', 'arg3', 'arg4', 'arg5', 'arg6', 'arg7', 'arg8', 'arg9'}
class RPCCommand:
def __init__(self, name, args):
self.name = name
self.args = args
class RPCArgument:
def __init__(self, names, idx):
self.names = names
self.idx = idx
self.convert = False
def parse_string(s):
assert s[0] == '"'
assert s[-1] == '"'
return s[1:-1]
def process_commands(fname):
"""Find and parse dispatch table in implementation file `fname`."""
cmds = []
in_rpcs = False
with open(fname, "r", encoding="utf8") as f:
for line in f:
line = line.rstrip()
if not in_rpcs:
if re.match("static const CRPCCommand .*\[\] =", line):
in_rpcs = True
else:
if line.startswith('};'):
in_rpcs = False
elif '{' in line and '"' in line:
m = re.search('{ *("[^"]*"), *("[^"]*"), *&([^,]*), *{([^}]*)} *},', line)
assert m, 'No match to table expression: %s' % line
name = parse_string(m.group(2))
args_str = m.group(4).strip()
if args_str:
args = [RPCArgument(parse_string(x.strip()).split('|'), idx) for idx, x in enumerate(args_str.split(','))]
else:
args = []
cmds.append(RPCCommand(name, args))
assert not in_rpcs and cmds, "Something went wrong with parsing the C++ file: update the regexps"
return cmds
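# For reference, the regex in process_commands() matches dispatch-table rows shaped
# roughly like this (hypothetical example):
#   { "blockchain",         "getblockcount",          &getblockcount,          {} },
# capturing the command name ("getblockcount") and the brace-enclosed argument names.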
def process_mapping(fname):
"""Find and parse conversion table in implementation file `fname`."""
cmds = []
in_rpcs = False
with open(fname, "r", encoding="utf8") as f:
for line in f:
line = line.rstrip()
if not in_rpcs:
if line == 'static const CRPCConvertParam vRPCConvertParams[] =':
in_rpcs = True
else:
if line.startswith('};'):
in_rpcs = False
elif '{' in line and '"' in line:
m = re.search('{ *("[^"]*"), *([0-9]+) *, *("[^"]*") *},', line)
assert m, 'No match to table expression: %s' % line
name = parse_string(m.group(1))
idx = int(m.group(2))
argname = parse_string(m.group(3))
cmds.append((name, idx, argname))
assert not in_rpcs and cmds
return cmds
def main():
root = sys.argv[1]
# Get all commands from dispatch tables
cmds = []
for fname in SOURCES:
cmds += process_commands(os.path.join(root, fname))
cmds_by_name = {}
for cmd in cmds:
cmds_by_name[cmd.name] = cmd
# Get current convert mapping for client
client = SOURCE_CLIENT
mapping = set(process_mapping(os.path.join(root, client)))
print('* Checking consistency between dispatch tables and vRPCConvertParams')
# Check mapping consistency
errors = 0
for (cmdname, argidx, argname) in mapping:
try:
rargnames = cmds_by_name[cmdname].args[argidx].names
except IndexError:
print('ERROR: %s argument %i (named %s in vRPCConvertParams) is not defined in dispatch table' % (cmdname, argidx, argname))
errors += 1
continue
if argname not in rargnames:
print('ERROR: %s argument %i is named %s in vRPCConvertParams but %s in dispatch table' % (cmdname, argidx, argname, rargnames), file=sys.stderr)
errors += 1
# Check for conflicts in vRPCConvertParams conversion
# All aliases for an argument must either be present in the
# conversion table, or not. Anything in between means an oversight
# and some aliases won't work.
for cmd in cmds:
for arg in cmd.args:
convert = [((cmd.name, arg.idx, argname) in mapping) for argname in arg.names]
if any(convert) != all(convert):
print('ERROR: %s argument %s has conflicts in vRPCConvertParams conversion specifier %s' % (cmd.name, arg.names, convert))
errors += 1
arg.convert = all(convert)
# Check for conversion difference by argument name.
# It is preferable for API consistency that arguments with the same name
# have the same conversion, so bin by argument name.
all_methods_by_argname = defaultdict(list)
converts_by_argname = defaultdict(list)
for cmd in cmds:
for arg in cmd.args:
for argname in arg.names:
all_methods_by_argname[argname].append(cmd.name)
converts_by_argname[argname].append(arg.convert)
for argname, convert in converts_by_argname.items():
if all(convert) != any(convert):
if argname in IGNORE_DUMMY_ARGS:
# these are testing or dummy, don't warn for them
continue
print('WARNING: conversion mismatch for argument named %s (%s)' %
(argname, list(zip(all_methods_by_argname[argname], converts_by_argname[argname]))))
sys.exit(errors > 0)
if __name__ == '__main__':
main()
|
digibyte/digibyte
|
test/lint/check-rpc-mappings.py
|
Python
|
mit
| 6,062 | 0.003299 |
# Linker - the linker for assembled objects
#
# Input: one or more ObjectFile objects
# Output: an executable suitable for loading into the Luz
# simulator or CPU memory.
#
# Luz micro-controller assembler
# Eli Bendersky (C) 2008-2010
#
import pprint, os, sys, string
from collections import defaultdict
from ..commonlib.utils import (
word2bytes, bytes2word, extract_bitfield,
build_bitfield, num_fits_in_nbits)
from ..commonlib.luz_opcodes import *
from .asm_common_types import ImportType, RelocType
from .assembler import Assembler
class LinkerError(Exception): pass
class Linker(object):
""" Links together several object files, adding a startup
object, and produces a binary image of the linked
executable. This binary image, when loaded at the initial
offset address, is ready to be executed by the CPU.
A Linker is created with the following parameters:
initial_offset:
The initial offset in memory where the image will be
placed. This is important for resolving relocations
and imports.
mem_size:
The total memory size available for the executable.
This is used to initialize the stack pointer.
Calling the link() method results in the binary image as
a list of bytes.
"""
def __init__(self, initial_offset=0, mem_size=128*1024):
self.initial_offset = initial_offset
self.mem_size = mem_size
def link(self, object_files=[]):
""" Link the given objects. object_files is a list of
ObjectFile. The objects are linked with the special
startup object (see LINKER_STARTUP_CODE).
Note: object files may be modified as a result of this
call, to resolve import and relocations.
"""
# Throughout the linking code we refer to objects by their offset in the
# object_files list. This offset uniquely identifies an object.
self.object_files = object_files
startup_object = self._assemble_startup_code()
self.object_files.append(startup_object)
segment_map, total_size = self._compute_segment_map(
object_files=self.object_files,
offset=self.initial_offset)
exports = self._collect_exports(
object_files=self.object_files)
self._resolve_imports(
object_files=self.object_files,
exports=exports,
segment_map=segment_map)
self._resolve_relocations(
object_files=self.object_files,
segment_map=segment_map)
image = self._build_memory_image(
object_files=self.object_files,
segment_map=segment_map,
total_size=total_size)
return image
######################-- PRIVATE --#####################
def _assemble_startup_code(self):
sp_ptr = self.initial_offset + self.mem_size - 4
startup_code = LINKER_STARTUP_CODE.substitute(SP_POINTER=sp_ptr)
asm = Assembler()
startup_object = asm.assemble(str=startup_code)
return startup_object
def _compute_segment_map(self, object_files, offset=0):
""" Compute a segment memory map from the list of object
files and a given offset.
A "segment map" is a list of:
dict[segment] = address
The ith item holds such a dictionary for the ith
object file.
Each dictionary maps the segments found in this
object file into the addresses to which they are
placed in the memory layout created by the linker.
For example, if several objects have a 'text' segment,
this function collects all the 'text' segments into
a contiguous region. However, the 'text' segment of
each object will point to a different offset inside
this region (since they're placed one after another).
            The 'offset' argument allows shifting the whole memory
            map by a constant amount.
Linker-created segments like __startup and __heap are
treated specially.
Returns the pair segment_map, total_size
total_size is the total size of memory occupied by
all the objects.
"""
# Step 1: Compute the total sizes of all segments that
# exist in the object files
segment_size = defaultdict(int)
for obj in object_files:
for segment in obj.seg_data:
segment_size[segment] += len(obj.seg_data[segment])
# Step 2: Initialize the pointers that point to the start
# of each combined segment.
# Note: the order of allocation of segments (what comes
# after what) isn't really important and could be totally
# arbitrary. To make it more predictable, segments are
# allocated one after another sorted by name in increasing
# lexicographical order.
# The __startup segment is placed before all others (i.e.
# it's mapped at 'offset'), and the __heap segment is
# placed after all others.
segment_ptr = {}
ptr = offset
if '__startup' in segment_size:
segment_ptr['__startup'] = ptr
ptr += segment_size['__startup']
for segment in sorted(segment_size):
if segment not in ('__startup', '__heap'):
segment_ptr[segment] = ptr
ptr += segment_size[segment]
if '__heap' in segment_size:
segment_ptr['__heap'] = ptr
ptr += segment_size['__heap']
total_size = ptr - offset
# Step 3: Create the segment map. For each segment in each
# object, record the memory offset where it will be
# mapped.
segment_map = []
for obj in object_files:
obj_segment_map = {}
for segment in obj.seg_data:
obj_segment_map[segment] = segment_ptr[segment]
segment_ptr[segment] += len(obj.seg_data[segment])
segment_map.append(obj_segment_map)
return segment_map, total_size
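    # Worked example (illustrative): with offset=0, object 0 holding a 100-byte
    # 'text' and a 20-byte 'data' segment, and object 1 holding a 60-byte 'text'
    # segment, segments are laid out in name order -- data at [0, 20), text at
    # [20, 180) -- and the returned map is roughly:
    #   [{'data': 0, 'text': 20},   # object 0
    #    {'text': 120}]             # object 1
    # with total_size == 180.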
def _collect_exports(self, object_files):
""" Collects the exported symbols from all the objects.
Verifies that exported symbols are unique and
notifies of collisions.
The returned data structure is a dict mapping export
symbol names to a pair: (object_index, addr)
where object_index is the index in object_files of the
object that exports this symbol, and addr is
the address of the symbol (SegAddr) taken from the
export table of that object.
"""
exports = {}
for idx, obj in enumerate(object_files):
for export in obj.export_table:
sym_name = export.export_symbol
if sym_name in exports:
other_idx = exports[sym_name][0]
self._linker_error(
"Duplicated export symbol '%s' at objects [%s] and [%s]" % (
sym_name,
self._object_id(object_files[idx]),
self._object_id(object_files[other_idx])))
exports[sym_name] = (idx, export.addr)
return exports
def _resolve_relocations(self, object_files, segment_map):
""" Resolves the relocations in object files according to
their relocation tables and the updated segment_map
information.
"""
# Look at the relocation tables of all objects
#
for idx, obj in enumerate(object_files):
for reloc_seg, type, addr in obj.reloc_table:
# The requested relocation segment should exist
# in the segment map for this object.
#
if not reloc_seg in segment_map[idx]:
self._linker_error("Relocation entry in object [%t] refers to unknown segment %s" % (
self._object_id(obj), reloc_seg))
# This is where the relocated segment was mapped
#
mapped_address = segment_map[idx][reloc_seg]
# Patch the instruction asking for relocation with
# the mapped address of the requested segment.
#
self._patch_segment_data(
seg_data=obj.seg_data[addr.segment],
instr_offset=addr.offset,
type=type,
mapped_address=mapped_address,
name=reloc_seg)
def _resolve_imports(self, object_files, segment_map, exports):
""" Resolves the imports in object files according to the
exported symbols collected in exports and the mapping
of segments into memory (segment_map).
"""
# Look at the import tables of all objects
#
for idx, obj in enumerate(object_files):
import_table = object_files[idx].import_table
# All imported symbols
#
for sym, import_type, import_addr in import_table:
# Make sure this symbol was indeed exported by
# some object
#
if not sym in exports:
self._linker_error("Failed import of symbol '%s' at object [%s]" % (
sym, self._object_id(obj)))
exp_obj_idx, exp_address = exports[sym]
# From the export table, build the final mapped
# address of this symbol.
# It is the mapped value of the segment in which
# this symbol is located, plus its offset in that
# segment.
#
mapped_address = segment_map[exp_obj_idx][exp_address.segment]
mapped_address += exp_address.offset
# Now patch the segment data of this object.
# The instruction(s) to patch and the patch type
# are taken from the import table, and the address
# to insert is the mapped_address computed from
# the matching exported symbol.
#
self._patch_segment_data(
seg_data=obj.seg_data[import_addr.segment],
instr_offset=import_addr.offset,
type=import_type,
mapped_address=mapped_address,
name=sym)
def _patch_segment_data(self,
seg_data,
instr_offset,
type,
mapped_address,
name='<unknown>'):
""" Performs a patch of segment data.
seg_data:
The segment data of the relevant segment
instr_offset:
Offset of the instruction that is to be patched in
the segment.
type:
Patch type (one of types listed in ImportType or
RelocType)
mapped_address:
The address that will be patched into the
instruction(s).
name:
Symbol/segment name used for debugging
The segment data is modified as a result of this call.
"""
if instr_offset > len(seg_data) - 4:
self._linker_error("Patching (%s) of '%s', bad offset into segment" % (
type, name))
# At the moment only CALL and LI patches are supported
#
patch_call = type in (ImportType.CALL, RelocType.CALL)
# For import patches, the address stored in the
# instruction is replaced with the mapped address.
# For reloc patches, the two addresses are added
#
do_replace = type in (ImportType.CALL, ImportType.LI)
if patch_call:
orig_instr_bytes = seg_data[instr_offset:instr_offset+4]
orig_instr_word = bytes2word(orig_instr_bytes)
# Break the instruction into opcode and destination
# address. Make sure it's indeed a CALL
#
opcode = extract_opcode(orig_instr_word)
if opcode != OP_CALL:
self._linker_error("Patching (%s) of '%s': expected CALL, got %d" % (
type, name, opcode))
# CALL destinations are in words
#
mapped_address //= 4
# Patch the address
#
if do_replace:
destination = mapped_address
else:
destination = extract_bitfield(orig_instr_word, 25, 0)
destination += mapped_address
if not num_fits_in_nbits(destination, 26):
self._linker_error("Patching (%s) of '%s': patched destination address %x too large" % (
type, name, destination))
# Build the new instruction and shove it back into
# the segment data
#
new_instr_bytes = word2bytes(
build_bitfield(31, 26, opcode) |
build_bitfield(25, 0, destination))
seg_data[instr_offset:instr_offset+4] = new_instr_bytes
else:
# Patch LI
# Handled similarly to patching CALL, except that the
# instructions that replaced LI (LUI followed by ORI)
# have to be patched.
#
orig_bytes = seg_data[instr_offset:instr_offset+8]
orig_lui_word = bytes2word(orig_bytes[0:4])
orig_ori_word = bytes2word(orig_bytes[4:8])
opcode_lui = extract_opcode(orig_lui_word)
opcode_ori = extract_opcode(orig_ori_word)
            if opcode_lui != OP_LUI or opcode_ori != OP_ORI:
self._linker_error("Patching (%s) of '%s': expected LI, got %d,%d" % (
type, name, opcode_lui, opcode_ori))
if do_replace:
destination = mapped_address
else:
# Build the original destination address by combining
# the high and low parts from the two instructions
#
destination = extract_bitfield(orig_lui_word, 15, 0) << 16
destination += extract_bitfield(orig_ori_word, 15, 0)
destination += mapped_address
if not num_fits_in_nbits(destination, 32):
self._linker_error("Patching (%s) of '%s': patched destination address %x too large" % (
                        type, name, destination))
orig_lui_rd = extract_bitfield(orig_lui_word, 25, 21)
new_lui_bytes = word2bytes(
build_bitfield(31, 26, opcode_lui) |
build_bitfield(25, 21, orig_lui_rd) |
build_bitfield(15, 0, destination >> 16))
# in LUI created from LI Rd is in both Rd and Rs
# fields
#
orig_ori_rd = extract_bitfield(orig_ori_word, 25, 21)
new_ori_bytes = word2bytes(
build_bitfield(31, 26, opcode_ori) |
build_bitfield(25, 21, orig_ori_rd) |
build_bitfield(20, 16, orig_ori_rd) |
build_bitfield(15, 0, destination & 0xFFFF))
seg_data[instr_offset:instr_offset+4] = new_lui_bytes
seg_data[instr_offset+4:instr_offset+8] = new_ori_bytes
def _build_memory_image(self, object_files, segment_map, total_size):
""" Builds a linked memory image of the objects mapped
according to segment map.
Returns a list of bytes that should be loaded to
self.initial_offset in the CPU's memory.
"""
SENTINEL = -999
image = [SENTINEL] * total_size
for idx, obj in enumerate(object_files):
for segment in obj.seg_data:
seg_data = obj.seg_data[segment]
start = segment_map[idx][segment] - self.initial_offset
end = start + len(seg_data)
# sanity check: the segments don't trample over
# each other
#
for i in range(start, end):
assert image[i] == SENTINEL, 'segment %s at %d' % (segment, i)
image[start:end] = seg_data
# sanity check: no sentinels left
#
for i in range(total_size):
assert image[i] != SENTINEL, 'at %d' % i
return image
def _object_id(self, object_file):
""" Returns a string identification of the given object
file. If it has a name, that's returned. Otherwise,
its id is returned as a string.
"""
if object_file.name:
return object_file.name
else:
return hex(id(object_file))
def _linker_error(self, msg):
raise LinkerError(msg)
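# A minimal usage sketch, assuming assembly sources that the Assembler imported
# above can process; the parameter values are hypothetical and the helper is not
# called at import time.
def _example_link_two_objects(asm_source_a, asm_source_b):
    asm = Assembler()
    objects = [asm.assemble(str=asm_source_a), asm.assemble(str=asm_source_b)]
    linker = Linker(initial_offset=0, mem_size=128 * 1024)
    # Returns a list of bytes ready to load at initial_offset in CPU memory.
    return linker.link(objects)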
# The special segments added by the linker.
#
# __startup: 3 words -- this segment is mapped to the initial offset.
# __heap: 1 word
LINKER_STARTUP_CODE = string.Template(r'''
.segment __startup
LI $$sp, ${SP_POINTER}
CALL asm_main
.segment __heap
.global __heap
__heap:
.word 0
''')
|
eliben/luz-cpu
|
luz_asm_sim/lib/asmlib/linker.py
|
Python
|
unlicense
| 17,370 | 0.001267 |
#!/usr/bin/env python
"""Distutils installer for extras."""
from setuptools import setup
import os.path
import extras
testtools_cmd = extras.try_import('testtools.TestCommand')
def get_version():
"""Return the version of extras that we are building."""
version = '.'.join(
str(component) for component in extras.__version__[0:3])
return version
def get_long_description():
readme_path = os.path.join(
os.path.dirname(__file__), 'README.rst')
return open(readme_path).read()
cmdclass = {}
if testtools_cmd is not None:
cmdclass['test'] = testtools_cmd
setup(name='extras',
author='Testing cabal',
author_email='testtools-dev@lists.launchpad.net',
url='https://github.com/testing-cabal/extras',
      description=('Useful extra bits for Python - things that should be '
'in the standard library'),
long_description=get_long_description(),
version=get_version(),
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
],
packages=[
'extras',
'extras.tests',
],
cmdclass=cmdclass)
|
testing-cabal/extras
|
setup.py
|
Python
|
mit
| 1,687 | 0.000593 |
#!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the SegWit changeover logic."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import sha256, CTransaction, CTxIn, COutPoint, CTxOut, COIN, ToHex, FromHex
from test_framework.address import script_to_p2sh, key_to_p2pkh, key_to_p2sh_p2wpkh, key_to_p2wpkh, script_to_p2sh_p2wsh, script_to_p2wsh, program_to_witness
from test_framework.script import CScript, OP_HASH160, OP_CHECKSIG, OP_0, hash160, OP_EQUAL, OP_DUP, OP_EQUALVERIFY, OP_1, OP_2, OP_CHECKMULTISIG, OP_TRUE
from io import BytesIO
NODE_0 = 0
NODE_2 = 2
WIT_V0 = 0
WIT_V1 = 1
# Create a scriptPubKey corresponding to either a P2WPKH output for the
# given pubkey, or a P2WSH output of a 1-of-1 multisig for the given
# pubkey. Returns the hex encoding of the scriptPubKey.
def witness_script(use_p2wsh, pubkey):
if (use_p2wsh == False):
# P2WPKH instead
pubkeyhash = hash160(hex_str_to_bytes(pubkey))
pkscript = CScript([OP_0, pubkeyhash])
else:
# 1-of-1 multisig
witness_program = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG])
scripthash = sha256(witness_program)
pkscript = CScript([OP_0, scripthash])
return bytes_to_hex_str(pkscript)
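# For reference (illustrative): the two branches above serialize to scriptPubKeys of
# the form OP_0 <20-byte HASH160(pubkey)> for P2WPKH ("0014" + hash) and
# OP_0 <32-byte SHA256(witness script)> for the 1-of-1 multisig P2WSH ("0020" + hash).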
# Return a transaction (in hex) that spends the given utxo to a segwit output,
# optionally wrapping the segwit output using P2SH.
def create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount):
if use_p2wsh:
program = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG])
addr = script_to_p2sh_p2wsh(program) if encode_p2sh else script_to_p2wsh(program)
else:
addr = key_to_p2sh_p2wpkh(pubkey) if encode_p2sh else key_to_p2wpkh(pubkey)
if not encode_p2sh:
assert_equal(node.validateaddress(addr)['scriptPubKey'], witness_script(use_p2wsh, pubkey))
return node.createrawtransaction([utxo], {addr: amount})
# Create a transaction spending a given utxo to a segwit output corresponding
# to the given pubkey: use_p2wsh determines whether to use P2WPKH or P2WSH;
# encode_p2sh determines whether to wrap in P2SH.
# sign=True will have the given node sign the transaction.
# insert_redeem_script will be added to the scriptSig, if given.
def send_to_witness(use_p2wsh, node, utxo, pubkey, encode_p2sh, amount, sign=True, insert_redeem_script=""):
tx_to_witness = create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount)
if (sign):
signed = node.signrawtransaction(tx_to_witness)
assert("errors" not in signed or len(["errors"]) == 0)
return node.sendrawtransaction(signed["hex"])
else:
if (insert_redeem_script):
tx = FromHex(CTransaction(), tx_to_witness)
tx.vin[0].scriptSig += CScript([hex_str_to_bytes(insert_redeem_script)])
tx_to_witness = ToHex(tx)
return node.sendrawtransaction(tx_to_witness)
def getutxo(txid):
utxo = {}
utxo["vout"] = 0
utxo["txid"] = txid
return utxo
def find_unspent(node, min_value):
for utxo in node.listunspent():
if utxo['amount'] >= min_value:
return utxo
class SegWitTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
# This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
self.extra_args = [["-rpcserialversion=0", "-vbparams=segwit:0:999999999999", "-addresstype=legacy"],
["-blockversion=4", "-promiscuousmempoolflags=517", "-prematurewitness", "-rpcserialversion=1", "-vbparams=segwit:0:999999999999", "-addresstype=legacy"],
["-blockversion=536870915", "-promiscuousmempoolflags=517", "-prematurewitness", "-vbparams=segwit:0:999999999999", "-addresstype=legacy"]]
def setup_network(self):
super().setup_network()
connect_nodes(self.nodes[0], 2)
self.sync_all()
def success_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
block = node.generate(1)
assert_equal(len(node.getblock(block[0])["tx"]), 2)
sync_blocks(self.nodes)
def skip_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
block = node.generate(1)
assert_equal(len(node.getblock(block[0])["tx"]), 1)
sync_blocks(self.nodes)
def fail_accept(self, node, error_msg, txid, sign, redeem_script=""):
assert_raises_rpc_error(-26, error_msg, send_to_witness, 1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
def fail_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
assert_raises_rpc_error(-1, "CreateNewBlock: TestBlockValidity failed", node.generate, 1)
sync_blocks(self.nodes)
def run_test(self):
self.nodes[0].generate(161) #block 161
self.log.info("Verify sigops are counted in GBT with pre-BIP141 rules before the fork")
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tmpl = self.nodes[0].getblocktemplate({})
assert(tmpl['sizelimit'] == 1000000)
assert('weightlimit' not in tmpl)
assert(tmpl['sigoplimit'] == 20000)
assert(tmpl['transactions'][0]['hash'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 2)
tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']})
assert(tmpl['sizelimit'] == 1000000)
assert('weightlimit' not in tmpl)
assert(tmpl['sigoplimit'] == 20000)
assert(tmpl['transactions'][0]['hash'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 2)
self.nodes[0].generate(1) #block 162
balance_presetup = self.nodes[0].getbalance()
self.pubkey = []
p2sh_ids = [] # p2sh_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE embedded in p2sh
wit_ids = [] # wit_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE via bare witness
for i in range(3):
newaddress = self.nodes[i].getnewaddress()
self.pubkey.append(self.nodes[i].validateaddress(newaddress)["pubkey"])
multiaddress = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]])
multiscript = CScript([OP_1, hex_str_to_bytes(self.pubkey[-1]), OP_1, OP_CHECKMULTISIG])
p2sh_addr = self.nodes[i].addwitnessaddress(newaddress)
bip173_addr = self.nodes[i].addwitnessaddress(newaddress, False)
p2sh_ms_addr = self.nodes[i].addwitnessaddress(multiaddress)
bip173_ms_addr = self.nodes[i].addwitnessaddress(multiaddress, False)
assert_equal(p2sh_addr, key_to_p2sh_p2wpkh(self.pubkey[-1]))
assert_equal(bip173_addr, key_to_p2wpkh(self.pubkey[-1]))
assert_equal(p2sh_ms_addr, script_to_p2sh_p2wsh(multiscript))
assert_equal(bip173_ms_addr, script_to_p2wsh(multiscript))
p2sh_ids.append([])
wit_ids.append([])
for v in range(2):
p2sh_ids[i].append([])
wit_ids[i].append([])
for i in range(5):
for n in range(3):
for v in range(2):
wit_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], False, Decimal("49.999")))
p2sh_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], True, Decimal("49.999")))
self.nodes[0].generate(1) #block 163
sync_blocks(self.nodes)
# Make sure all nodes recognize the transactions as theirs
assert_equal(self.nodes[0].getbalance(), balance_presetup - 60*50 + 20*Decimal("49.999") + 50)
assert_equal(self.nodes[1].getbalance(), 20*Decimal("49.999"))
assert_equal(self.nodes[2].getbalance(), 20*Decimal("49.999"))
self.nodes[0].generate(260) #block 423
sync_blocks(self.nodes)
self.log.info("Verify default node can't accept any witness format txs before fork")
# unsigned, no scriptsig
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V0][0], False)
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V1][0], False)
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False)
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False)
# unsigned with redeem script
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False, witness_script(False, self.pubkey[0]))
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False, witness_script(True, self.pubkey[0]))
# signed
self.fail_accept(self.nodes[0], "no-witness-yet", wit_ids[NODE_0][WIT_V0][0], True)
self.fail_accept(self.nodes[0], "no-witness-yet", wit_ids[NODE_0][WIT_V1][0], True)
self.fail_accept(self.nodes[0], "no-witness-yet", p2sh_ids[NODE_0][WIT_V0][0], True)
self.fail_accept(self.nodes[0], "no-witness-yet", p2sh_ids[NODE_0][WIT_V1][0], True)
self.log.info("Verify witness txs are skipped for mining before the fork")
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][0], True) #block 424
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][0], True) #block 425
self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][0], True) #block 426
self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][0], True) #block 427
# TODO: An old node would see these txs without witnesses and be able to mine them
self.log.info("Verify unsigned bare witness txs in versionbits-setting blocks are valid before the fork")
self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][1], False) #block 428
self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][1], False) #block 429
self.log.info("Verify unsigned p2sh witness txs without a redeem script are invalid")
self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V0][1], False)
self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V1][1], False)
self.log.info("Verify unsigned p2sh witness txs with a redeem script in versionbits-settings blocks are valid before the fork")
self.success_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][1], False, witness_script(False, self.pubkey[2])) #block 430
self.success_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][1], False, witness_script(True, self.pubkey[2])) #block 431
self.log.info("Verify previous witness txs skipped for mining can now be mined")
assert_equal(len(self.nodes[2].getrawmempool()), 4)
block = self.nodes[2].generate(1) #block 432 (first block with new rules; 432 = 144 * 3)
sync_blocks(self.nodes)
assert_equal(len(self.nodes[2].getrawmempool()), 0)
segwit_tx_list = self.nodes[2].getblock(block[0])["tx"]
assert_equal(len(segwit_tx_list), 5)
self.log.info("Verify block and transaction serialization rpcs return differing serializations depending on rpc serialization flag")
assert(self.nodes[2].getblock(block[0], False) != self.nodes[0].getblock(block[0], False))
assert(self.nodes[1].getblock(block[0], False) == self.nodes[2].getblock(block[0], False))
for i in range(len(segwit_tx_list)):
tx = FromHex(CTransaction(), self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[2].getrawtransaction(segwit_tx_list[i]) != self.nodes[0].getrawtransaction(segwit_tx_list[i]))
assert(self.nodes[1].getrawtransaction(segwit_tx_list[i], 0) == self.nodes[2].getrawtransaction(segwit_tx_list[i]))
assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) != self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[1].getrawtransaction(segwit_tx_list[i]) == self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) == bytes_to_hex_str(tx.serialize_without_witness()))
self.log.info("Verify witness txs without witness data are invalid after the fork")
self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][2], False)
self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][2], False)
self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][2], False, witness_script(False, self.pubkey[2]))
self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][2], False, witness_script(True, self.pubkey[2]))
self.log.info("Verify default node can now use witness txs")
        self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], True) #block 433
        self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], True) #block 434
        self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], True) #block 435
        self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], True) #block 436
self.log.info("Verify sigops are counted in GBT with BIP141 rules after the fork")
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']})
assert(tmpl['sizelimit'] >= 3999577) # actual maximum size is lower due to minimum mandatory non-witness data
assert(tmpl['weightlimit'] == 4000000)
assert(tmpl['sigoplimit'] == 80000)
assert(tmpl['transactions'][0]['txid'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 8)
self.nodes[0].generate(1) # Mine a block to clear the gbt cache
self.log.info("Non-segwit miners are able to use GBT response after activation.")
# Create a 3-tx chain: tx1 (non-segwit input, paying to a segwit output) ->
# tx2 (segwit input, paying to a non-segwit output) ->
# tx3 (non-segwit input, paying to a non-segwit output).
# tx1 is allowed to appear in the block, but no others.
txid1 = send_to_witness(1, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[0], False, Decimal("49.996"))
        hex_tx = self.nodes[0].gettransaction(txid1)['hex']
tx = FromHex(CTransaction(), hex_tx)
assert(tx.wit.is_null()) # This should not be a segwit input
assert(txid1 in self.nodes[0].getrawmempool())
# Now create tx2, which will spend from txid1.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(txid1, 16), 0), b''))
tx.vout.append(CTxOut(int(49.99*COIN), CScript([OP_TRUE])))
tx2_hex = self.nodes[0].signrawtransaction(ToHex(tx))['hex']
txid2 = self.nodes[0].sendrawtransaction(tx2_hex)
tx = FromHex(CTransaction(), tx2_hex)
assert(not tx.wit.is_null())
# Now create tx3, which will spend from txid2
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(txid2, 16), 0), b""))
tx.vout.append(CTxOut(int(49.95*COIN), CScript([OP_TRUE]))) # Huge fee
tx.calc_sha256()
txid3 = self.nodes[0].sendrawtransaction(ToHex(tx))
assert(tx.wit.is_null())
assert(txid3 in self.nodes[0].getrawmempool())
# Now try calling getblocktemplate() without segwit support.
template = self.nodes[0].getblocktemplate()
# Check that tx1 is the only transaction of the 3 in the template.
template_txids = [ t['txid'] for t in template['transactions'] ]
assert(txid2 not in template_txids and txid3 not in template_txids)
assert(txid1 in template_txids)
# Check that running with segwit support results in all 3 being included.
template = self.nodes[0].getblocktemplate({"rules": ["segwit"]})
template_txids = [ t['txid'] for t in template['transactions'] ]
assert(txid1 in template_txids)
assert(txid2 in template_txids)
assert(txid3 in template_txids)
# Check that wtxid is properly reported in mempool entry
assert_equal(int(self.nodes[0].getmempoolentry(txid3)["wtxid"], 16), tx.calc_sha256(True))
# Mine a block to clear the gbt cache again.
self.nodes[0].generate(1)
self.log.info("Verify behaviour of importaddress, addwitnessaddress and listunspent")
# Some public keys to be used later
pubkeys = [
"0363D44AABD0F1699138239DF2F042C3282C0671CC7A76826A55C8203D90E39242", # cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb
"02D3E626B3E616FC8662B489C123349FECBFC611E778E5BE739B257EAE4721E5BF", # cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97
"04A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538A62F5BD8EC85C2477F39650BD391EA6250207065B2A81DA8B009FC891E898F0E", # 91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV
"02A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538", # cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd
"036722F784214129FEB9E8129D626324F3F6716555B603FFE8300BBCB882151228", # cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66
"0266A8396EE936BF6D99D17920DB21C6C7B1AB14C639D5CD72B300297E416FD2EC", # cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K
"0450A38BD7F0AC212FEBA77354A9B036A32E0F7C81FC4E0C5ADCA7C549C4505D2522458C2D9AE3CEFD684E039194B72C8A10F9CB9D4764AB26FCC2718D421D3B84", # 92h2XPssjBpsJN5CqSP7v9a7cf2kgDunBC6PDFwJHMACM1rrVBJ
]
# Import a compressed key and an uncompressed key, generate some multisig addresses
self.nodes[0].importprivkey("92e6XLo5jVAVwrQKPNTs93oQco8f8sDNBcpv73Dsrs397fQtFQn")
uncompressed_spendable_address = ["mvozP4UwyGD2mGZU4D2eMvMLPB9WkMmMQu"]
self.nodes[0].importprivkey("cNC8eQ5dg3mFAVePDX4ddmPYpPbw41r9bm2jd1nLJT77e6RrzTRR")
compressed_spendable_address = ["mmWQubrDomqpgSYekvsU7HWEVjLFHAakLe"]
assert ((self.nodes[0].validateaddress(uncompressed_spendable_address[0])['iscompressed'] == False))
assert ((self.nodes[0].validateaddress(compressed_spendable_address[0])['iscompressed'] == True))
self.nodes[0].importpubkey(pubkeys[0])
compressed_solvable_address = [key_to_p2pkh(pubkeys[0])]
self.nodes[0].importpubkey(pubkeys[1])
compressed_solvable_address.append(key_to_p2pkh(pubkeys[1]))
self.nodes[0].importpubkey(pubkeys[2])
uncompressed_solvable_address = [key_to_p2pkh(pubkeys[2])]
spendable_anytime = [] # These outputs should be seen anytime after importprivkey and addmultisigaddress
spendable_after_importaddress = [] # These outputs should be seen after importaddress
solvable_after_importaddress = [] # These outputs should be seen after importaddress but not spendable
unsolvable_after_importaddress = [] # These outputs should be unsolvable after importaddress
solvable_anytime = [] # These outputs should be solvable after importpubkey
unseen_anytime = [] # These outputs should never be seen
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]]))
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]]))
compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]]))
uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], uncompressed_solvable_address[0]]))
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]]))
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], compressed_solvable_address[1]]))
unknown_address = ["mtKKyoHabkk6e4ppT7NaM7THqPUt7AzPrT", "2NDP3jLWAFT8NDAiUa9qiE6oBt2awmMq7Dx"]
# Test multisig_without_privkey
        # We have 2 public keys without private keys; use addmultisigaddress to add the multisig to the wallet.
        # Money sent to the P2SH of this multisig should only be seen after importaddress of the base58 P2SH address.
multisig_without_privkey_address = self.nodes[0].addmultisigaddress(2, [pubkeys[3], pubkeys[4]])
script = CScript([OP_2, hex_str_to_bytes(pubkeys[3]), hex_str_to_bytes(pubkeys[4]), OP_2, OP_CHECKMULTISIG])
solvable_after_importaddress.append(CScript([OP_HASH160, hash160(script), OP_EQUAL]))
for i in compressed_spendable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# bare and p2sh multisig with compressed keys should always be spendable
spendable_anytime.extend([bare, p2sh])
# P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after direct importaddress
spendable_after_importaddress.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with compressed keys should always be spendable
spendable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH with compressed keys are spendable after direct importaddress
spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
# P2WPKH and P2SH_P2WPKH with compressed keys should always be spendable
spendable_anytime.extend([p2wpkh, p2sh_p2wpkh])
for i in uncompressed_spendable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# bare and p2sh multisig with uncompressed keys should always be spendable
spendable_anytime.extend([bare, p2sh])
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with uncompressed keys should always be spendable
spendable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK and P2SH_P2PKH are spendable after direct importaddress
spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
# Witness output types with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
for i in compressed_solvable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
                # Multisig without private keys is not seen after addmultisigaddress, but is seen after importaddress
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
solvable_after_importaddress.extend([bare, p2sh, p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH, P2PK, P2WPKH and P2SH_P2WPKH with compressed keys should always be seen
solvable_anytime.extend([p2pkh, p2pk, p2wpkh, p2sh_p2wpkh])
# P2SH_P2PK, P2SH_P2PKH with compressed keys are seen after direct importaddress
solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
for i in uncompressed_solvable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
                # Bare and P2SH multisig with uncompressed keys and no private keys are not seen after addmultisigaddress, but are seen after importaddress
solvable_after_importaddress.extend([bare, p2sh])
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with uncompressed keys should always be seen
solvable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH with uncompressed keys are seen after direct importaddress
solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
# Witness output types with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
op1 = CScript([OP_1])
op0 = CScript([OP_0])
# 2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe is the P2SH(P2PKH) version of mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V
unsolvable_address = ["mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V", "2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe", script_to_p2sh(op1), script_to_p2sh(op0)]
unsolvable_address_key = hex_str_to_bytes("02341AEC7587A51CDE5279E0630A531AEA2615A9F80B17E8D9376327BAEAA59E3D")
unsolvablep2pkh = CScript([OP_DUP, OP_HASH160, hash160(unsolvable_address_key), OP_EQUALVERIFY, OP_CHECKSIG])
unsolvablep2wshp2pkh = CScript([OP_0, sha256(unsolvablep2pkh)])
p2shop0 = CScript([OP_HASH160, hash160(op0), OP_EQUAL])
p2wshop1 = CScript([OP_0, sha256(op1)])
unsolvable_after_importaddress.append(unsolvablep2pkh)
unsolvable_after_importaddress.append(unsolvablep2wshp2pkh)
unsolvable_after_importaddress.append(op1) # OP_1 will be imported as script
unsolvable_after_importaddress.append(p2wshop1)
unseen_anytime.append(op0) # OP_0 will be imported as P2SH address with no script provided
unsolvable_after_importaddress.append(p2shop0)
spendable_txid = []
solvable_txid = []
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime, 1))
self.mine_and_test_listunspent(spendable_after_importaddress + solvable_after_importaddress + unseen_anytime + unsolvable_after_importaddress, 0)
importlist = []
for i in compressed_spendable_address + uncompressed_spendable_address + compressed_solvable_address + uncompressed_solvable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
bare = hex_str_to_bytes(v['hex'])
importlist.append(bytes_to_hex_str(bare))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(bare)])))
else:
pubkey = hex_str_to_bytes(v['pubkey'])
p2pk = CScript([pubkey, OP_CHECKSIG])
p2pkh = CScript([OP_DUP, OP_HASH160, hash160(pubkey), OP_EQUALVERIFY, OP_CHECKSIG])
importlist.append(bytes_to_hex_str(p2pk))
importlist.append(bytes_to_hex_str(p2pkh))
importlist.append(bytes_to_hex_str(CScript([OP_0, hash160(pubkey)])))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pk)])))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pkh)])))
importlist.append(bytes_to_hex_str(unsolvablep2pkh))
importlist.append(bytes_to_hex_str(unsolvablep2wshp2pkh))
importlist.append(bytes_to_hex_str(op1))
importlist.append(bytes_to_hex_str(p2wshop1))
for i in importlist:
# import all generated addresses. The wallet already has the private keys for some of these, so catch JSON RPC
# exceptions and continue.
try_rpc(-4, "The wallet already contains the private key for this address or script", self.nodes[0].importaddress, i, "", False, True)
self.nodes[0].importaddress(script_to_p2sh(op0)) # import OP_0 as address only
self.nodes[0].importaddress(multisig_without_privkey_address) # Test multisig_without_privkey
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
self.mine_and_test_listunspent(unseen_anytime, 0)
# addwitnessaddress should refuse to return a witness address if an uncompressed key is used
# note that no witness address should be returned by unsolvable addresses
for i in uncompressed_spendable_address + uncompressed_solvable_address + unknown_address + unsolvable_address:
assert_raises_rpc_error(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i)
# addwitnessaddress should return a witness addresses even if keys are not in the wallet
self.nodes[0].addwitnessaddress(multisig_without_privkey_address)
for i in compressed_spendable_address + compressed_solvable_address:
witaddress = self.nodes[0].addwitnessaddress(i)
# addwitnessaddress should return the same address if it is a known P2SH-witness address
assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
self.mine_and_test_listunspent(unseen_anytime, 0)
# Repeat some tests. This time we don't add witness scripts with importaddress
# Import a compressed key and an uncompressed key, generate some multisig addresses
self.nodes[0].importprivkey("927pw6RW8ZekycnXqBQ2JS5nPyo1yRfGNN8oq74HeddWSpafDJH")
uncompressed_spendable_address = ["mguN2vNSCEUh6rJaXoAVwY3YZwZvEmf5xi"]
self.nodes[0].importprivkey("cMcrXaaUC48ZKpcyydfFo8PxHAjpsYLhdsp6nmtB3E2ER9UUHWnw")
compressed_spendable_address = ["n1UNmpmbVUJ9ytXYXiurmGPQ3TRrXqPWKL"]
self.nodes[0].importpubkey(pubkeys[5])
compressed_solvable_address = [key_to_p2pkh(pubkeys[5])]
self.nodes[0].importpubkey(pubkeys[6])
uncompressed_solvable_address = [key_to_p2pkh(pubkeys[6])]
        spendable_after_addwitnessaddress = []  # These outputs should be seen after addwitnessaddress
        solvable_after_addwitnessaddress = []  # These outputs should be seen after addwitnessaddress but not spendable
        solvable_anytime = []  # These outputs should be solvable after importpubkey
        unseen_anytime = []  # These outputs should never be seen
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]]))
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]]))
compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]]))
uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], uncompressed_solvable_address[0]]))
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]]))
premature_witaddress = []
for i in compressed_spendable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after addwitnessaddress
spendable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])
premature_witaddress.append(script_to_p2sh(p2wsh))
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2WPKH, P2SH_P2WPKH are always spendable
spendable_anytime.extend([p2wpkh, p2sh_p2wpkh])
for i in uncompressed_spendable_address + uncompressed_solvable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2WPKH, P2SH_P2WPKH with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh])
for i in compressed_solvable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
                # P2WSH and P2SH(P2WSH) multisig without private keys are seen after addwitnessaddress
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
solvable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])
premature_witaddress.append(script_to_p2sh(p2wsh))
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
                # P2WPKH and P2SH_P2WPKH with compressed keys are always solvable
solvable_anytime.extend([p2wpkh, p2sh_p2wpkh])
self.mine_and_test_listunspent(spendable_anytime, 2)
self.mine_and_test_listunspent(solvable_anytime, 1)
self.mine_and_test_listunspent(spendable_after_addwitnessaddress + solvable_after_addwitnessaddress + unseen_anytime, 0)
# addwitnessaddress should refuse to return a witness address if an uncompressed key is used
# note that a multisig address returned by addmultisigaddress is not solvable until it is added with importaddress
# premature_witaddress are not accepted until the script is added with addwitnessaddress first
for i in uncompressed_spendable_address + uncompressed_solvable_address + premature_witaddress:
# This will raise an exception
assert_raises_rpc_error(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i)
# after importaddress it should pass addwitnessaddress
v = self.nodes[0].validateaddress(compressed_solvable_address[1])
self.nodes[0].importaddress(v['hex'],"",False,True)
for i in compressed_spendable_address + compressed_solvable_address + premature_witaddress:
witaddress = self.nodes[0].addwitnessaddress(i)
assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))
spendable_txid.append(self.mine_and_test_listunspent(spendable_after_addwitnessaddress + spendable_anytime, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_after_addwitnessaddress + solvable_anytime, 1))
self.mine_and_test_listunspent(unseen_anytime, 0)
# Check that createrawtransaction/decoderawtransaction with non-v0 Bech32 works
v1_addr = program_to_witness(1, [3,5])
v1_tx = self.nodes[0].createrawtransaction([getutxo(spendable_txid[0])],{v1_addr: 1})
v1_decoded = self.nodes[1].decoderawtransaction(v1_tx)
assert_equal(v1_decoded['vout'][0]['scriptPubKey']['addresses'][0], v1_addr)
assert_equal(v1_decoded['vout'][0]['scriptPubKey']['hex'], "51020305")
# Check that spendable outputs are really spendable
self.create_and_mine_tx_from_txids(spendable_txid)
# import all the private keys so solvable addresses become spendable
self.nodes[0].importprivkey("cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb")
self.nodes[0].importprivkey("cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97")
self.nodes[0].importprivkey("91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV")
self.nodes[0].importprivkey("cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd")
self.nodes[0].importprivkey("cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66")
self.nodes[0].importprivkey("cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K")
self.create_and_mine_tx_from_txids(solvable_txid)
# Test that importing native P2WPKH/P2WSH scripts works
for use_p2wsh in [False, True]:
if use_p2wsh:
scriptPubKey = "00203a59f3f56b713fdcf5d1a57357f02c44342cbf306ffe0c4741046837bf90561a"
transaction = "01000000000100e1f505000000002200203a59f3f56b713fdcf5d1a57357f02c44342cbf306ffe0c4741046837bf90561a00000000"
else:
scriptPubKey = "a9142f8c469c2f0084c48e11f998ffbe7efa7549f26d87"
transaction = "01000000000100e1f5050000000017a9142f8c469c2f0084c48e11f998ffbe7efa7549f26d8700000000"
self.nodes[1].importaddress(scriptPubKey, "", False)
rawtxfund = self.nodes[1].fundrawtransaction(transaction)['hex']
rawtxfund = self.nodes[1].signrawtransaction(rawtxfund)["hex"]
txid = self.nodes[1].sendrawtransaction(rawtxfund)
assert_equal(self.nodes[1].gettransaction(txid, True)["txid"], txid)
assert_equal(self.nodes[1].listtransactions("*", 1, 0, True)[0]["txid"], txid)
# Assert it is properly saved
self.stop_node(1)
self.start_node(1)
assert_equal(self.nodes[1].gettransaction(txid, True)["txid"], txid)
assert_equal(self.nodes[1].listtransactions("*", 1, 0, True)[0]["txid"], txid)
def mine_and_test_listunspent(self, script_list, ismine):
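        """Fund every script in script_list with 0.1 BTC from a single 50 BTC output, mine a block,
        and check how the new outputs appear in node 0's listunspent.

        ismine == 2: every output must be listed and spendable.
        ismine == 1: every output must be listed as watch-only (seen but not spendable).
        otherwise:   no output may be listed at all.
        Returns the funding txid."""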
utxo = find_unspent(self.nodes[0], 50)
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int('0x'+utxo['txid'],0), utxo['vout'])))
for i in script_list:
tx.vout.append(CTxOut(10000000, i))
tx.rehash()
signresults = self.nodes[0].signrawtransaction(bytes_to_hex_str(tx.serialize_without_witness()))['hex']
txid = self.nodes[0].sendrawtransaction(signresults, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
watchcount = 0
spendcount = 0
for i in self.nodes[0].listunspent():
if (i['txid'] == txid):
watchcount += 1
if (i['spendable'] == True):
spendcount += 1
if (ismine == 2):
assert_equal(spendcount, len(script_list))
elif (ismine == 1):
assert_equal(watchcount, len(script_list))
assert_equal(spendcount, 0)
else:
assert_equal(watchcount, 0)
return txid
def p2sh_address_to_script(self,v):
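        """Derive the script variants for a P2SH (multisig) validateaddress result.

        Returns [bare, p2sh, p2wsh, p2sh_p2wsh]: the redeem script itself, its P2SH
        scriptPubKey, and the P2WSH and P2SH(P2WSH) wrappings of the redeem script."""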
bare = CScript(hex_str_to_bytes(v['hex']))
p2sh = CScript(hex_str_to_bytes(v['scriptPubKey']))
p2wsh = CScript([OP_0, sha256(bare)])
p2sh_p2wsh = CScript([OP_HASH160, hash160(p2wsh), OP_EQUAL])
return([bare, p2sh, p2wsh, p2sh_p2wsh])
def p2pkh_address_to_script(self,v):
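        """Derive the standard single-key script variants from a validateaddress result.

        Returns, in order: p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh,
        p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh."""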
pubkey = hex_str_to_bytes(v['pubkey'])
p2wpkh = CScript([OP_0, hash160(pubkey)])
p2sh_p2wpkh = CScript([OP_HASH160, hash160(p2wpkh), OP_EQUAL])
p2pk = CScript([pubkey, OP_CHECKSIG])
p2pkh = CScript(hex_str_to_bytes(v['scriptPubKey']))
p2sh_p2pk = CScript([OP_HASH160, hash160(p2pk), OP_EQUAL])
p2sh_p2pkh = CScript([OP_HASH160, hash160(p2pkh), OP_EQUAL])
p2wsh_p2pk = CScript([OP_0, sha256(p2pk)])
p2wsh_p2pkh = CScript([OP_0, sha256(p2pkh)])
p2sh_p2wsh_p2pk = CScript([OP_HASH160, hash160(p2wsh_p2pk), OP_EQUAL])
p2sh_p2wsh_p2pkh = CScript([OP_HASH160, hash160(p2wsh_p2pkh), OP_EQUAL])
return [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]
def create_and_mine_tx_from_txids(self, txids, success = True):
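        """Build a single transaction spending every output of every txid in txids, sign it
        with node 0's wallet, broadcast it, and mine a block.

        The `success` parameter is accepted but not used by this implementation."""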
tx = CTransaction()
for i in txids:
txtmp = CTransaction()
txraw = self.nodes[0].getrawtransaction(i)
f = BytesIO(hex_str_to_bytes(txraw))
txtmp.deserialize(f)
for j in range(len(txtmp.vout)):
tx.vin.append(CTxIn(COutPoint(int('0x'+i,0), j)))
tx.vout.append(CTxOut(0, CScript()))
tx.rehash()
signresults = self.nodes[0].signrawtransaction(bytes_to_hex_str(tx.serialize_without_witness()))['hex']
self.nodes[0].sendrawtransaction(signresults, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
if __name__ == '__main__':
SegWitTest().main()
| laudaa/bitcoin | test/functional/segwit.py | Python | mit | 42,732 | 0.007208 |
"""Yelp API setup and random business selection function"""
import io
import json
import random
from yelp.client import Client
from yelp.oauth1_authenticator import Oauth1Authenticator
with io.open('config_yelp_secret.json') as cred:
creds = json.load(cred)
auth = Oauth1Authenticator(**creds)
yelp_client = Client(auth)
group_activity = ['arcades', 'amusementparks', 'lasertag', 'rock_climbing', 'gokarts',
'escapegames', 'mini_golf', 'trampoline', 'zoos', 'bowling', 'galleries']
fitness_activity = ['yoga', 'pilates', 'hiking', 'cyclingclasses']
relax_activity = ['spas', 'hair', 'skincare', 'othersalons', 'massage',
'outlet_stores', 'shoppingcenters', 'massage_therapy',
'acupuncture', 'ayurveda', 'chiropractors', 'venues', 'galleries',
'landmarks', 'gardens', 'museums', 'paintandsip', 'beaches']
night_activity = ['cabaret', 'movietheaters', 'musicvenues', 'opera', 'theater',
'cocktailbars', 'lounges', 'sportsbars', 'wine_bar',
'poolhalls', 'pianobars', 'karaoke', 'jazzandblues',
'danceclubs']
eat_activity = ['wineries', 'farmersmarket', 'cafes', 'bakeries', 'bubbletea', 'coffee',
'restaurants','beer_and_wine', 'icecream', 'gourmet', 'juicebars',
'asianfusion', 'japanese', 'seafood', 'breweries']
def yelp_random_pick(event, city):
"""Generate a top business pick for user."""
if event == 'food':
category_filter = random.choice(eat_activity)
elif event == 'friends':
category_filter = random.choice(group_activity)
elif event == 'relax':
category_filter = random.choice(relax_activity)
elif event == 'nightlife':
category_filter = random.choice(night_activity)
    elif event == 'fitness':
        category_filter = random.choice(fitness_activity)
    else:
        raise ValueError("Unknown event type: {0}".format(event))
params = {
'sort': 2,
'category_filter': category_filter
}
response = yelp_client.search(city, **params)
biz = response.businesses[0]
return biz
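
# A minimal usage sketch (illustration only): assumes valid OAuth credentials in
# config_yelp_secret.json, network access to the Yelp v2 API, and that the returned
# Business object exposes the usual yelp-python attributes (name, rating, url).
if __name__ == '__main__':
    pick = yelp_random_pick('food', 'San Francisco')
    print("{0} ({1} stars) - {2}".format(pick.name, pick.rating, pick.url))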
| neonbadger/DestinationUnknown | yelp_api.py | Python | mit | 2,082 | 0.004803 |
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException, UnexpectedTagNameException
class Select:
def __init__(self, webelement):
"""
Constructor. A check is made that the given element is, indeed, a SELECT tag. If it is not,
then an UnexpectedTagNameException is thrown.
:Args:
- webelement - element SELECT element to wrap
Example:
from selenium.webdriver.support.ui import Select \n
Select(driver.find_element_by_tag_name("select")).select_by_index(2)
"""
if webelement.tag_name.lower() != "select":
raise UnexpectedTagNameException(
"Select only works on <select> elements, not on <%s>" %
webelement.tag_name)
self._el = webelement
multi = self._el.get_attribute("multiple")
self.is_multiple = multi and multi != "false"
@property
def options(self):
"""Returns a list of all options belonging to this select tag"""
return self._el.find_elements(By.TAG_NAME, 'option')
@property
def all_selected_options(self):
"""Returns a list of all selected options belonging to this select tag"""
ret = []
for opt in self.options:
if opt.is_selected():
ret.append(opt)
return ret
@property
def first_selected_option(self):
"""The first selected option in this select tag (or the currently selected option in a
normal select)"""
for opt in self.options:
if opt.is_selected():
return opt
raise NoSuchElementException("No options are selected")
def select_by_value(self, value):
"""Select all options that have a value matching the argument. That is, when given "foo" this
would select an option like:
<option value="foo">Bar</option>
:Args:
- value - The value to match against
        throws NoSuchElementException if there is no option with specified value in SELECT
"""
css = "option[value =%s]" % self._escapeString(value)
opts = self._el.find_elements(By.CSS_SELECTOR, css)
matched = False
for opt in opts:
self._setSelected(opt)
if not self.is_multiple:
return
matched = True
if not matched:
raise NoSuchElementException("Cannot locate option with value: %s" % value)
def select_by_index(self, index):
"""Select the option at the given index. This is done by examing the "index" attribute of an
element, and not merely by counting.
:Args:
- index - The option at this index will be selected
        throws NoSuchElementException if there is no option with specified index in SELECT
"""
match = str(index)
for opt in self.options:
if opt.get_attribute("index") == match:
self._setSelected(opt)
return
raise NoSuchElementException("Could not locate element with index %d" % index)
def select_by_visible_text(self, text):
"""Select all options that display text matching the argument. That is, when given "Bar" this
would select an option like:
<option value="foo">Bar</option>
:Args:
- text - The visible text to match against
        throws NoSuchElementException if there is no option with specified text in SELECT
"""
xpath = ".//option[normalize-space(.) = %s]" % self._escapeString(text)
opts = self._el.find_elements(By.XPATH, xpath)
matched = False
for opt in opts:
self._setSelected(opt)
if not self.is_multiple:
return
matched = True
if len(opts) == 0 and " " in text:
subStringWithoutSpace = self._get_longest_token(text)
if subStringWithoutSpace == "":
candidates = self.options
else:
xpath = ".//option[contains(.,%s)]" % self._escapeString(subStringWithoutSpace)
candidates = self._el.find_elements(By.XPATH, xpath)
for candidate in candidates:
if text == candidate.text:
self._setSelected(candidate)
if not self.is_multiple:
return
matched = True
if not matched:
raise NoSuchElementException("Could not locate element with visible text: %s" % text)
def deselect_all(self):
"""Clear all selected entries. This is only valid when the SELECT supports multiple selections.
throws NotImplementedError If the SELECT does not support multiple selections
"""
if not self.is_multiple:
raise NotImplementedError("You may only deselect all options of a multi-select")
for opt in self.options:
self._unsetSelected(opt)
def deselect_by_value(self, value):
"""Deselect all options that have a value matching the argument. That is, when given "foo" this
would deselect an option like:
<option value="foo">Bar</option>
:Args:
- value - The value to match against
        throws NoSuchElementException if there is no option with specified value in SELECT
"""
if not self.is_multiple:
raise NotImplementedError("You may only deselect options of a multi-select")
matched = False
css = "option[value = %s]" % self._escapeString(value)
opts = self._el.find_elements(By.CSS_SELECTOR, css)
for opt in opts:
self._unsetSelected(opt)
matched = True
if not matched:
raise NoSuchElementException("Could not locate element with value: %s" % value)
def deselect_by_index(self, index):
"""Deselect the option at the given index. This is done by examing the "index" attribute of an
element, and not merely by counting.
:Args:
- index - The option at this index will be deselected
        throws NoSuchElementException if there is no option with specified index in SELECT
"""
if not self.is_multiple:
raise NotImplementedError("You may only deselect options of a multi-select")
for opt in self.options:
if opt.get_attribute("index") == str(index):
self._unsetSelected(opt)
return
raise NoSuchElementException("Could not locate element with index %d" % index)
def deselect_by_visible_text(self, text):
"""Deselect all options that display text matching the argument. That is, when given "Bar" this
would deselect an option like:
<option value="foo">Bar</option>
:Args:
- text - The visible text to match against
"""
if not self.is_multiple:
raise NotImplementedError("You may only deselect options of a multi-select")
matched = False
xpath = ".//option[normalize-space(.) = %s]" % self._escapeString(text)
opts = self._el.find_elements(By.XPATH, xpath)
for opt in opts:
self._unsetSelected(opt)
matched = True
if not matched:
raise NoSuchElementException("Could not locate element with visible text: %s" % text)
def _setSelected(self, option):
if not option.is_selected():
option.click()
def _unsetSelected(self, option):
if option.is_selected():
option.click()
def _escapeString(self, value):
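        # Quote-handling helper for the selector strings built above: wrap the value in
        # whichever quote character it does not contain; if it contains both quote types,
        # build an XPath concat(...) expression from double-quote-delimited chunks joined
        # by literal '"' pieces, e.g. a "mixed" 'value' -> concat("a ", '"', "mixed", '"', " 'value'").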
if '"' in value and "'" in value:
substrings = value.split("\"")
result = ["concat("]
for substring in substrings:
result.append("\"%s\"" % substring)
result.append(", '\"', ")
result = result[0:-1]
if value.endswith('"'):
result.append(", '\"'")
return "".join(result) + ")"
if '"' in value:
return "'%s'" % value
return "\"%s\"" % value
def _get_longest_token(self, value):
items = value.split(" ")
longest = ""
for item in items:
if len(item) > len(longest):
longest = item
return longest
| tkingless/webtesting | venvs/dev/lib/python2.7/site-packages/selenium/webdriver/support/select.py | Python | mit | 9,249 | 0.003027 |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..minc import Voliso
def test_Voliso_inputs():
input_map = dict(args=dict(argstr='%s',
),
avgstep=dict(argstr='--avgstep',
),
clobber=dict(argstr='--clobber',
usedefault=True,
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
input_file=dict(argstr='%s',
mandatory=True,
position=-2,
),
maxstep=dict(argstr='--maxstep %s',
),
minstep=dict(argstr='--minstep %s',
),
output_file=dict(argstr='%s',
genfile=True,
hash_files=False,
name_source=['input_file'],
name_template='%s_voliso.mnc',
position=-1,
),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
verbose=dict(argstr='--verbose',
),
)
inputs = Voliso.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_Voliso_outputs():
output_map = dict(output_file=dict(),
)
outputs = Voliso.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| mick-d/nipype | nipype/interfaces/minc/tests/test_auto_Voliso.py | Python | bsd-3-clause | 1,379 | 0.023205 |
from decimal import Decimal
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.gis.db.models.sql import AreaField
from django.contrib.gis.measure import (
Area as AreaMeasure, Distance as DistanceMeasure,
)
from django.core.exceptions import FieldError
from django.db.models import FloatField, IntegerField, TextField
from django.db.models.expressions import Func, Value
from django.utils import six
NUMERIC_TYPES = six.integer_types + (float, Decimal)
class GeoFunc(Func):
function = None
output_field_class = None
geom_param_pos = 0
def __init__(self, *expressions, **extra):
if 'output_field' not in extra and self.output_field_class:
extra['output_field'] = self.output_field_class()
super(GeoFunc, self).__init__(*expressions, **extra)
@property
def name(self):
return self.__class__.__name__
@property
def srid(self):
expr = self.source_expressions[self.geom_param_pos]
if hasattr(expr, 'srid'):
return expr.srid
try:
return expr.field.srid
except (AttributeError, FieldError):
return None
def as_sql(self, compiler, connection):
if self.function is None:
self.function = connection.ops.spatial_function_name(self.name)
return super(GeoFunc, self).as_sql(compiler, connection)
def resolve_expression(self, *args, **kwargs):
res = super(GeoFunc, self).resolve_expression(*args, **kwargs)
base_srid = res.srid
if not base_srid:
raise TypeError("Geometry functions can only operate on geometric content.")
for pos, expr in enumerate(res.source_expressions[1:], start=1):
if isinstance(expr, GeomValue) and expr.srid != base_srid:
# Automatic SRID conversion so objects are comparable
res.source_expressions[pos] = Transform(expr, base_srid).resolve_expression(*args, **kwargs)
return res
def _handle_param(self, value, param_name='', check_types=None):
if not hasattr(value, 'resolve_expression'):
if check_types and not isinstance(value, check_types):
raise TypeError(
"The %s parameter has the wrong type: should be %s." % (
param_name, str(check_types))
)
return value
class GeomValue(Value):
geography = False
@property
def srid(self):
return self.value.srid
def as_sql(self, compiler, connection):
if self.geography:
self.value = connection.ops.Adapter(self.value, geography=self.geography)
else:
self.value = connection.ops.Adapter(self.value)
return super(GeomValue, self).as_sql(compiler, connection)
def as_mysql(self, compiler, connection):
return 'GeomFromText(%%s, %s)' % self.srid, [connection.ops.Adapter(self.value)]
def as_sqlite(self, compiler, connection):
return 'GeomFromText(%%s, %s)' % self.srid, [connection.ops.Adapter(self.value)]
def as_oracle(self, compiler, connection):
return 'SDO_GEOMETRY(%%s, %s)' % self.srid, [connection.ops.Adapter(self.value)]
class GeoFuncWithGeoParam(GeoFunc):
def __init__(self, expression, geom, *expressions, **extra):
if not hasattr(geom, 'srid') or not geom.srid:
raise ValueError("Please provide a geometry attribute with a defined SRID.")
super(GeoFuncWithGeoParam, self).__init__(expression, GeomValue(geom), *expressions, **extra)
class SQLiteDecimalToFloatMixin(object):
"""
    By default, Decimal values are converted to str by the SQLite backend, which
    is not accepted by the GIS functions, which expect numeric values.
"""
def as_sqlite(self, compiler, connection):
for expr in self.get_source_expressions():
if hasattr(expr, 'value') and isinstance(expr.value, Decimal):
expr.value = float(expr.value)
return super(SQLiteDecimalToFloatMixin, self).as_sql(compiler, connection)
class OracleToleranceMixin(object):
tolerance = 0.05
def as_oracle(self, compiler, connection):
tol = self.extra.get('tolerance', self.tolerance)
self.template = "%%(function)s(%%(expressions)s, %s)" % tol
return super(OracleToleranceMixin, self).as_sql(compiler, connection)
class Area(OracleToleranceMixin, GeoFunc):
output_field_class = AreaField
arity = 1
def as_sql(self, compiler, connection):
if connection.ops.geography:
self.output_field.area_att = 'sq_m'
else:
# Getting the area units of the geographic field.
source_fields = self.get_source_fields()
if len(source_fields):
source_field = source_fields[0]
if source_field.geodetic(connection):
# TODO: Do we want to support raw number areas for geodetic fields?
raise NotImplementedError('Area on geodetic coordinate systems not supported.')
units_name = source_field.units_name(connection)
if units_name:
self.output_field.area_att = AreaMeasure.unit_attname(units_name)
return super(Area, self).as_sql(compiler, connection)
def as_oracle(self, compiler, connection):
self.output_field = AreaField('sq_m') # Oracle returns area in units of meters.
return super(Area, self).as_oracle(compiler, connection)
class AsGeoJSON(GeoFunc):
output_field_class = TextField
def __init__(self, expression, bbox=False, crs=False, precision=8, **extra):
expressions = [expression]
if precision is not None:
expressions.append(self._handle_param(precision, 'precision', six.integer_types))
options = 0
if crs and bbox:
options = 3
elif bbox:
options = 1
elif crs:
options = 2
if options:
expressions.append(options)
super(AsGeoJSON, self).__init__(*expressions, **extra)
class AsGML(GeoFunc):
geom_param_pos = 1
output_field_class = TextField
def __init__(self, expression, version=2, precision=8, **extra):
expressions = [version, expression]
if precision is not None:
expressions.append(self._handle_param(precision, 'precision', six.integer_types))
super(AsGML, self).__init__(*expressions, **extra)
class AsKML(AsGML):
def as_sqlite(self, compiler, connection):
# No version parameter
self.source_expressions.pop(0)
return super(AsKML, self).as_sql(compiler, connection)
class AsSVG(GeoFunc):
output_field_class = TextField
def __init__(self, expression, relative=False, precision=8, **extra):
relative = relative if hasattr(relative, 'resolve_expression') else int(relative)
expressions = [
expression,
relative,
self._handle_param(precision, 'precision', six.integer_types),
]
super(AsSVG, self).__init__(*expressions, **extra)
class BoundingCircle(GeoFunc):
def __init__(self, expression, num_seg=48, **extra):
super(BoundingCircle, self).__init__(*[expression, num_seg], **extra)
class Centroid(OracleToleranceMixin, GeoFunc):
arity = 1
class Difference(OracleToleranceMixin, GeoFuncWithGeoParam):
arity = 2
class DistanceResultMixin(object):
def source_is_geography(self):
return self.get_source_fields()[0].geography and self.srid == 4326
def convert_value(self, value, expression, connection, context):
if value is None:
return None
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
if geo_field.geodetic(connection):
dist_att = 'm'
else:
units = geo_field.units_name(connection)
if units:
dist_att = DistanceMeasure.unit_attname(units)
else:
dist_att = None
if dist_att:
return DistanceMeasure(**{dist_att: value})
return value
class Distance(DistanceResultMixin, OracleToleranceMixin, GeoFuncWithGeoParam):
output_field_class = FloatField
spheroid = None
def __init__(self, expr1, expr2, spheroid=None, **extra):
expressions = [expr1, expr2]
if spheroid is not None:
self.spheroid = spheroid
expressions += (self._handle_param(spheroid, 'spheroid', bool),)
super(Distance, self).__init__(*expressions, **extra)
def as_postgresql(self, compiler, connection):
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
if self.source_is_geography():
# Set parameters as geography if base field is geography
for pos, expr in enumerate(
self.source_expressions[self.geom_param_pos + 1:], start=self.geom_param_pos + 1):
if isinstance(expr, GeomValue):
expr.geography = True
elif geo_field.geodetic(connection):
# Geometry fields with geodetic (lon/lat) coordinates need special distance functions
if self.spheroid:
self.function = 'ST_Distance_Spheroid' # More accurate, resource intensive
# Replace boolean param by the real spheroid of the base field
self.source_expressions[2] = Value(geo_field._spheroid)
else:
self.function = 'ST_Distance_Sphere'
return super(Distance, self).as_sql(compiler, connection)
def as_oracle(self, compiler, connection):
if self.spheroid:
self.source_expressions.pop(2)
return super(Distance, self).as_oracle(compiler, connection)
class Envelope(GeoFunc):
arity = 1
class ForceRHR(GeoFunc):
arity = 1
class GeoHash(GeoFunc):
output_field_class = TextField
def __init__(self, expression, precision=None, **extra):
expressions = [expression]
if precision is not None:
expressions.append(self._handle_param(precision, 'precision', six.integer_types))
super(GeoHash, self).__init__(*expressions, **extra)
class Intersection(OracleToleranceMixin, GeoFuncWithGeoParam):
arity = 2
class Length(DistanceResultMixin, OracleToleranceMixin, GeoFunc):
output_field_class = FloatField
def __init__(self, expr1, spheroid=True, **extra):
self.spheroid = spheroid
super(Length, self).__init__(expr1, **extra)
def as_sql(self, compiler, connection):
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
if geo_field.geodetic(connection) and not connection.features.supports_length_geodetic:
raise NotImplementedError("This backend doesn't support Length on geodetic fields")
return super(Length, self).as_sql(compiler, connection)
def as_postgresql(self, compiler, connection):
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
if self.source_is_geography():
self.source_expressions.append(Value(self.spheroid))
elif geo_field.geodetic(connection):
# Geometry fields with geodetic (lon/lat) coordinates need length_spheroid
self.function = 'ST_Length_Spheroid'
self.source_expressions.append(Value(geo_field._spheroid))
else:
dim = min(f.dim for f in self.get_source_fields() if f)
if dim > 2:
self.function = connection.ops.length3d
return super(Length, self).as_sql(compiler, connection)
def as_sqlite(self, compiler, connection):
geo_field = GeometryField(srid=self.srid)
if geo_field.geodetic(connection):
if self.spheroid:
self.function = 'GeodesicLength'
else:
self.function = 'GreatCircleLength'
return super(Length, self).as_sql(compiler, connection)
class MemSize(GeoFunc):
output_field_class = IntegerField
arity = 1
class NumGeometries(GeoFunc):
output_field_class = IntegerField
arity = 1
class NumPoints(GeoFunc):
output_field_class = IntegerField
arity = 1
def as_sqlite(self, compiler, connection):
if self.source_expressions[self.geom_param_pos].output_field.geom_type != 'LINESTRING':
raise TypeError("Spatialite NumPoints can only operate on LineString content")
return super(NumPoints, self).as_sql(compiler, connection)
class Perimeter(DistanceResultMixin, OracleToleranceMixin, GeoFunc):
output_field_class = FloatField
arity = 1
def as_postgresql(self, compiler, connection):
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
if geo_field.geodetic(connection) and not self.source_is_geography():
raise NotImplementedError("ST_Perimeter cannot use a non-projected non-geography field.")
dim = min(f.dim for f in self.get_source_fields())
if dim > 2:
self.function = connection.ops.perimeter3d
return super(Perimeter, self).as_sql(compiler, connection)
def as_sqlite(self, compiler, connection):
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
if geo_field.geodetic(connection):
raise NotImplementedError("Perimeter cannot use a non-projected field.")
return super(Perimeter, self).as_sql(compiler, connection)
class PointOnSurface(OracleToleranceMixin, GeoFunc):
arity = 1
class Reverse(GeoFunc):
arity = 1
class Scale(SQLiteDecimalToFloatMixin, GeoFunc):
def __init__(self, expression, x, y, z=0.0, **extra):
expressions = [
expression,
self._handle_param(x, 'x', NUMERIC_TYPES),
self._handle_param(y, 'y', NUMERIC_TYPES),
]
if z != 0.0:
expressions.append(self._handle_param(z, 'z', NUMERIC_TYPES))
super(Scale, self).__init__(*expressions, **extra)
class SnapToGrid(SQLiteDecimalToFloatMixin, GeoFunc):
def __init__(self, expression, *args, **extra):
nargs = len(args)
expressions = [expression]
if nargs in (1, 2):
expressions.extend(
[self._handle_param(arg, '', NUMERIC_TYPES) for arg in args]
)
elif nargs == 4:
# Reverse origin and size param ordering
expressions.extend(
[self._handle_param(arg, '', NUMERIC_TYPES) for arg in args[2:]]
)
expressions.extend(
[self._handle_param(arg, '', NUMERIC_TYPES) for arg in args[0:2]]
)
else:
raise ValueError('Must provide 1, 2, or 4 arguments to `SnapToGrid`.')
super(SnapToGrid, self).__init__(*expressions, **extra)
class SymDifference(OracleToleranceMixin, GeoFuncWithGeoParam):
arity = 2
class Transform(GeoFunc):
def __init__(self, expression, srid, **extra):
expressions = [
expression,
self._handle_param(srid, 'srid', six.integer_types),
]
super(Transform, self).__init__(*expressions, **extra)
@property
def srid(self):
# Make srid the resulting srid of the transformation
return self.source_expressions[self.geom_param_pos + 1].value
def convert_value(self, value, expression, connection, context):
value = super(Transform, self).convert_value(value, expression, connection, context)
if not connection.ops.postgis and not value.srid:
# Some backends do not set the srid on the returning geometry
value.srid = self.srid
return value
class Translate(Scale):
def as_sqlite(self, compiler, connection):
func_name = connection.ops.spatial_function_name(self.name)
if func_name == 'ST_Translate' and len(self.source_expressions) < 4:
# Always provide the z parameter for ST_Translate (Spatialite >= 3.1)
self.source_expressions.append(Value(0))
elif func_name == 'ShiftCoords' and len(self.source_expressions) > 3:
raise ValueError("This version of Spatialite doesn't support 3D")
return super(Translate, self).as_sqlite(compiler, connection)
class Union(OracleToleranceMixin, GeoFuncWithGeoParam):
arity = 2
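

# Usage sketch (illustration only, not part of this module): these function classes are
# meant to be used as queryset annotations on a model that has a geometry field, e.g. a
# hypothetical ``City`` model with a ``PointField`` named ``point``:
#
#     from django.contrib.gis.db.models.functions import Distance, Transform
#     from django.contrib.gis.geos import Point
#
#     pnt = Point(-95.36, 29.76, srid=4326)
#     nearest = (City.objects
#                .annotate(distance=Distance('point', pnt),
#                          mercator=Transform('point', 3857))
#                .order_by('distance')
#                .first())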
| yephper/django | django/contrib/gis/db/models/functions.py | Python | bsd-3-clause | 16,825 | 0.002377 |
"""
fields - Lists the variables stored in the file, a default value, and a description
"""
from . import _fileutils as fu
import warnings
# List of file data variables, default values, and documentation
# This is used in both reader and writer
fields = [
(
"tsaisq",
0.0,
"total chi2 from magnetic probes, flux loops, Rogowski and external coils",
),
("rcencm", 100.0, "major radius in cm for vacuum field BCENTR"),
("bcentr", 1.0, "vacuum toroidal magnetic field in Tesla at RCENCM"),
("pasmat", 1e6, "measured plasma toroidal current in Ampere"),
("cpasma", 1e6, "fitted plasma toroidal current in Ampere-turn"),
("rout", 100.0, "major radius of geometric center in cm"),
("zout", 0.0, "Z of geometric center in cm"),
("aout", 50.0, "plasma minor radius in cm"),
("eout", 1.0, "Plasma boundary elongation"),
("doutu", 1.0, "upper triangularity"),
("doutl", 1.0, "lower triangularity"),
("vout", 1000.0, "plasma volume in cm3"),
("rcurrt", 100.0, "major radius in cm of current centroid"),
("zcurrt", 0.0, "Z in cm at current centroid"),
("qsta", 5.0, "equivalent safety factor q*"),
("betat", 1.0, "toroidal beta in %"),
(
"betap",
1.0,
"poloidal beta with normalization average poloidal magnetic BPOLAV defined through Ampere's law",
),
(
"ali",
0.0,
"li with normalization average poloidal magnetic defined through Ampere's law",
),
("oleft", 10.0, "plasma inner gap in cm"),
("oright", 10.0, "plasma outer gap in cm"),
("otop", 10.0, "plasma top gap in cm"),
("obott", 10.0, "plasma bottom gap in cm"),
("qpsib", 5.0, "q at 95% of poloidal flux"),
("vertn", 1.0, "vacuum field (index? -- seems to be float) at current centroid"),
# fmt_1040 = r '^\s*' + 4 * r '([\s\-]\d+\.\d+[Ee][\+\-]\d\d)'
# read(neqdsk, 1040)(rco2v(k, jj), k = 1, mco2v)
(None, None, None), # New line
(
"rco2v",
lambda data: [0.0] * data["mco2v"],
"1D array : path length in cm of vertical CO2 density chord",
),
# read(neqdsk, 1040)(dco2v(jj, k), k = 1, mco2v)
(None, None, None), # New line
(
"dco2v",
lambda data: [0.0] * data["mco2v"],
"line average electron density in cm3 from vertical CO2 chord",
),
# read(neqdsk, 1040)(rco2r(k, jj), k = 1, mco2r)
(None, None, None), # New line
(
"rco2r",
lambda data: [0.0] * data["mco2r"],
"path length in cm of radial CO2 density chord",
),
# read(neqdsk, 1040)(dco2r(jj, k), k = 1, mco2r)
(None, None, None), # New line
(
"dco2r",
lambda data: [0.0] * data["mco2r"],
"line average electron density in cm3 from radial CO2 chord",
),
(None, None, None), # New line
("shearb", 0.0, ""),
(
"bpolav",
1.0,
"average poloidal magnetic field in Tesla defined through Ampere's law",
),
("s1", 0.0, "Shafranov boundary line integrals"),
("s2", 0.0, "Shafranov boundary line integrals"),
("s3", 0.0, "Shafranov boundary line integrals"),
("qout", 0.0, "q at plasma boundary"),
("olefs", 0.0, ""),
("orighs", 0.0, "outer gap of external second separatrix in cm"),
("otops", 0.0, "top gap of external second separatrix in cm"),
("sibdry", 1.0, ""),
("areao", 100.0, "cross sectional area in cm2"),
("wplasm", 0.0, ""),
("terror", 0.0, "equilibrium convergence error"),
("elongm", 0.0, "elongation at magnetic axis"),
("qqmagx", 0.0, "axial safety factor q(0)"),
("cdflux", 0.0, "computed diamagnetic flux in Volt-sec"),
("alpha", 0.0, "Shafranov boundary line integral parameter"),
("rttt", 0.0, "Shafranov boundary line integral parameter"),
("psiref", 1.0, "reference poloidal flux in VS/rad"),
(
"xndnt",
0.0,
"vertical stability parameter, vacuum field index normalized to critical index value",
),
("rseps1", 1.0, "major radius of x point in cm"),
("zseps1", -1.0, ""),
("rseps2", 1.0, "major radius of x point in cm"),
("zseps2", 1.0, ""),
("sepexp", 0.0, "separatrix radial expansion in cm"),
("obots", 0.0, "bottom gap of external second separatrix in cm"),
("btaxp", 1.0, "toroidal magnetic field at magnetic axis in Tesla"),
("btaxv", 1.0, "vacuum toroidal magnetic field at magnetic axis in Tesla"),
("aaq1", 100.0, "minor radius of q=1 surface in cm, 100 if not found"),
("aaq2", 100.0, "minor radius of q=2 surface in cm, 100 if not found"),
("aaq3", 100.0, "minor radius of q=3 surface in cm, 100 if not found"),
(
"seplim",
0.0,
"> 0 for minimum gap in cm in divertor configurations, < 0 absolute value for minimum distance to external separatrix in limiter configurations",
),
("rmagx", 100.0, "major radius in cm at magnetic axis"),
("zmagx", 0.0, ""),
("simagx", 0.0, "Poloidal flux at the magnetic axis"),
("taumhd", 0.0, "energy confinement time in ms"),
("betapd", 0.0, "diamagnetic poloidal b"),
("betatd", 0.0, "diamagnetic toroidal b in %"),
("wplasmd", 0.0, "diamagnetic plasma stored energy in Joule"),
("diamag", 0.0, "measured diamagnetic flux in Volt-sec"),
("vloopt", 0.0, "measured loop voltage in volt"),
("taudia", 0.0, "diamagnetic energy confinement time in ms"),
(
"qmerci",
0.0,
"Mercier stability criterion on axial q(0), q(0) > QMERCI for stability",
),
("tavem", 0.0, "average time in ms for magnetic and MSE data"),
# ishot > 91000
# The next section is dependent on the EFIT version
# New version of EFIT on 05/24/97 writes aeqdsk that includes
# data values for parameters nsilop,magpri,nfcoil and nesum.
(None, True, None), # New line
(
"nsilop",
lambda data: len(data.get("csilop", [])),
"Number of flux loop signals, len(csilop)",
),
(
"magpri",
lambda data: len(data.get("cmpr2", [])),
"Number of flux loop signals, len(cmpr2) (added to nsilop)",
),
(
"nfcoil",
lambda data: len(data.get("ccbrsp", [])),
"Number of calculated external coil currents, len(ccbrsp)",
),
(
"nesum",
lambda data: len(data.get("eccurt", [])),
"Number of measured E-coil currents",
),
(None, None, None), # New line
(
"csilop",
lambda data: [0.0] * data.get("nsilop", 0),
"computed flux loop signals in Weber",
),
("cmpr2", lambda data: [0.0] * data.get("magpri", 0), ""),
(
"ccbrsp",
lambda data: [0.0] * data.get("nfcoil", 0),
"computed external coil currents in Ampere",
),
(
"eccurt",
lambda data: [0.0] * data.get("nesum", 0),
"measured E-coil current in Ampere",
),
("pbinj", 0.0, "neutral beam injection power in Watts"),
("rvsin", 0.0, "major radius of vessel inner hit spot in cm"),
("zvsin", 0.0, "Z of vessel inner hit spot in cm"),
("rvsout", 0.0, "major radius of vessel outer hit spot in cm"),
("zvsout", 0.0, "Z of vessel outer hit spot in cm"),
("vsurfa", 0.0, "plasma surface loop voltage in volt, E EQDSK only"),
("wpdot", 0.0, "time derivative of plasma stored energy in Watt, E EQDSK only"),
("wbdot", 0.0, "time derivative of poloidal magnetic energy in Watt, E EQDSK only"),
("slantu", 0.0, ""),
("slantl", 0.0, ""),
("zuperts", 0.0, ""),
("chipre", 0.0, "total chi2 pressure"),
("cjor95", 0.0, ""),
("pp95", 0.0, "normalized P'(y) at 95% normalized poloidal flux"),
("ssep", 0.0, ""),
("yyy2", 0.0, "Shafranov Y2 current moment"),
("xnnc", 0.0, ""),
("cprof", 0.0, "current profile parametrization parameter"),
("oring", 0.0, "not used"),
(
"cjor0",
0.0,
"normalized flux surface average current density at 99% of normalized poloidal flux",
),
("fexpan", 0.0, "flux expansion at x point"),
("qqmin", 0.0, "minimum safety factor qmin"),
("chigamt", 0.0, "total chi2 MSE"),
("ssi01", 0.0, "magnetic shear at 1% of normalized poloidal flux"),
("fexpvs", 0.0, "flux expansion at outer lower vessel hit spot"),
(
"sepnose",
0.0,
"radial distance in cm between x point and external field line at ZNOSE",
),
("ssi95", 0.0, "magnetic shear at 95% of normalized poloidal flux"),
("rqqmin", 0.0, "normalized radius of qmin , square root of normalized volume"),
("cjor99", 0.0, ""),
(
"cj1ave",
0.0,
"normalized average current density in plasma outer 5% normalized poloidal flux region",
),
("rmidin", 0.0, "inner major radius in m at Z=0.0"),
("rmidout", 0.0, "outer major radius in m at Z=0.0"),
]
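# Each entry of `fields` is a (key, default, description) tuple: a key of None
# marks a line break in the file layout, and a callable default is evaluated
# lazily against the data read or written so far (e.g. array lengths that
# depend on mco2v, mco2r, nsilop, magpri, nfcoil or nesum).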
def write(data, fh):
"""
    Write an AEQDSK file to the file handle fh.
    data [dict] - keys are given with documentation in the `fields` list.
                  Also includes:
        shot [int]   - The shot number
        time [float] - Time in ms
"""
# First line identification string
# Default to date > 1997 since that format includes nsilop etc.
fh.write("{0:11s}\n".format(data.get("header", " 26-OCT-98 09/07/98 ")))
# Second line shot number
fh.write(" {:d} 1\n".format(data.get("shot", 0)))
# Third line time
fh.write(" " + fu.f2s(data.get("time", 0.0)) + "\n")
# Fourth line
# time(jj),jflag(jj),lflag,limloc(jj), mco2v,mco2r,qmflag
# jflag = 0 if error (? Seems to contradict example)
# lflag > 0 if error (? Seems to contradict example)
    # limloc IN/OUT/TOP/BOT: limiter inside/outside/top/bottom; SNT/SNB: single null top/bottom; DN: double null
# mco2v number of vertical CO2 density chords
# mco2r number of radial CO2 density chords
# qmflag axial q(0) flag, FIX if constrained and CLC for float
fh.write(
"*{:s} {:d} {:d} {:s} {:d} {:d} {:s}\n".format(
fu.f2s(data.get("time", 0.0)).strip(),
data.get("jflag", 1),
data.get("lflag", 0),
data.get("limloc", "DN"),
data.get("mco2v", 0),
data.get("mco2r", 0),
data.get("qmflag", "CLC"),
)
)
# Output data in lines of 4 values each
with fu.ChunkOutput(fh, chunksize=4) as output:
for key, default, description in fields:
if callable(default):
# Replace the default function with the value, which may depend on previously read data
default = default(data)
if key is None:
output.newline() # Ensure on a new line
else:
output.write(data.get(key, default))
def read(fh):
"""
Read an AEQDSK file, returning a dictionary of data
"""
# First line label. Date.
header = fh.readline()
# Second line shot number
shot = int(fh.readline().split()[0])
# Third line time [ms]
time = float(fh.readline())
# Fourth line has (up to?) 9 entries
# time(jj),jflag(jj),lflag,limloc(jj), mco2v,mco2r,qmflag
words = fh.readline().split()
# Dictionary to hold result
data = {
"header": header,
"shot": shot,
"time": time,
"jflag": int(words[1]),
"lflag": int(words[2]),
"limloc": words[3], # e.g. "SNB"
"mco2v": int(words[4]),
"mco2r": int(words[5]),
"qmflag": words[6],
} # e.g. "CLC"
# Read each value from the file, and put into variables
values = fu.next_value(fh)
for key, default, doc in fields:
if key is None:
continue # skip
if callable(default):
default = default(data)
if isinstance(default, list):
# Read a list the same length as the default
data[key] = [next(values) for elt in default]
else:
value = next(values)
if isinstance(default, int) and not isinstance(value, int):
# Expecting an integer, but didn't get one
warnings.warn("Expecting an integer for '" + key + "' in aeqdsk file")
break
data[key] = value
return data
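# Usage sketch (file names are illustrative only):
#     with open("shot17001.aeqdsk") as fh:
#         data = read(fh)
#     with open("copy.aeqdsk", "w") as fh:
#         write(data, fh)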
|
bendudson/freegs
|
freegs/_aeqdsk.py
|
Python
|
lgpl-3.0
| 12,250 | 0.001551 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from appengine_module.test_results.handlers import redirector
class RedirectorTest(unittest.TestCase):
def test_url_from_commit_positions(self):
def mock_load_url(url):
if url == 'https://cr-rev.appspot.com/_ah/api/crrev/v1/redirect/1':
git_sha = 'aaaaaaa'
else:
git_sha = 'bbbbbbb'
return '''{
"git_sha": "%s",
"repo": "chromium/src",
"redirect_url": "https://chromium.googlesource.com/chromium/src/+/%s",
"project": "chromium",
"redirect_type": "GIT_FROM_NUMBER",
"repo_url": "https://chromium.googlesource.com/chromium/src/",
"kind": "crrev#redirectItem",
"etag": "\\\"vOastG91kaV9uxC3-P-4NolRM6s/U8-bHfeejPZOn0ELRGhed-nrIX4\\\""
}''' % (git_sha, git_sha)
old_load_url = redirector.load_url
try:
redirector.load_url = mock_load_url
expected = ('https://chromium.googlesource.com/chromium/src/+log/'
'aaaaaaa^..bbbbbbb?pretty=fuller')
self.assertEqual(redirector.url_from_commit_positions(1, 2), expected)
finally:
redirector.load_url = old_load_url
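  # What the test pins down: url_from_commit_positions(1, 2) resolves both commit
  # positions to git SHAs through the crrev redirect API (mocked above) and builds
  # a gitiles "+log" URL covering the range aaaaaaa^..bbbbbbb.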
|
nicko96/Chrome-Infra
|
appengine/test_results/appengine_module/test_results/handlers/test/redirector_test.py
|
Python
|
bsd-3-clause
| 1,228 | 0.008143 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import getopt
import time
import re
import os,sys
reload(sys)
sys.setdefaultencoding('utf-8')
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from bs4 import BeautifulSoup
import requests
import webbrowser
import subprocess
class Outline():
def getToc(self, pdfPath):
infile = open(pdfPath, 'rb')
parser = PDFParser(infile)
document = PDFDocument(parser)
toc = list()
for (level,title,dest,a,structelem) in document.get_outlines():
toc.append((level, title))
return toc
def toOutline(self, source):
if source.endswith('.pdf') and source.startswith('http') == False:
items = ''
for item in self.getToc(source):
items += item[1] + '\n'
return items
elif source.startswith('http'):
#url = 'https://gsnedders.html5.org/outliner/process.py?url=' + source
#webbrowser.open(url)
r = requests.get('https://gsnedders.html5.org/outliner/process.py?url=' + source)
return r.text
#soup = BeautifulSoup(r.text)
#for li in soup.find_all('li'):
# print li.text.strip()
'''
r = requests.get(source)
#script = "var data = new Array();"
#for line in r.text:
# script += "data.push('" + line + "')"
script = ''
script += "var HTML5Outline = require('h5o');"
script += "var outline = HTML5Outline('<html></html>');"
output = subprocess.check_output('node -p "' + script + '"' , shell=True)
return output
'''
return ''
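# Usage sketch (inputs are illustrative): Outline().toOutline("notes.pdf") returns
# the PDF bookmark titles one per line, while Outline().toOutline("http://example.com")
# returns the outline produced by the gsnedders.html5.org outliner service.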
def main(argv):
source = ''
try:
opts, args = getopt.getopt(sys.argv[1:],'i:', ['input'])
except getopt.GetoptError, err:
print str(err)
sys.exit(2)
for o, a in opts:
if o in ('-i', '--input'):
source = a
outline = Outline()
print outline.toOutline(source)
if __name__ == '__main__':
main(sys.argv)
|
roscopecoltran/scraper
|
.staging/meta-engines/xlinkBook/outline.py
|
Python
|
mit
| 2,141 | 0.012611 |
import numpy as np
import matplotlib.pyplot as plt
def bernstein(t, n, i):
cn = 1.0
ci = 1.0
cni = 1.0
for k in range(2, n, 1):
cn = cn * k
for k in range(1, i, 1):
if i == 1:
break
ci = ci * k
for k in range(1, n - i + 1, 1):
if n == i:
break
cni = cni * k
j = t**(i - 1) * (1 - t)**(n - i) * cn / (ci * cni)
return j
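# Note on bernstein(): with its 1-based index i, the three loops above accumulate
# (n-1)!, (i-1)! and (n-i)!, so cn / (ci * cni) is the binomial coefficient
# C(n-1, i-1) and the returned value is the Bernstein basis polynomial
#     C(n-1, i-1) * t**(i - 1) * (1 - t)**(n - i)
# The n basis functions sum to 1 for any t in [0, 1], e.g.
#     sum(bernstein(0.5, 6, i) for i in range(1, 7))   # ~1.0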
def bezierplot(t, cp):
n = len(cp)
r = np.zeros([len(t), 2])
for k in range(len(t)):
sum1 = 0.0
sum2 = 0.0
for i in range(1, n + 1, 1):
bt = bernstein(t[k], n, i)
sum1 += cp[i - 1, 0] * bt
sum2 += cp[i - 1, 1] * bt
r[k, :] = [sum1, sum2]
return np.array(r)
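# bezierplot() evaluates the Bezier curve r(t) = sum_i cp[i] * B_i(t) at each
# parameter value in t and returns the sampled (x, y) points as an array.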
cp = np.array([[0, -2], [1, -3], [2, -2], [3, 2], [4, 2], [5, 0]])
t = np.arange(0, 1 + 0.01, 0.01)
p = bezierplot(t, cp)
plt.figure()
plt.plot(p[:, 0], p[:, 1])
plt.plot(cp[:, 0], cp[:, 1], ls=':', marker='o')
plt.show()
|
o-kei/design-computing-aij
|
ch5/curve.py
|
Python
|
mit
| 983 | 0.001017 |
from pygraz_website import filters
class TestFilters(object):
def test_url_detection(self):
"""
Test that urls are found correctly.
"""
no_urls_string = '''This is a test without any urls in it.'''
urls_string = '''This string has one link in it: http://pygraz.org . But it also has some text after it :D'''
assert filters.urlize(no_urls_string) == no_urls_string
assert filters.urlize(urls_string) == '''This string has one link in it: <a href="http://pygraz.org">http://pygraz.org</a> . But it also has some text after it :D'''
assert filters.urlize(urls_string, True).matches == {'urls': set(['http://pygraz.org'])}
assert filters.urlize(None) == u''
assert filters.urlize("'http://test.com'") == """'<a href="http://test.com">http://test.com</a>'"""
def test_namehandles(self):
"""
        Tests the discovery of linkable names.
"""
string_with_handles = 'Hallo @pygraz.'
assert filters.urlize(string_with_handles) == 'Hallo <a href="http://twitter.com/pygraz">@pygraz</a>.'
assert filters.urlize(string_with_handles, True).matches == {'handles': set(['pygraz'])}
def test_hashtags(self):
string_with_tags = 'This is a #test for #hashtags'
assert filters.urlize(string_with_tags) == 'This is a <a href="http://twitter.com/search?q=%23test">#test</a> for <a href="http://twitter.com/search?q=%23hashtags">#hashtags</a>'
assert filters.urlize(string_with_tags, True).matches == {'hashtags': set(['test', 'hashtags'])}
|
pygraz/old-flask-website
|
pygraz_website/tests/test_filters.py
|
Python
|
bsd-3-clause
| 1,577 | 0.005707 |
#!/usr/bin/env python3
import configparser, subprocess, platform
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GLib, Gdk
class SleepTimer(Gtk.Builder):
def __init__(self):
super().__init__()
self.add_from_file("main.glade")
self.connect_signals(self)
self.spin_buttons = (
self.get_object("spinbutton_h"),
self.get_object("spinbutton_min"),
self.get_object("spinbutton_s"),
)
self.css_provider = Gtk.CssProvider()
self.get_object("togglebutton1").get_style_context().add_provider(
self.css_provider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
self.start_seconds_left = 0
self.config = configparser.ConfigParser()
self.config.read('settings.ini')
if 'default' in self.config.sections():
try:
self.spin_buttons[2].set_value(int(self.config['default']['seconds']))
self.get_object(self.config['default']['mode']).set_active(True)
if self.config['default']['mute'] == 'True':
self.get_object('checkbutton1').set_active(True)
except ValueError as err:
print(err)
except KeyError as err:
print('KeyError: {}'.format(err))
else:
self.config['default'] = {}
self.spin_buttons[0].set_value(1)
self.window = self.get_object("window1")
self.window.show_all()
def on_timer(self):
"""
        Decreases the remaining time by one second and fires the configured power action when it reaches zero
"""
if not self.get_object("togglebutton1").get_active():
return False
seconds = self.spin_buttons[2].get_value_as_int()
if seconds == 0:
seconds = 60
minutes = self.spin_buttons[1].get_value_as_int()
if minutes == 0:
minutes = 60
hours = self.spin_buttons[0].get_value_as_int()
if hours == 0:
try:
if self.get_object("checkbutton1").get_active():
if platform.system() == "Windows":
subprocess.check_output("nircmd.exe mutesysvolume 1")
else:
subprocess.check_output("pactl set-sink-mute 0 1", shell=True)
verb = "hibernate"
if platform.system() == "Windows":
if self.get_object("standby").get_active():
verb = "standby"
elif self.get_object("shutdown").get_active():
verb = "exitwin poweroff"
subprocess.check_output("nircmd.exe " + verb)
else:
if self.get_object("standby").get_active():
verb = "suspend"
elif self.get_object("shutdown").get_active():
verb = "poweroff"
subprocess.check_output("systemctl " + verb + " -i", shell=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
dialog = Gtk.MessageDialog(
parent=self.window, message_type=Gtk.MessageType.ERROR,
buttons=Gtk.ButtonsType.CLOSE,
text="`{}` failed with exit code {}".format(err.cmd, err.returncode))
dialog.format_secondary_text(err.stdout.decode('utf-8', 'ignore').strip())
dialog.run()
dialog.destroy()
Gtk.main_quit()
return False
self.spin_buttons[0].set_value(hours - 1)
self.spin_buttons[1].set_value(minutes - 1)
self.spin_buttons[2].set_value(seconds - 1)
self.css_provider.load_from_data(".install-progress {{ background-size: {}%; }}".format(
int(self.get_seconds_left() * 100 / self.start_seconds_left)
).encode())
return True
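        # GLib keeps scheduling on_timer every second for as long as it returns
        # True; returning False above (toggle released or the power action fired)
        # removes the timeout source added in on_toggled.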
def on_toggled(self, button):
"""
Start button toggled
"""
self.spin_buttons[2].set_sensitive(not button.get_active()) # seconds
context = button.get_style_context()
if button.get_active():
context.add_class("install-progress")
context.remove_class("suggested-action")
self.css_provider.load_from_data(b".install-progress { background-size: 100%; }")
self.start_seconds_left = self.get_seconds_left()
with open('settings.ini', 'w') as file:
self.config['default']['seconds'] = str(int(self.start_seconds_left))
self.config['default']['mode'] = 'standby'
if self.get_object('hibernate').get_active():
self.config['default']['mode'] = 'hibernate'
elif self.get_object('shutdown').get_active():
self.config['default']['mode'] = 'shutdown'
self.config['default']['mute'] = str(self.get_object("checkbutton1").get_active())
self.config.write(file)
self.previous_label = button.get_label()
button.set_label("_Stop")
else:
context.remove_class("install-progress")
context.add_class("suggested-action")
button.set_label(self.previous_label)
if button.get_active():
GLib.timeout_add(1000, self.on_timer)
def on_time_changed(self):
self.get_object("togglebutton1").set_sensitive(
self.spin_buttons[0].get_value() != 0 or
self.spin_buttons[1].get_value() != 0 or
self.spin_buttons[2].get_value() != 0
)
# If the user increases the time while it's running this could result in a negative
# percentage for the progress bar. Adjust the start time so that it never happens:
self.start_seconds_left = max(self.start_seconds_left, self.get_seconds_left())
def on_h_changed(self, spin_button):
self.on_time_changed()
def on_min_changed(self, spin_button):
"""
        When minutes drop below 0, decrease hours; when they get above 59, increase hours
"""
while spin_button.get_value() < 0:
if self.spin_buttons[0].get_value() == 0:
spin_button.set_value(0)
else:
spin_button.set_value(spin_button.get_value() + 60)
self.spin_buttons[0].set_value(self.spin_buttons[0].get_value() - 1)
while spin_button.get_value() > 59:
spin_button.set_value(spin_button.get_value() - 60)
self.spin_buttons[0].set_value(self.spin_buttons[0].get_value() + 1)
self.on_time_changed()
def on_s_changed(self, spin_button):
"""
        When seconds drop below 0, decrease minutes; when they get above 59, increase minutes
"""
while spin_button.get_value() < 0:
if self.spin_buttons[0].get_value() == 0 and self.spin_buttons[1].get_value() == 0:
spin_button.set_value(0)
else:
spin_button.set_value(spin_button.get_value() + 60)
self.spin_buttons[1].set_value(self.spin_buttons[1].get_value() - 1)
while spin_button.get_value() > 59:
spin_button.set_value(spin_button.get_value() - 60)
self.spin_buttons[1].set_value(self.spin_buttons[1].get_value() + 1)
self.on_time_changed()
def on_delete_window(self, *args):
Gtk.main_quit(*args)
def get_seconds_left(self):
return self.spin_buttons[0].get_value() * 3600 + self.spin_buttons[1].get_value() * 60 + \
self.spin_buttons[2].get_value()
style_provider = Gtk.CssProvider()
style_provider.load_from_data(b""".install-progress {
background-image: linear-gradient(to top, @theme_selected_bg_color 2px, alpha(@theme_selected_bg_color, 0) 2px);
background-repeat: no-repeat;
background-position: 0 bottom;
transition: none;
}
.install-progress { background-position: 100% bottom; }
""")
Gtk.StyleContext.add_provider_for_screen(
Gdk.Screen.get_default(), style_provider,
Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
)
SleepTimer()
Gtk.main()
|
jhasse/sleeptimer
|
main.py
|
Python
|
gpl-3.0
| 8,446 | 0.004026 |
#!/usr/bin/env python
"""
Send message to '#gis.lab' IRC chat room.
Requires running the script 'utils/join-gislab-network.py' first to establish
a connection with the server.
USAGE: send-message.py <message>
"""
import os, sys
import re
import socket
try:
message = sys.argv[1]
except IndexError:
print __doc__
sys.exit(0)
DIR=os.path.dirname(os.path.abspath(__file__))
def get_config(variable):
c = open(os.path.join(os.path.dirname(DIR), "config.cfg"), "ro")
for line in c:
if re.match("^" + variable, line):
value = line.split("=")[1].replace("'", "").replace('"', '')
c.close()
break
c = open(os.path.join(os.path.dirname(DIR), "config-user.cfg"), "ro")
for line in c:
if re.match("^" + variable, line):
value = line.split("=")[1].replace("'", "").replace('"', '')
c.close()
break
return value.strip()
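# Note: config-user.cfg is read after config.cfg, so a value defined in both
# files resolves to the user config; if the variable appears in neither file,
# `value` is never assigned and an UnboundLocalError is raised.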
GISLAB_NETWORK = get_config("GISLAB_NETWORK")
HOST="{0}.5".format(GISLAB_NETWORK)
PORT=6667
NICK=IDENT=os.environ['USER']
REALNAME="script"
CHANNEL="gis.lab"
s=socket.socket( socket.AF_INET, socket.SOCK_STREAM )
s.connect((HOST, PORT))
print s.recv(4096)
s.send("NICK %s\r\n" % NICK)
s.send("USER %s %s bla :%s\r\n" % (IDENT, HOST, REALNAME))
s.send("JOIN #%s\r\n" % CHANNEL)
s.send("PRIVMSG #gislab :%s\r\n" % message)
s.send("QUIT: End of message.\r\n")
s.recv(4096)
s.close()
print "Done."
# vim: ts=8 sts=4 sw=4 et:
|
imincik/gis-lab
|
utils/send-message.py
|
Python
|
gpl-3.0
| 1,440 | 0.008333 |
#!/usr/bin/python3
import logging
from operator import itemgetter
from timeit import default_timer as timer
import rdflib
from .abstract_instruction_set import AbstractInstructionSet
from readers import rdf
from writers import rule_set, pickler
from samplers import by_definition as sampler
from algorithms.semantic_rule_learning import generate_semantic_association_rules,\
generate_semantic_item_sets,\
generate_common_behaviour_sets,\
support_of,\
confidence_of
class PakbonLD(AbstractInstructionSet):
def __init__(self, time=""):
self.time = time
self.logger = logging.getLogger(__name__)
def print_header(self):
header = "PAKBON: Context ('Sporen') with 12 attributes"
print(header)
print('-' * len(header))
def load_dataset(self, abox, tbox):
"""
# pakbonLD SPARQL endpoint
endpoint = "http://pakbon-ld.spider.d2s.labs.vu.nl/sparql/"
# query
query_string = "" "
prefix pbont: <http://pakbon-ld.spider.d2s.labs.vu.nl/ont/>
prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
SELECT DISTINCT ?s ?p ?o
WHERE {
?s a pbont:SIKB0102S_Vondstcontext;
?p ?o.
FILTER (?p != rdf:type)
} LIMIT 1000"" "
# perform query and return a KnowledgeGraph instance
kg_i = rdf.query(query_string, endpoint)
"""
# read graphs
kg_i = rdf.read(local_path=abox)
kg_s = rdf.read(local_path=tbox)
# sample by pattern
pattern = (None,
rdflib.URIRef("http://pakbon-ld.spider.d2s.labs.vu.nl/ont/SIKB0102S_grondspoortype"),
None)
# define context
# spoor with vulling
context = [rdflib.URIRef("http://pakbon-ld.spider.d2s.labs.vu.nl/ont/SIKB0102S_grondspoortype"),
rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P53i_is_former_or_current_location_of"),
(rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P89_falls_within"),
rdflib.URIRef("http://pakbon-ld.spider.d2s.labs.vu.nl/ont/SIKB0102S_contexttype")),
(rdflib.URIRef("http://purl.org/crmeh#EHP3i"),
rdflib.URIRef("http://pakbon-ld.spider.d2s.labs.vu.nl/ont/SIKB0102S_kleur")),
(rdflib.URIRef("http://purl.org/crmeh#EHP3i"),
rdflib.URIRef("http://pakbon-ld.spider.d2s.labs.vu.nl/ont/SIKB0102S_textuur")),
(rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P53i_is_former_or_current_location_of"),
rdflib.URIRef("http://pakbon-ld.spider.d2s.labs.vu.nl/ont/SIKB0102S_structuurtype")),
(rdflib.URIRef("http://pakbon-ld.spider.d2s.labs.vu.nl/ont/SIKB0102S_diepte"),
rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P40_observed_dimension"),
rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P90_has_value")),
(rdflib.URIRef("http://pakbon-ld.spider.d2s.labs.vu.nl/ont/SIKB0102S_diepte"),
rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P40_observed_dimension"),
rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P91_has_unit")),
(rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P140i_was_attributed_by"),
rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P141_assigned"),
rdflib.URIRef("http://pakbon-ld.spider.d2s.labs.vu.nl/ont/SIKB0102S_beginperiode")),
(rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P140i_was_attributed_by"),
rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P141_assigned"),
rdflib.URIRef("http://pakbon-ld.spider.d2s.labs.vu.nl/ont/SIKB0102S_eindperiode")),
(rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P53i_is_former_or_current_location_of"),
rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P140i_was_attributed_by"),
rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P141_assigned"),
rdflib.URIRef("http://pakbon-ld.spider.d2s.labs.vu.nl/ont/SIKB0102S_beginperiode")),
(rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P53i_is_former_or_current_location_of"),
rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P140i_was_attributed_by"),
rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P141_assigned"),
rdflib.URIRef("http://pakbon-ld.spider.d2s.labs.vu.nl/ont/SIKB0102S_eindperiode"))]
kg_i_sampled = kg_i.sample(sampler, patterns=[pattern], context=context)
return (kg_i_sampled, kg_s)
def run_program(self, dataset, hyperparameters):
self.logger.info("Starting run\nParameters:\n{}".format(
"\n".join(["\t{}: {}".format(k,v) for k,v in hyperparameters.items()])))
kg_i, kg_s = dataset
# fit model
t0 = timer()
# generate semantic item sets from sampled graph
si_sets = generate_semantic_item_sets(kg_i)
# generate common behaviour sets
cbs_sets = generate_common_behaviour_sets(si_sets,
hyperparameters["similarity_threshold"],
hyperparameters["max_cbs_size"])
# generate semantic association rules
rules = generate_semantic_association_rules(kg_i,
kg_s,
cbs_sets,
hyperparameters["minimal_local_support"])
# calculate support and confidence, skip those not meeting minimum requirements
final_rule_set = []
for rule in rules:
support = support_of(kg_i, rule)
confidence = confidence_of(kg_i, rule)
if support >= hyperparameters["minimal_support"] and\
confidence >= hyperparameters["minimal_confidence"]:
final_rule_set.append((rule, support, confidence))
# sorting rules on both support and confidence
final_rule_set.sort(key=itemgetter(2, 1), reverse=True)
# time took
t1 = timer()
dt = t1 - t0
print(" Program completed in {:.3f} ms".format(dt))
print(" Found {} rules".format(len(final_rule_set)))
return final_rule_set
def write_to_file(self, path="./of/latest", output=[]):
overwrite = False
print(" Writing output to {}...".format(path))
rule_set.pretty_write(output, path, overwrite)
pickler.write(output, path+".pickle", overwrite)
def run(self, abox, tbox, output_path):
self.print_header()
print(" {}\n".format(self.time))
hyperparameters = {}
hyperparameters["similarity_threshold"] = .8
hyperparameters["max_cbs_size"] = 4
hyperparameters["minimal_local_support"] = 0.0
hyperparameters["minimal_support"] = 0.0
hyperparameters["minimal_confidence"] = 0.0
print(" Importing Data Sets...")
dataset = self.load_dataset(abox, tbox)
print(" Initiated Pattern Learning...")
output = self.run_program(dataset, hyperparameters)
if len(output) > 0:
self.write_to_file(output_path, output)
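    # Usage sketch (paths are illustrative only):
    #     PakbonLD().run("abox.ttl", "tbox.ttl", "./of/latest")
    # where abox and tbox are local RDF files loaded via rdf.read(local_path=...).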
|
wxwilcke/MINOS
|
directives/pakbonLD_B3.py
|
Python
|
gpl-3.0
| 7,661 | 0.00496 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# #Copyright (C) 2015, Delft University of Technology, Faculty of Electrical Engineering, Mathematics and Computer Science, Network Architectures and Services and TNO, ICT - Service Enabling and Management, Mani Prashanth Varma Manthena, Niels van Adrichem, Casper van den Broek and F. A. Kuipers
#
# This file is part of NaaSPlatform.
#
# NaaSPlatform is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NaaSPlatform is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NaaSPlatform. If not, see <http://www.gnu.org/licenses/>.
# Network-as-a-Service (NaaS) platform's load balancing application
# Importing Python modules
import sys # Python module for system (i.e. interpreter) specific parameters and functions
import select # Python module for I/O completion waiting
import time # Python module to perform various time related functions
# Importing NaaS platform's main application for performing NaaS related operations and functions
from Main_App import *
# Importing NaaS platform's sFlow based edge flow monitoring application for monitoring and detecting large traffic flows at the network edge
from sFlow_Edge_Flow_Monitoring_App import *
# Importing NaaS platform's sFlow based Core interface monitoring application for monitoring and detecting high bandwidth interface utilizations and failures in the network core of Testbed network 1 (i.e. network (i.e. edge + core) with sFlow enabled open (i.e. OF/OVS) switches)
from sFlow_Core_Interface_Monitoring_App import *
# Importing NaaS platform's SNMP based Core interface monitoring application for monitoring and detecting high bandwidth interface utilizations and failures in the network core of Testbed network 2 (i.e. network core with legacy (i.e. vendor-specific) switches)
from SNMP_Core_Interface_Monitoring_App import *
# Importing NaaS platform's optimal path computation application for optimal path computations and selections
from Optimal_Path_Computation_App import *
class load_balance_testbed_network_1():
# Starting a load balancing application for a network (i.e. edge + core) of open (i.e. OF/OVS) switches
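    # Control loop (one pass every `timeout` seconds): poll the sFlow core
    # interface monitoring application for interface failures and high
    # utilization, recompute label-switched paths with the optimal path
    # computation application, re-install the affected MPLS push flows through
    # the OpenDaylight REST API, and steer newly detected large edge flows onto
    # the currently optimal path.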
def __init__(self):
try:
odl_base = naas_arch().odl_base_url()
url_odl = odl_base['URL']
ip_odl = odl_base['Host IP']
odl_header = naas_arch().odl_api_header()
cred = naas_arch().odl_user_cred()
name = cred['User Name']
password = cred['Password']
edge_sflow_base = naas_arch().edge_sflow_base_url()
url_edge_sflow = edge_sflow_base['URL']
ip_edge_sflow = edge_sflow_base['Host IP']
core_sflow_base = naas_arch().core_sflow_base_url()
url_core_sflow = core_sflow_base['URL']
ip_core_sflow = core_sflow_base['Host IP']
sflow_header = naas_arch().sflow_api_header()
odl_switches = naas_arch().odl_switches()
odl_switches_ip = naas_arch().odl_switches_ip()
edge_sflow_agents = naas_arch().edge_sflow_agents()
core_sflow_agents = naas_arch().core_sflow_agents()
testbed_1_topo = naas_arch().testbed_1_topology()
testbed_1_lsps = naas_arch().testbed_1_path_bindings()
sflow_if_map = naas_arch().sflow_interface_mapping()
flow = odl_api_json_formats()
stat = odl_api_flow_stat()
odl = odl_api_calls()
sflow = sflow_api_calls()
print '\n\n\n'
edge_flow = network_edge_flow_monitoring()
print '\n\n\n'
core_mon = sflow_network_core_interface_monitoring()
while True:
print '\n\n\n\nEnter the above configured sFlow based edge flow monitoring application name...\n\n'
flow_name = raw_input('sFlow based Edge Flow Monitoring Application Name (Required): ')
url_sflow = url_edge_sflow
flow_def = sflow.sflow_flow_def(url_sflow, flow_name)
if flow_def == {}:
print '\n\nThere is no such sFlow based edge flow monitoring application that is currently running in the NaaS platform...\n\n'
print '\n\nRe-configure and Re-enter the sFlow based edge flow monitoring application name...\n\n'
else:
break
flow_keys = flow_def['keys']
keys = re.sub(r'\s', '', flow_keys).split(',')
source_key = keys[0]
destination_key = keys[1]
print '\n\n\n\nEnter the priority value for this load balancing application and its corresponding actions...\n\n'
priority_load_balance = raw_input('Load Balancing Priority Value (Default Value = 100): ')
if priority_load_balance == '':
priority_load_balance = '100'
print '\n\n\n\nEnter the load balancing query timeout/interval (i.e. in seconds)...\n\n'
timeout = raw_input('Load Balancing Query Timeout/Interval (Default Value: 20): ')
if timeout == '':
timeout = 10
timeout = int(timeout)
print '\n\nStarting the load balancing application...\n\n'
while True:
print '\n\nQuerying for network core interface monitoring triggered events...\n\n'
print '\n\nChecking for Interface/Link Failures in the Network Core..\n\n'
delete_links = {}
update_link_weights = {}
int_failures = {}
high_utis = {}
int_failures = core_mon.int_fail_events()
if int_failures != {}:
for key in int_failures:
agent_node = int_failures[key]['Agent']
agent_interface_id = int_failures[key]['Interface ID']
agent_interface = sflow_if_map[agent_interface_id]
if delete_links != {}:
m = 0
for key in delete_links:
if key == agent_node:
m += 1
if m != 0:
old_link_list = delete_links[agent_node]
old_link_list.append(agent_interface)
delete_links[agent_node] = old_link_list
else:
new_link_list = []
new_link_list.append(agent_interface)
delete_links[agent_node] = new_link_list
else:
new_link_list = []
new_link_list.append(agent_interface)
delete_links[agent_node] = new_link_list
paths = optimal_testbed_network_1().optimal_path(delete_links, update_link_weights)
if paths != {}:
all_paths_right = paths['All Paths Right']
all_paths_left = paths['All Paths Left']
shortest_path_right = paths['Shortest Path Right']
shortest_path_left = paths['Shortest Path Left']
no_path_labels = []
shortest_path_right_label = ''
shortest_path_left_label = ''
for key in testbed_1_lsps:
if testbed_1_lsps[key] == shortest_path_right:
shortest_path_right_label = key
if testbed_1_lsps[key] == shortest_path_left:
shortest_path_left_label = key
for key in testbed_1_lsps:
m = 0
for apr in all_paths_right:
if testbed_1_lsps[key] == apr:
m += 1
for apl in all_paths_left:
if testbed_1_lsps[key] == apl:
m += 1
if m == 0:
no_path_labels.append(key)
with open("Statistics_Logs/Testbed_1_Basic_Connectivity_Flow_Stats.json") as json_file:
basic_connectivity_flow_stats = json.load(json_file)
installed_path_labels = []
deleted_path_labels = []
for key in basic_connectivity_flow_stats:
installed_path_labels.append(basic_connectivity_flow_stats[key]['MPLS Label'])
deleted_path_labels = set(installed_path_labels).intersection(no_path_labels)
deleted_flows = {}
for dpl in deleted_path_labels:
for key in basic_connectivity_flow_stats:
if basic_connectivity_flow_stats[key]['MPLS Label'] == dpl:
deleted_flows[key] = basic_connectivity_flow_stats[key]
for key in deleted_flows:
flow_id = key
mpls_push_stats = {}
mpls_push_flow_stats = {}
mpls_push_flow = flow.odl_mpls_push_json()
mpls_push_stats = stat.odl_mpls_push_stat()
mpls_push_flow_stats = mpls_push_stats['stat']
mpls_push_flow_counter = mpls_push_stats['counter']
switch_id = deleted_flows[key]['Switch ID']
dst_add = deleted_flows[key]['IP Destination']
src_add = ''
in_port = ''
dl_dst = ''
dl_src = ''
protocol = ''
tcp_src_port = ''
tcp_dst_port = ''
udp_src_port = ''
udp_dst_port = ''
vlan_id = ''
vlan_priority = ''
table_id = '0'
priority = '10'
for key in odl_switches_ip:
if odl_switches_ip[key] == switch_id:
switch_ip = key
if switch_ip == shortest_path_right[0]:
label = shortest_path_right_label
if switch_ip == shortest_path_left[0]:
label = shortest_path_left_label
action_mpls_label = label
con_switch = testbed_1_lsps[label][1]
for key in testbed_1_topo[switch_ip]:
if testbed_1_topo[switch_ip][key] == con_switch:
con_port = key
for key in sflow_if_map:
if sflow_if_map[key] == con_port:
port = key
action_out_port = port
flow_stat = {}
flow_stat = odl.odl_mpls_push_flow_inst(url_odl, name, password, odl_header, mpls_push_flow, mpls_push_flow_counter, switch_id, dst_add, src_add, in_port, dl_dst, dl_src, protocol, tcp_src_port, tcp_dst_port, udp_src_port, udp_dst_port, vlan_id, vlan_priority, action_mpls_label, action_out_port, table_id, priority)
if flow_stat:
flow_name = flow_stat['Flow ID']
mpls_push_flow_stats[flow_name] = flow_stat
basic_connectivity_flow_stats[flow_name] = {'Switch ID': switch_id, 'IP Destination' : dst_add, 'MPLS Label' : label}
with open("Statistics_Logs/MPLS_Push_Flow_Stats.json", "w") as json_file:
json.dump(mpls_push_flow_stats, json_file)
with open("Statistics_Logs/Testbed_1_Basic_Connectivity_Flow_Stats.json", "w") as json_file:
json.dump(basic_connectivity_flow_stats, json_file)
if mpls_push_flow_stats:
del(mpls_push_flow_stats[flow_id])
del(basic_connectivity_flow_stats[flow_id])
with open("Statistics_Logs/MPLS_Push_Flow_Stats.json", "w") as json_file:
json.dump(mpls_push_flow_stats, json_file)
with open("Statistics_Logs/Testbed_1_Basic_Connectivity_Flow_Stats.json", "w") as json_file:
json.dump(basic_connectivity_flow_stats, json_file)
with open("Statistics_Logs/Testbed_1_Load_Balancing_Flow_Stats.json") as json_file:
load_balancing_flow_stats = json.load(json_file)
installed_path_labels = []
deleted_path_labels = []
for key in load_balancing_flow_stats:
installed_path_labels.append(load_balancing_flow_stats[key]['MPLS Label'])
deleted_path_labels = set(installed_path_labels).intersection(no_path_labels)
deleted_flows = {}
for dpl in deleted_path_labels:
for key in load_balancing_flow_stats:
if load_balancing_flow_stats[key]['MPLS Label'] == dpl:
deleted_flows[key] = load_balancing_flow_stats[key]
for key in deleted_flows:
flow_id = key
mpls_push_stats = {}
mpls_push_flow_stats = {}
mpls_push_flow = flow.odl_mpls_push_json()
mpls_push_stats = stat.odl_mpls_push_stat()
mpls_push_flow_stats = mpls_push_stats['stat']
mpls_push_flow_counter = mpls_push_stats['counter']
switch_id = deleted_flows[key]['Switch ID']
add_src = deleted_flows[key]['Source Add']
add_dst = deleted_flows[key]['Destination Add']
priority = deleted_flows[key]['Priority']
with open("sFlow_ODL_Flowkeys_Bindings/Load_Balancing_Flowkeys_Bindings.json") as json_file:
load_balancing_flow_keys = json.load(json_file)
for key in load_balancing_flow_keys:
if key == source_key:
src_match_rule = load_balancing_flow_keys[key]
for key in load_balancing_flow_keys:
if key == destination_key:
dst_match_rule = load_balancing_flow_keys[key]
dst_add = ''
src_add = ''
in_port = ''
dl_dst = ''
dl_src = ''
protocol = ''
tcp_src_port = ''
tcp_dst_port = ''
udp_src_port = ''
udp_dst_port = ''
vlan_id = ''
vlan_priority = ''
if src_match_rule == 'src_add':
src_add = add_src
if src_match_rule == 'dl_src':
dl_src = add_src
if src_match_rule == 'tcp_src_port':
tcp_src_port = add_src
if src_match_rule == 'udp_src_port':
udp_src_port = add_src
if dst_match_rule == 'dst_add':
dst_add = add_dst
if dst_match_rule == 'dl_dst':
dl_dst = add_dst
if dst_match_rule == 'tcp_dst_port':
tcp_dst_port = add_dst
if dst_match_rule == 'udp_dst_port':
udp_dst_port = add_dst
if src_match_rule == 'vlan_id':
vlan_id = add_src
if src_match_rule == 'vlan_priority':
vlan_pirority = add_src
if dst_match_rule == 'vlan_id':
vlan_id = add_dst
if dst_match_rule == 'vlan_priority':
vlan_pirority = add_dst
table_id = '0'
for key in odl_switches_ip:
if odl_switches_ip[key] == switch_id:
switch_ip = key
if switch_ip == shortest_path_right[0]:
label = shortest_path_right_label
if switch_ip == shortest_path_left[0]:
label = shortest_path_left_label
action_mpls_label = label
con_switch = testbed_1_lsps[label][1]
for key in testbed_1_topo[switch_ip]:
if testbed_1_topo[switch_ip][key] == con_switch:
con_port = key
for key in sflow_if_map:
if sflow_if_map[key] == con_port:
port = key
action_out_port = port
flow_stat = {}
flow_stat = odl.odl_mpls_push_flow_inst(url_odl, name, password, odl_header, mpls_push_flow, mpls_push_flow_counter, switch_id, dst_add, src_add, in_port, dl_dst, dl_src, protocol, tcp_src_port, tcp_dst_port, udp_src_port, udp_dst_port, vlan_id, vlan_priority, action_mpls_label, action_out_port, table_id, priority)
if flow_stat:
flow_name = flow_stat['Flow ID']
mpls_push_flow_stats[flow_name] = flow_stat
load_balancing_flow_stats[flow_name] = {'Switch ID': switch_id, 'Source Add' : add_src, 'Destination Add' : add_dst, 'MPLS Label' : label, 'Priority' : priority}
with open("Statistics_Logs/MPLS_Push_Flow_Stats.json", "w") as json_file:
json.dump(mpls_push_flow_stats, json_file)
with open("Statistics_Logs/Testbed_1_Load_Balancing_Flow_Stats.json", "w") as json_file:
json.dump(load_balancing_flow_stats, json_file)
if mpls_push_flow_stats:
del(mpls_push_flow_stats[flow_id])
del(load_balancing_flow_stats[flow_id])
with open("Statistics_Logs/MPLS_Push_Flow_Stats.json", "w") as json_file:
json.dump(mpls_push_flow_stats, json_file)
with open("Statistics_Logs/Testbed_1_Load_Balancing_Flow_Stats.json", "w") as json_file:
json.dump(load_balancing_flow_stats, json_file)
high_utis = core_mon.int_high_uti_events()
if high_utis != {}:
for key in high_utis:
agent_node = high_utis[key]['Agent']
agent_interface_id = high_utis[key]['Interface ID']
direction = high_utis[key]['Metric']
agent_interface = sflow_if_map[agent_interface_id]
links = testbed_1_topo[agent_node]
neighbor_node = links[agent_interface]
if direction == 'ifinutilization':
test = agent_node
agent_node = neighbor_node
neighbor_node = test
if update_link_weights != {}:
m = 0
for key in update_link_weights:
if key == agent_node:
m += 1
if m != 0:
old_links = {}
old_links = update_link_weights[agent_node]
k = 0
for key in old_links:
if neighbor_node == key:
k += 1
if k == 0:
old_links[neighbor_node] = 10
update_link_weights[agent_node] = old_links
else:
new_links = {}
new_links[neighbor_node] = 10
update_link_weights[agent_node] = new_links
else:
new_links = {}
new_links[neighbor_node] = 10
update_link_weights[agent_node] = new_links
paths = optimal_testbed_network_1().optimal_path(delete_links, update_link_weights)
if paths != {}:
optimal_path_right = paths['Optimal Path Right']
optimal_path_left = paths['Optimal Path Left']
optimal_path_right_label = ''
optimal_path_left_label = ''
for key in testbed_1_lsps:
if testbed_1_lsps[key] == optimal_path_right:
optimal_path_right_label = key
if testbed_1_lsps[key] == optimal_path_left:
optimal_path_left_label = key
large_flow_events = edge_flow.network_edge_large_flow_events(flow_name)
print large_flow_events
for key in large_flow_events:
with open("Statistics_Logs/Testbed_1_Load_Balancing_Flow_Stats.json") as json_file:
load_balancing_flow_stats = json.load(json_file)
mpls_push_stats = {}
mpls_push_flow_stats = {}
mpls_push_flow = flow.odl_mpls_push_json()
mpls_push_stats = stat.odl_mpls_push_stat()
mpls_push_flow_stats = mpls_push_stats['stat']
mpls_push_flow_counter = mpls_push_stats['counter']
switch_ip = large_flow_events[key]['Agent']
add_src = large_flow_events[key]['Source Add']
add_dst = large_flow_events[key]['Destination Add']
with open("sFlow_ODL_Flowkeys_Bindings/Load_Balancing_Flowkeys_Bindings.json") as json_file:
load_balancing_flow_keys = json.load(json_file)
for key in load_balancing_flow_keys:
if key == source_key:
src_match_rule = load_balancing_flow_keys[key]
for key in load_balancing_flow_keys:
if key == destination_key:
dst_match_rule = load_balancing_flow_keys[key]
for key in odl_switches_ip:
if key == switch_ip:
switch_id = odl_switches_ip[key]
dst_add = ''
src_add = ''
in_port = ''
dl_dst = ''
dl_src = ''
protocol = ''
tcp_src_port = ''
tcp_dst_port = ''
udp_src_port = ''
udp_dst_port = ''
vlan_id = ''
vlan_priority = ''
if src_match_rule == 'src_add':
src_add = add_src
if src_match_rule == 'dl_src':
dl_src = add_src
if src_match_rule == 'tcp_src_port':
tcp_src_port = add_src
if src_match_rule == 'udp_src_port':
udp_src_port = add_src
if dst_match_rule == 'dst_add':
dst_add = add_dst
if dst_match_rule == 'dl_dst':
dl_dst = add_dst
if dst_match_rule == 'tcp_dst_port':
tcp_dst_port = add_dst
if dst_match_rule == 'udp_dst_port':
udp_dst_port = add_dst
if src_match_rule == 'vlan_id':
vlan_id = add_src
if src_match_rule == 'vlan_priority':
vlan_pirority = add_src
if dst_match_rule == 'vlan_id':
vlan_id = add_dst
if dst_match_rule == 'vlan_priority':
vlan_pirority = add_dst
table_id = '0'
priority = priority_load_balance
for key in odl_switches_ip:
if odl_switches_ip[key] == switch_id:
switch_ip = key
if switch_ip == optimal_path_right[0]:
label = optimal_path_right_label
if switch_ip == optimal_path_left[0]:
label = optimal_path_left_label
action_mpls_label = label
con_switch = testbed_1_lsps[label][1]
for key in testbed_1_topo[switch_ip]:
if testbed_1_topo[switch_ip][key] == con_switch:
con_port = key
for key in sflow_if_map:
if sflow_if_map[key] == con_port:
port = key
action_out_port = port
flow_stat = {}
flow_stat = odl.odl_mpls_push_flow_inst(url_odl, name, password, odl_header, mpls_push_flow, mpls_push_flow_counter, switch_id, dst_add, src_add, in_port, dl_dst, dl_src, protocol, tcp_src_port, tcp_dst_port, udp_src_port, udp_dst_port, vlan_id, vlan_priority, action_mpls_label, action_out_port, table_id, priority)
if flow_stat:
flow_name = flow_stat['Flow ID']
mpls_push_flow_stats[flow_name] = flow_stat
load_balancing_flow_stats[flow_name] = {'Switch ID': switch_id, 'Source Add' : add_src, 'Destination Add' : add_dst, 'MPLS Label' : label, 'Priority' : priority}
with open("Statistics_Logs/MPLS_Push_Flow_Stats.json", "w") as json_file:
json.dump(mpls_push_flow_stats, json_file)
with open("Statistics_Logs/Testbed_1_Load_Balancing_Flow_Stats.json", "w") as json_file:
json.dump(load_balancing_flow_stats, json_file)
time.sleep(timeout)
except KeyboardInterrupt:
print '\n\n\nSaving all the changes...'
            print '\nYou are now exiting the NaaS platform\'s load balancing application...\n'
sys.exit(0)
except:
print '\n\n\nSaving all the changes...'
print '\nYou are now exiting the NaaS platform\'s load balancing application...\n'
sys.exit(0)
class load_balance_testbed_network_2():
# Starting a load balancing application for a network with legacy (i.e. vendor-specific) switches at network core and open (i.e. OF/OVS) switches at the network edge
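    # Same control loop as load_balance_testbed_network_1, except that core
    # interface failures and utilization are polled via the SNMP based monitoring
    # application and installed flows additionally rewrite the destination MAC to
    # the next-hop legacy core switch (odl_hyb_mpls_push_flow_inst, action_dl_dst).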
def __init__(self):
try:
odl_base = naas_arch().odl_base_url()
url_odl = odl_base['URL']
ip_odl = odl_base['Host IP']
odl_header = naas_arch().odl_api_header()
cred = naas_arch().odl_user_cred()
name = cred['User Name']
password = cred['Password']
edge_sflow_base = naas_arch().edge_sflow_base_url()
url_edge_sflow = edge_sflow_base['URL']
ip_edge_sflow = edge_sflow_base['Host IP']
sflow_header = naas_arch().sflow_api_header()
core_snmp_base = naas_arch().core_snmp_base_url()
url_core_snmp = core_snmp_base['URL']
ip_core_snmp = core_snmp_base['Host IP']
odl_switches = naas_arch().odl_switches()
odl_switches_ip = naas_arch().odl_switches_ip()
edge_sflow_agents = naas_arch().edge_sflow_agents()
core_snmp_agents = naas_arch().core_snmp_agents()
testbed_2_topo = naas_arch().testbed_2_topology()
testbed_2_lsps = naas_arch().testbed_2_path_bindings()
sflow_if_map = naas_arch().sflow_interface_mapping()
flow = odl_api_json_formats()
stat = odl_api_flow_stat()
odl = odl_api_calls()
sflow = sflow_api_calls()
snmp = snmp_api_calls()
print '\n\n\n'
edge_flow = network_edge_flow_monitoring()
print '\n\n\n'
core_mon = snmp_network_core_interface_monitoring()
while True:
print '\n\n\n\nEnter the above configured sFlow based edge flow monitoring application name...\n\n'
flow_name = raw_input('sFlow based Edge Flow Monitoring Application Name (Required): ')
url_sflow = url_edge_sflow
flow_def = sflow.sflow_flow_def(url_sflow, flow_name)
if flow_def == {}:
print '\n\nThere is no such sFlow based edge flow monitoring application that is currently running in the NaaS platform...\n\n'
print '\n\nRe-configure and Re-enter the sFlow based edge flow monitoring application name...\n\n'
else:
break
flow_keys = flow_def['keys']
keys = re.sub(r'\s', '', flow_keys).split(',')
source_key = keys[0]
destination_key = keys[1]
print '\n\n\n\nEnter the priority value for this load balancing application and its corresponding actions...\n\n'
priority_load_balance = raw_input('Load Balancing Priority Value (Default Value = 200): ')
if priority_load_balance == '':
priority_load_balance = '200'
print '\n\nEnter the next-hop network core legacy switch interface MAC addresses to the edge OF/OVS switches...'
print 'Note: Default values are as per the configured testbed for my graduation project...\n\n'
next_hop_mac = {}
for key in odl_switches:
mac_add = ''
id_switch = odl_switches[key]
alias_switch = key
print 'Switch ID: ', id_switch
print 'Switch Alias Name: ', alias_switch
for key in odl_switches_ip:
ip_switch = key
if odl_switches_ip[key] == id_switch:
print 'Switch Management IP Address: ', ip_switch
mac_add = raw_input('Next-hop Legacy Switch Interface MAC Address to the above edge OF/OVS switch: ')
if mac_add == '':
if id_switch == 'openflow:5578350727664762986':
mac_add = '00:14:f6:83:30:00'
if id_switch == 'openflow:5578350727664762989':
mac_add = '00:14:f6:82:80:00'
next_hop_mac[id_switch] = mac_add
print '\n\n'
print '\n\n\n\nEnter the load balancing query timeout/interval (i.e. in seconds)...\n\n'
timeout = raw_input('Load Balancing Query Timeout/Interval (Default Value: 20): ')
if timeout == '':
timeout = 10
timeout = int(timeout)
print '\n\nStarting the load balancing application...\n\n'
while True:
print '\n\nQuerying for network core interface monitoring triggered events...\n\n'
print '\n\nChecking for Interface/Link Failures in the Network Core..\n\n'
delete_links = {}
update_link_weights = {}
int_failures = {}
high_utis = {}
int_failures = core_mon.int_fail_events()
if int_failures != {}:
for key in int_failures:
agent_node = int_failures[key]['Agent']
agent_interface_id = int_failures[key]['Interface ID']
for key in sflow_if_map:
if agent_interface_id == key:
agent_interface = sflow_if_map[key]
else:
url_snmp = url_core_snmp
agent = agent_node
snmp_agent_interfaces = snmp.snmp_agent_interfaces(url_snmp, agent)
agent_interface = snmp_agent_interfaces[agent_interface_id]
if delete_links != {}:
m = 0
for key in delete_links:
if key == agent_node:
m += 1
if m != 0:
old_link_list = delete_links[agent_node]
old_link_list.append(agent_interface)
delete_links[agent_node] = old_link_list
else:
new_link_list = []
new_link_list.append(agent_interface)
delete_links[agent_node] = new_link_list
else:
new_link_list = []
new_link_list.append(agent_interface)
delete_links[agent_node] = new_link_list
paths = optimal_testbed_network_2().optimal_path(delete_links, update_link_weights)
if paths != {}:
all_paths_right = paths['All Paths Right']
all_paths_left = paths['All Paths Left']
shortest_path_right = paths['Shortest Path Right']
shortest_path_left = paths['Shortest Path Left']
no_path_labels = []
shortest_path_right_label = ''
shortest_path_left_label = ''
for key in testbed_2_lsps:
if testbed_2_lsps[key] == shortest_path_right:
shortest_path_right_label = key
if testbed_2_lsps[key] == shortest_path_left:
shortest_path_left_label = key
for key in testbed_2_lsps:
m = 0
for apr in all_paths_right:
if testbed_2_lsps[key] == apr:
m += 1
for apl in all_paths_left:
if testbed_2_lsps[key] == apl:
m += 1
if m == 0:
no_path_labels.append(key)
with open("Statistics_Logs/Testbed_2_Basic_Connectivity_Flow_Stats.json") as json_file:
basic_connectivity_flow_stats = json.load(json_file)
installed_path_labels = []
deleted_path_labels = []
for key in basic_connectivity_flow_stats:
installed_path_labels.append(basic_connectivity_flow_stats[key]['MPLS Label'])
deleted_path_labels = set(installed_path_labels).intersection(no_path_labels)
deleted_flows = {}
for dpl in deleted_path_labels:
for key in basic_connectivity_flow_stats:
if basic_connectivity_flow_stats[key]['MPLS Label'] == dpl:
deleted_flows[key] = basic_connectivity_flow_stats[key]
for key in deleted_flows:
flow_id = key
mpls_push_stats = {}
mpls_push_flow_stats = {}
hyb_mpls_push_flow = flow.odl_hyb_mpls_push_json()
mpls_push_stats = stat.odl_mpls_push_stat()
mpls_push_flow_stats = mpls_push_stats['stat']
mpls_push_flow_counter = mpls_push_stats['counter']
switch_id = deleted_flows[key]['Switch ID']
dst_add = deleted_flows[key]['IP Destination']
src_add = ''
in_port = ''
dl_dst = ''
dl_src = ''
protocol = ''
tcp_src_port = ''
tcp_dst_port = ''
udp_src_port = ''
udp_dst_port = ''
vlan_id = ''
vlan_priority = ''
table_id = '0'
priority = '20'
for key in odl_switches_ip:
if odl_switches_ip[key] == switch_id:
switch_ip = key
if switch_ip == shortest_path_right[0]:
label = shortest_path_right_label
if switch_ip == shortest_path_left[0]:
label = shortest_path_left_label
action_mpls_label = label
con_switch = testbed_2_lsps[label][1]
for key in testbed_2_topo[switch_ip]:
if testbed_2_topo[switch_ip][key] == con_switch:
con_port = key
for key in sflow_if_map:
if sflow_if_map[key] == con_port:
port = key
action_out_port = port
action_dl_dst = next_hop_mac[switch_id]
flow_stat = {}
flow_stat = odl.odl_hyb_mpls_push_flow_inst(url_odl, name, password, odl_header, hyb_mpls_push_flow, mpls_push_flow_counter, switch_id, dst_add, src_add, in_port, dl_dst, dl_src, protocol, tcp_src_port, tcp_dst_port, udp_src_port, udp_dst_port, vlan_id, vlan_priority, action_mpls_label, action_out_port, action_dl_dst, table_id, priority)
if flow_stat:
flow_name = flow_stat['Flow ID']
mpls_push_flow_stats[flow_name] = flow_stat
basic_connectivity_flow_stats[flow_name] = {'Switch ID': switch_id, 'IP Destination' : dst_add, 'MPLS Label' : label}
with open("Statistics_Logs/MPLS_Push_Flow_Stats.json", "w") as json_file:
json.dump(mpls_push_flow_stats, json_file)
with open("Statistics_Logs/Testbed_2_Basic_Connectivity_Flow_Stats.json", "w") as json_file:
json.dump(basic_connectivity_flow_stats, json_file)
if mpls_push_flow_stats:
del(mpls_push_flow_stats[flow_id])
del(basic_connectivity_flow_stats[flow_id])
with open("Statistics_Logs/MPLS_Push_Flow_Stats.json", "w") as json_file:
json.dump(mpls_push_flow_stats, json_file)
with open("Statistics_Logs/Testbed_2_Basic_Connectivity_Flow_Stats.json", "w") as json_file:
json.dump(basic_connectivity_flow_stats, json_file)
with open("Statistics_Logs/Testbed_2_Load_Balancing_Flow_Stats.json") as json_file:
load_balancing_flow_stats = json.load(json_file)
installed_path_labels = []
deleted_path_labels = []
for key in load_balancing_flow_stats:
installed_path_labels.append(load_balancing_flow_stats[key]['MPLS Label'])
deleted_path_labels = set(installed_path_labels).intersection(no_path_labels)
deleted_flows = {}
for dpl in deleted_path_labels:
for key in load_balancing_flow_stats:
if load_balancing_flow_stats[key]['MPLS Label'] == dpl:
deleted_flows[key] = load_balancing_flow_stats[key]
for key in deleted_flows:
flow_id = key
mpls_push_stats = {}
mpls_push_flow_stats = {}
hyb_mpls_push_flow = flow.odl_hyb_mpls_push_json()
mpls_push_stats = stat.odl_mpls_push_stat()
mpls_push_flow_stats = mpls_push_stats['stat']
mpls_push_flow_counter = mpls_push_stats['counter']
switch_id = deleted_flows[key]['Switch ID']
add_src = deleted_flows[key]['Source Add']
add_dst = deleted_flows[key]['Destination Add']
priority = deleted_flows[key]['Priority']
with open("sFlow_ODL_Flowkeys_Bindings/Load_Balancing_Flowkeys_Bindings.json") as json_file:
load_balancing_flow_keys = json.load(json_file)
for key in load_balancing_flow_keys:
if key == source_key:
src_match_rule = load_balancing_flow_keys[key]
for key in load_balancing_flow_keys:
if key == destination_key:
dst_match_rule = load_balancing_flow_keys[key]
dst_add = ''
src_add = ''
in_port = ''
dl_dst = ''
dl_src = ''
protocol = ''
tcp_src_port = ''
tcp_dst_port = ''
udp_src_port = ''
udp_dst_port = ''
vlan_id = ''
vlan_priority = ''
if src_match_rule == 'src_add':
src_add = add_src
if src_match_rule == 'dl_src':
dl_src = add_src
if src_match_rule == 'tcp_src_port':
tcp_src_port = add_src
if src_match_rule == 'udp_src_port':
udp_src_port = add_src
if dst_match_rule == 'dst_add':
dst_add = add_dst
if dst_match_rule == 'dl_dst':
dl_dst = add_dst
if dst_match_rule == 'tcp_dst_port':
tcp_dst_port = add_dst
if dst_match_rule == 'udp_dst_port':
udp_dst_port = add_dst
if src_match_rule == 'vlan_id':
vlan_id = add_src
if src_match_rule == 'vlan_priority':
vlan_pirority = add_src
if dst_match_rule == 'vlan_id':
vlan_id = add_dst
if dst_match_rule == 'vlan_priority':
vlan_pirority = add_dst
table_id = '0'
for key in odl_switches_ip:
if odl_switches_ip[key] == switch_id:
switch_ip = key
if switch_ip == shortest_path_right[0]:
label = shortest_path_right_label
if switch_ip == shortest_path_left[0]:
label = shortest_path_left_label
action_mpls_label = label
con_switch = testbed_2_lsps[label][1]
for key in testbed_2_topo[switch_ip]:
if testbed_2_topo[switch_ip][key] == con_switch:
con_port = key
for key in sflow_if_map:
if sflow_if_map[key] == con_port:
port = key
action_out_port = port
action_dl_dst = next_hop_mac[switch_id]
flow_stat = {}
flow_stat = odl.odl_hyb_mpls_push_flow_inst(url_odl, name, password, odl_header, hyb_mpls_push_flow, mpls_push_flow_counter, switch_id, dst_add, src_add, in_port, dl_dst, dl_src, protocol, tcp_src_port, tcp_dst_port, udp_src_port, udp_dst_port, vlan_id, vlan_priority, action_mpls_label, action_out_port, action_dl_dst, table_id, priority)
if flow_stat:
flow_name = flow_stat['Flow ID']
mpls_push_flow_stats[flow_name] = flow_stat
load_balancing_flow_stats[flow_name] = {'Switch ID': switch_id, 'Source Add' : add_src, 'Destination Add' : add_dst, 'MPLS Label' : label, 'Priority' : priority}
with open("Statistics_Logs/MPLS_Push_Flow_Stats.json", "w") as json_file:
json.dump(mpls_push_flow_stats, json_file)
with open("Statistics_Logs/Testbed_2_Load_Balancing_Flow_Stats.json", "w") as json_file:
json.dump(load_balancing_flow_stats, json_file)
if mpls_push_flow_stats:
del(mpls_push_flow_stats[flow_id])
del(load_balancing_flow_stats[flow_id])
with open("Statistics_Logs/MPLS_Push_Flow_Stats.json", "w") as json_file:
json.dump(mpls_push_flow_stats, json_file)
with open("Statistics_Logs/Testbed_2_Load_Balancing_Flow_Stats.json", "w") as json_file:
json.dump(load_balancing_flow_stats, json_file)
high_utis = core_mon.int_high_uti_events()
if high_utis != {}:
for key in high_utis:
agent_node = high_utis[key]['Agent']
agent_interface_id = high_utis[key]['Interface ID']
direction = high_utis[key]['Metric']
for key in sflow_if_map:
if agent_interface_id == key:
agent_interface = sflow_if_map[key]
else:
url_snmp = url_core_snmp
agent = agent_node
snmp_agent_interfaces = snmp.snmp_agent_interfaces(url_snmp, agent)
agent_interface = snmp_agent_interfaces[agent_interface_id]
links = testbed_2_topo[agent_node]
neighbor_node = links[agent_interface]
if direction == 'ifinutilization':
test = agent_node
agent_node = neighbor_node
neighbor_node = test
if update_link_weights != {}:
m = 0
for key in update_link_weights:
if key == agent_node:
m += 1
if m != 0:
old_links = {}
old_links = update_link_weights[agent_node]
k = 0
for key in old_links:
if neighbor_node == key:
k += 1
if k == 0:
old_links[neighbor_node] = 10
update_link_weights[agent_node] = old_links
else:
new_links = {}
new_links[neighbor_node] = 10
update_link_weights[agent_node] = new_links
else:
new_links = {}
new_links[neighbor_node] = 10
update_link_weights[agent_node] = new_links
paths = optimal_testbed_network_2().optimal_path(delete_links, update_link_weights)
if paths != {}:
optimal_path_right = paths['Optimal Path Right']
optimal_path_left = paths['Optimal Path Left']
optimal_path_right_label = ''
optimal_path_left_label = ''
for key in testbed_2_lsps:
if testbed_2_lsps[key] == optimal_path_right:
optimal_path_right_label = key
if testbed_2_lsps[key] == optimal_path_left:
optimal_path_left_label = key
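                        # Re-steer newly detected large edge flows onto the recomputed optimal LSPs.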
large_flow_events = edge_flow.network_edge_large_flow_events(flow_name)
for key in large_flow_events:
with open("Statistics_Logs/Testbed_2_Load_Balancing_Flow_Stats.json") as json_file:
load_balancing_flow_stats = json.load(json_file)
mpls_push_stats = {}
mpls_push_flow_stats = {}
hyb_mpls_push_flow = flow.odl_hyb_mpls_push_json()
mpls_push_stats = stat.odl_mpls_push_stat()
mpls_push_flow_stats = mpls_push_stats['stat']
mpls_push_flow_counter = mpls_push_stats['counter']
switch_ip = large_flow_events[key]['Agent']
add_src = large_flow_events[key]['Source Add']
add_dst = large_flow_events[key]['Destination Add']
with open("sFlow_ODL_Flowkeys_Bindings/Load_Balancing_Flowkeys_Bindings.json") as json_file:
load_balancing_flow_keys = json.load(json_file)
for key in load_balancing_flow_keys:
if key == source_key:
src_match_rule = load_balancing_flow_keys[key]
for key in load_balancing_flow_keys:
if key == destination_key:
dst_match_rule = load_balancing_flow_keys[key]
for key in odl_switches_ip:
if key == switch_ip:
switch_id = odl_switches_ip[key]
dst_add = ''
src_add = ''
in_port = ''
dl_dst = ''
dl_src = ''
protocol = ''
tcp_src_port = ''
tcp_dst_port = ''
udp_src_port = ''
udp_dst_port = ''
vlan_id = ''
vlan_priority = ''
if src_match_rule == 'src_add':
src_add = add_src
if src_match_rule == 'dl_src':
dl_src = add_src
                            if src_match_rule == 'tcp_src_port':
                                tcp_src_port = add_src
                            if src_match_rule == 'udp_src_port':
                                udp_src_port = add_src
                            if dst_match_rule == 'dst_add':
                                dst_add = add_dst
                            if dst_match_rule == 'dl_dst':
                                dl_dst = add_dst
                            if dst_match_rule == 'tcp_dst_port':
                                tcp_dst_port = add_dst
                            if dst_match_rule == 'udp_dst_port':
                                udp_dst_port = add_dst
                            if src_match_rule == 'vlan_id':
                                vlan_id = add_src
                            if src_match_rule == 'vlan_priority':
                                vlan_priority = add_src
                            if dst_match_rule == 'vlan_id':
                                vlan_id = add_dst
                            if dst_match_rule == 'vlan_priority':
                                vlan_priority = add_dst
table_id = '0'
priority = priority_load_balance
for key in odl_switches_ip:
if odl_switches_ip[key] == switch_id:
switch_ip = key
if switch_ip == optimal_path_right[0]:
label = optimal_path_right_label
if switch_ip == optimal_path_left[0]:
label = optimal_path_left_label
action_mpls_label = label
con_switch = testbed_2_lsps[label][1]
for key in testbed_2_topo[switch_ip]:
if testbed_2_topo[switch_ip][key] == con_switch:
con_port = key
for key in sflow_if_map:
if sflow_if_map[key] == con_port:
port = key
action_out_port = port
action_dl_dst = next_hop_mac[switch_id]
flow_stat = {}
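                            # Install the re-routing MPLS push flow with the load-balancing priority and persist the updated flow statistics.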
flow_stat = odl.odl_hyb_mpls_push_flow_inst(url_odl, name, password, odl_header, hyb_mpls_push_flow, mpls_push_flow_counter, switch_id, dst_add, src_add, in_port, dl_dst, dl_src, protocol, tcp_src_port, tcp_dst_port, udp_src_port, udp_dst_port, vlan_id, vlan_priority, action_mpls_label, action_out_port, action_dl_dst, table_id, priority)
if flow_stat:
flow_name = flow_stat['Flow ID']
mpls_push_flow_stats[flow_name] = flow_stat
load_balancing_flow_stats[flow_name] = {'Switch ID': switch_id, 'Source Add' : add_src, 'Destination Add' : add_dst, 'MPLS Label' : label, 'Priority' : priority}
with open("Statistics_Logs/MPLS_Push_Flow_Stats.json", "w") as json_file:
json.dump(mpls_push_flow_stats, json_file)
with open("Statistics_Logs/Testbed_2_Load_Balancing_Flow_Stats.json", "w") as json_file:
json.dump(load_balancing_flow_stats, json_file)
time.sleep(timeout)
except KeyboardInterrupt:
print '\n\n\nSaving all the changes...'
        print '\nYou are now exiting the NaaS platform\'s load balancing application...\n'
sys.exit(0)
except:
print '\n\n\nSaving all the changes...'
print '\nYou are now exiting the NaaS platform\'s load balancing application...\n'
sys.exit(0)
|
TUDelftNAS/SDN-NaaSPlatform
|
NaaSPlatform/Load_Balancing_App.py
|
Python
|
gpl-3.0
| 62,701 | 0.003397 |
import itertools
import numpy
import math
def ncr(n, r):
f = math.factorial
return f(n) // f(r) // f(n-r)
def subset_pairs(s):
for a_size in range(1, len(s)):
for a in itertools.combinations(s, a_size):
remaining = s.difference(a)
for b_size in range(1, len(remaining) + 1):
for b in itertools.combinations(remaining, b_size):
yield a, b
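# Illustrative usage (not part of the original file): enumerate all ordered pairs
# of disjoint, non-empty subsets of a small set.
# for a, b in subset_pairs({1, 2, 3}):
#     print(a, b)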
[11, 18, 19, 20, 22, 25]
|
simonolander/euler
|
euler-106.py
|
Python
|
mit
| 450 | 0.002222 |
from PyQt5 import QtCore
from src.business.configuration.constants import project as p
from src.ui.commons.verification import cb
class ConfigProject:
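    # Stores and retrieves project configuration in an INI file via QSettings, grouped into site, geographic and sun/moon sections.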
def __init__(self):
self._settings = QtCore.QSettings(p.CONFIG_FILE, QtCore.QSettings.IniFormat)
def get_value(self, menu, value):
return self._settings.value(menu + '/' + value)
def set_site_settings(self, name, site_id, imager_id):
self._settings.beginGroup(p.SITE_TITLE)
self._settings.setValue(p.NAME, name)
self._settings.setValue(p.SITE_ID, site_id)
self._settings.setValue(p.IMAGER_ID, imager_id)
self._settings.endGroup()
def set_geographic_settings(self, lat, long, elev, press, temp):
self._settings.beginGroup(p.GEOGRAPHIC_TITLE)
self._settings.setValue(p.LATITUDE, lat)
self._settings.setValue(p.LONGITUDE, long)
self._settings.setValue(p.ELEVATION, elev)
self._settings.setValue(p.PRESSURE, press)
self._settings.setValue(p.TEMPERATURE, temp)
self._settings.endGroup()
def set_moonsun_settings(self, solarelev, ignoreLunar, lunarph, lunarpos):
self._settings.beginGroup(p.SUN_MOON_TITLE)
self._settings.setValue(p.MAX_SOLAR_ELEVATION, solarelev)
self._settings.setValue(p.IGNORE_LUNAR_POSITION, ignoreLunar)
self._settings.setValue(p.MAX_LUNAR_PHASE, lunarph)
self._settings.setValue(p.MAX_LUNAR_ELEVATION, lunarpos)
self._settings.endGroup()
def save_settings(self):
self._settings.sync()
def get_site_settings(self):
return self.get_value(p.SITE_TITLE, p.NAME),\
self.get_value(p.SITE_TITLE, p.SITE_ID),\
self.get_value(p.SITE_TITLE, p.IMAGER_ID)
def get_geographic_settings(self):
m = p.GEOGRAPHIC_TITLE
return self.get_value(m, p.LATITUDE),\
self.get_value(m, p.LONGITUDE),\
self.get_value(m, p.ELEVATION),\
self.get_value(m, p.PRESSURE),\
self.get_value(m, p.TEMPERATURE)
def get_moonsun_settings(self):
m = p.SUN_MOON_TITLE
return self.get_value(m, p.MAX_SOLAR_ELEVATION),\
cb(self.get_value(m, p.IGNORE_LUNAR_POSITION)),\
self.get_value(m, p.MAX_LUNAR_PHASE),\
self.get_value(m, p.MAX_LUNAR_ELEVATION)
|
pliniopereira/ccd10
|
src/business/configuration/configProject.py
|
Python
|
gpl-3.0
| 2,363 | 0.003386 |
from Scouting2017.model.models2017 import ScoreResult
from django.db.models.aggregates import Avg
from django.db.models.expressions import Case, When
import json
import math
import collections
def get_statistics(regional_code, teams_at_competition, team=0):
'''
    The get_statistics() function returns two collections of metrics.
    The first, stats, is a dictionary of overall averages for all score results, together with the standard deviations of those score results about the mean.
    The second, skills, is a list of per-team data, including z-scores, calculated fuel scores for autonomous, teleop, and overall, and rope-climbing accuracy.
'''
skills = []
competition_srs = ScoreResult.objects.filter(competition__code=regional_code)
competition_averages = competition_srs.aggregate(Avg('auto_gears'),
Avg('auto_fuel_high_score'),
Avg('tele_gears'),
Avg('tele_fuel_high_score'),
rope__avg=Avg(Case(When(rope=True, then=1), When(rope=False, then=0))))
rope_avg = competition_averages['rope__avg']
gear_avg = competition_averages['tele_gears__avg']
if competition_averages['auto_fuel_high_score__avg'] and competition_averages['tele_fuel_high_score__avg']:
fuel_avg = competition_averages['auto_fuel_high_score__avg'] + (competition_averages['tele_fuel_high_score__avg'] / 3)
else:
fuel_avg = 0
# This part of the function (above) obtains overall averages for all score results
gear_v2 = 0
fuel_v2 = 0
rope_v2 = 0
num_srs = 0
for sr in competition_srs:
if sr.rope:
sr_rope = 1 - rope_avg
else:
sr_rope = 0 - rope_avg
sr_gear = sr.tele_gears - gear_avg
sr_fuel = ((sr.auto_fuel_high_score) + (sr.tele_fuel_high_score / 3)) - fuel_avg
gear_v2 += sr_gear * sr_gear
fuel_v2 += sr_fuel * sr_fuel
rope_v2 += sr_rope * sr_rope
num_srs += 1
if num_srs == 0:
gear_stdev = 0
fuel_stdev = 0
rope_stdev = 0
else:
gear_stdev = math.sqrt(gear_v2 / num_srs)
fuel_stdev = math.sqrt(fuel_v2 / num_srs)
rope_stdev = math.sqrt(rope_v2 / num_srs)
team_avgs = collections.defaultdict(int)
# This part of the function (above) obtains overall standard deviations for all score results
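    # Restrict the per-team skill calculation to the team(s) passed in, otherwise evaluate every team at the competition.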
teams = team if bool(team) else teams_at_competition
for team in teams:
teams_srs = team.scoreresult_set.filter(competition__code=regional_code)
team_avgs = teams_srs.aggregate(Avg('tele_gears'),
Avg('tele_fuel_high_score'),
Avg('auto_fuel_high_score'),
team_rope__avg=Avg(Case(When(rope=True, then=1), When(rope=False, then=0))))
team.skills = {}
team.skills['fuel_z'] = 'NA'
team.skills['gear_z'] = 'NA'
team.skills['rope_z'] = 'NA'
team.skills['rope_pct'] = 'NA'
if len(teams_srs) != 0:
team.skills['fuel_score'] = ((team_avgs['auto_fuel_high_score__avg']) + (team_avgs['tele_fuel_high_score__avg'] / 3))
team.skills['gear_z'] = (team_avgs['tele_gears__avg'] - gear_avg) / gear_stdev if gear_stdev != 0 else 0
team.skills['fuel_z'] = (((team_avgs['auto_fuel_high_score__avg']) + (team_avgs['tele_fuel_high_score__avg'] / 3)) - fuel_avg) / fuel_stdev if fuel_stdev != 0 else 0
team.skills['rope_z'] = (team_avgs['team_rope__avg'] - rope_avg) / rope_stdev if rope_stdev != 0 else 0
team.skills['rope_pct'] = team_avgs['team_rope__avg'] * 100
skills.append({'team': team.teamNumber, 'skills': team.skills})
stats = {'gear_avg': gear_avg, 'rope_avg': rope_avg, 'fuel_avg': fuel_avg, 'fuel_hi_avg': team_avgs['tele_fuel_high_score__avg'],
'fuel_hi_auto_avg': team_avgs['auto_fuel_high_score__avg'], 'auto_gear_avg': competition_averages['auto_gears__avg'], 'gear_stdev': gear_stdev, 'rope_stdev': rope_stdev, 'fuel_stdev': fuel_stdev}
return (stats, json.dumps(skills))
|
ArcticWarriors/scouting-app
|
ScoutingWebsite/Scouting2017/model/get_stastics.py
|
Python
|
mit
| 4,360 | 0.004358 |
# Copyright 2010 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
import re
from django import forms
from django.utils.translation import ugettext as _
from identityprovider.widgets import CommaSeparatedWidget
class CommaSeparatedField(forms.MultipleChoiceField):
widget = CommaSeparatedWidget
def clean(self, value):
return ','.join(super(CommaSeparatedField, self).clean(value))
class OATHPasswordField(forms.CharField):
"""A string of between 6 or 8 digits."""
widget = forms.widgets.TextInput(attrs={
'autocomplete': 'off',
'autofocus': 'autofocus'
})
SIX = re.compile('[0-9]{6}$')
EIGHT = re.compile('[0-9]{8}$')
def clean(self, value):
"""Validate otp and detect type"""
# remove any whitespace from the string
if value:
value = value.strip().replace(' ', '')
value = super(OATHPasswordField, self).clean(value)
if self.SIX.match(value):
return value
elif self.EIGHT.match(value):
return value
raise forms.ValidationError(
_('Please enter a 6-digit or 8-digit one-time password.'))
|
miing/mci_migo
|
identityprovider/fields.py
|
Python
|
agpl-3.0
| 1,229 | 0 |
from GangaCore.testlib.GangaUnitTest import GangaUnitTest
from GangaGUI.api import internal
# ******************** Test Class ******************** #
# Templates API Tests
class TestGangaGUIInternalTemplatesAPI(GangaUnitTest):
# Setup
def setUp(self, extra_opts=[]):
super(TestGangaGUIInternalTemplatesAPI, self).setUp(extra_opts=[])
# App config and database creation
internal.config["TESTING"] = True
# Flask test client
self.app = internal.test_client()
# Templates API - GET Method
def test_GET_method_templates_list(self):
from GangaCore.GPI import templates, JobTemplate, GenericSplitter, Local
# Create 20 test templates
for i in range(0, 20):
t = JobTemplate()
t.name = f"Template Test {i}"
t.application.exe = "sleep"
t.splitter = GenericSplitter()
t.splitter.attribute = 'application.args'
t.splitter.values = [['3'] for _ in range(0, 3)]
t.backend = Local()
# GET request
res = self.app.get(f"/internal/templates")
self.assertTrue(res.status_code == 200)
self.assertTrue(len(res.json) == 20)
# Response data assertions
supported_attributes = ["id", "fqid", "name", "application", "backend", "comment", "backend.actualCE"]
for i in range(0, 20):
for attribute in supported_attributes:
self.assertTrue(attribute in res.json[i])
self.assertTrue(res.json[i]["name"] == f"Template Test {i}")
# Templates API - DELETE Method, ID Out of Index
def test_DELETE_method_id_out_of_range(self):
res = self.app.delete(f"/internal/templates/1")
self.assertTrue(res.status_code == 400)
# Templates API - DELETE Method, ID is Negative
def test_DELETE_method_id_negative(self):
res = self.app.delete(f"/internal/templates/-1")
self.assertTrue(res.status_code == 404)
# Templates API - DELETE Method, ID is String
def test_DELETE_method_id_string(self):
res = self.app.delete(f"/internal/templates/test")
self.assertTrue(res.status_code == 404)
# Templates API - DELETE Method
def test_DELETE_method_templates_list(self):
from GangaCore.GPI import templates, JobTemplate, GenericSplitter, Local
# Clean template repository check
self.assertTrue(len(templates) == 0)
# Create 20 test templates
created_template_ids = []
for i in range(0, 20):
t = JobTemplate()
t.name = f"Template Test {i}"
created_template_ids.append(t.id)
self.assertTrue(len(templates) == 20)
self.assertTrue(len(created_template_ids) == 20)
# Delete one template every request and assert the deletion
for i in range(0,20):
self.assertTrue(created_template_ids[i] in templates.ids())
res = self.app.delete(f"/internal/templates/{created_template_ids[i]}")
self.assertTrue(res.status_code == 200)
self.assertTrue(len(templates) == (20-(i+1)))
self.assertTrue(created_template_ids[i] not in templates.ids())
# Tear down
def tearDown(self):
super(TestGangaGUIInternalTemplatesAPI, self).tearDown()
# ******************** EOF ******************** #
|
ganga-devs/ganga
|
ganga/GangaGUI/test/test_internal_templates_api.py
|
Python
|
gpl-3.0
| 3,358 | 0.001489 |
# CamJam EduKit 3 - Robotics
# Worksheet 7 - Controlling the motors with PWM
import RPi.GPIO as GPIO # Import the GPIO Library
import time # Import the Time library
# Set the GPIO modes
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# Set variables for the GPIO motor pins
pinMotorAForwards = 10
pinMotorABackwards = 9
pinMotorBForwards = 8
pinMotorBBackwards = 7
# How many times to turn the pin on and off each second
Frequency = 20
# How long the pin stays on each cycle, as a percent (here, it's 30%)
DutyCycle = 30
# Setting the duty cycle to 0 means the motors will not turn
Stop = 0
# Set the GPIO Pin mode to be Output
GPIO.setup(pinMotorAForwards, GPIO.OUT)
GPIO.setup(pinMotorABackwards, GPIO.OUT)
GPIO.setup(pinMotorBForwards, GPIO.OUT)
GPIO.setup(pinMotorBBackwards, GPIO.OUT)
# Set the GPIO to software PWM at 'Frequency' Hertz
pwmMotorAForwards = GPIO.PWM(pinMotorAForwards, Frequency)
pwmMotorABackwards = GPIO.PWM(pinMotorABackwards, Frequency)
pwmMotorBForwards = GPIO.PWM(pinMotorBForwards, Frequency)
pwmMotorBBackwards = GPIO.PWM(pinMotorBBackwards, Frequency)
# Start the software PWM with a duty cycle of 0 (i.e. not moving)
pwmMotorAForwards.start(Stop)
pwmMotorABackwards.start(Stop)
pwmMotorBForwards.start(Stop)
pwmMotorBBackwards.start(Stop)
# Turn all motors off
def stopmotors():
pwmMotorAForwards.ChangeDutyCycle(Stop)
pwmMotorABackwards.ChangeDutyCycle(Stop)
pwmMotorBForwards.ChangeDutyCycle(Stop)
pwmMotorBBackwards.ChangeDutyCycle(Stop)
# Turn both motors forwards
def forwards():
pwmMotorAForwards.ChangeDutyCycle(DutyCycle)
pwmMotorABackwards.ChangeDutyCycle(Stop)
pwmMotorBForwards.ChangeDutyCycle(DutyCycle)
pwmMotorBBackwards.ChangeDutyCycle(Stop)
# Turn both motors backwards
def backwards():
pwmMotorAForwards.ChangeDutyCycle(Stop)
pwmMotorABackwards.ChangeDutyCycle(DutyCycle)
pwmMotorBForwards.ChangeDutyCycle(Stop)
pwmMotorBBackwards.ChangeDutyCycle(DutyCycle)
# Turn left
def left():
pwmMotorAForwards.ChangeDutyCycle(Stop)
pwmMotorABackwards.ChangeDutyCycle(DutyCycle)
pwmMotorBForwards.ChangeDutyCycle(DutyCycle)
pwmMotorBBackwards.ChangeDutyCycle(Stop)
# Turn Right
def right():
pwmMotorAForwards.ChangeDutyCycle(DutyCycle)
pwmMotorABackwards.ChangeDutyCycle(Stop)
pwmMotorBForwards.ChangeDutyCycle(Stop)
pwmMotorBBackwards.ChangeDutyCycle(DutyCycle)
# Your code to control the robot goes below this line
forwards()
time.sleep(1) # Pause for 1 second
left()
time.sleep(0.5) # Pause for half a second
forwards()
time.sleep(1)
right()
time.sleep(0.5)
backwards()
time.sleep(0.5)
stopmotors()
GPIO.cleanup()
|
CamJam-EduKit/EduKit3
|
CamJam Edukit 3 - RPi.GPIO/Code/7-pwm.py
|
Python
|
mit
| 2,662 | 0 |
#!/usr/bin/python
# Script to Check the difference in 2 files
# 1 fevereiro de 2015
# https://github.com/thezakman
file1 = raw_input('[file1:] ')
modified = open(file1,"r").readlines()[0]
file2 = raw_input('[file2:] ')
pi = open(file2, "r").readlines()[0] # [:len(modified)]
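# Compare the first lines of the two files position by position; zip() stops at the shorter string, so trailing characters of the longer one are ignored.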
resultado = "".join( x for x,y in zip(modified, pi) if x != y)
resultado2 = "".join( x for x,y in zip(pi, modified) if x != y)
print "[Differ:]
print '\n-------------------------------------'
print "[file1] -> [file2]", resultado
print '-------------------------------------'
print "[file2] -> [file1]", resultado2
|
thezakman/CTF-Scripts
|
Differ.py
|
Python
|
artistic-2.0
| 631 | 0.011094 |