repo_name (stringlengths 5-92) | path (stringlengths 4-232) | copies (stringclasses 19 values) | size (stringlengths 4-7) | content (stringlengths 721-1.04M) | license (stringclasses 15 values) | hash (int64 -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64 6.51-99.9) | line_max (int64 15-997) | alpha_frac (float64 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
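The columns above give per-file metadata for the code samples listed below. As a rough sketch of how that metadata might be used downstream (the `pandas` usage and the shard filename are assumptions, not something this dump specifies), one could filter out auto-generated or unusually wide files like this:

```python
import pandas as pd

# Hypothetical shard of this dataset; the filename is an assumption.
df = pd.read_json("code_shard.jsonl", lines=True)

# Keep human-written files with sane line widths and a plausible share of
# alphabetic characters, using the columns described in the header above.
kept = df[
    (~df["autogenerated"])
    & (df["line_max"] <= 400)
    & (df["alpha_frac"].between(0.25, 0.97))
]

print("kept", len(kept), "of", len(df), "rows")
```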
prologic/kdb
|
kdb/plugins/rss.py
|
1
|
7377
|
from itertools import chain
from time import mktime, time
from pickle import dumps, loads
from traceback import format_exc
from circuits.protocols.irc import PRIVMSG
from circuits import handler, task, Event, Timer, Component
from feedparser import parse as parse_feed
from funcy import first, second
from html2text import html2text
from ..utils import log
from ..plugin import BasePlugin
def check_feed(feed):
return feed()
class check_feeds(Event):
"""check_feeds Event"""
class Feed(object):
def __init__(self, url, target, interval=60):
self.url = url
self.target = target
self.interval = interval
self.entries = []
self.title = ""
self.link = ""
self.next = 0
self.reset()
def reset(self):
self.next = time() + (self.interval * 60)
def __call__(self):
d = parse_feed(self.url)
if self.title == "" and self.link == "":
self.title = getattr(d.feed, "title", "")
self.link = getattr(d.feed, "link", "")
new = []
for v in d.entries:
e = {
"time": mktime(v.updated_parsed),
"title": v.title,
"summary": html2text(v.summary).strip().split("\n")[0],
"link": v.links[0].href
}
if e not in self.entries:
self.entries.append(e)
new.append(e)
if not new == []:
s = []
s.append("RSS: {0:s} ({1:s})".format(self.title, self.link))
for e in new[:3]:
x = sum([len(e["title"]), len(e["summary"]), len(e["link"])])
if x > 450:
y = sum([len(e["title"]), len(e["link"])])
s.append(
" * {0:s}: {1:s} ... <{2:s}>".format(
e["title"],
e["summary"][:(450 - y)],
e["link"]
)
)
else:
s.append(
" * {0:s}: {1:s} <{2:s}>".format(
e["title"],
e["summary"],
e["link"]
)
)
return s
else:
return []
class Commands(Component):
channel = "commands"
def radd(self, source, target, args):
"""Add a new RSS feed to be checked at the given interval.
Interval is in minutes.
Syntax: RADD <url> [<interval>]
"""
if not args:
yield "No URL specified."
tokens = args.split(" ", 2)
url = first(tokens)
interval = second(tokens) or "60"
try:
interval = int(interval)
except Exception, error:
log("ERROR: {0:s}\n{1:s}", error, format_exc())
yield "Invalid interval specified."
if interval > 60:
yield "Interval must be less than 60 minutres."
feeds = self.parent.data["feeds"]
feed = Feed(url, target, interval)
if target in feeds:
feeds[target].append(feed)
else:
feeds[target] = [feed]
value = yield self.call(
task(
check_feed,
feed,
),
"workerprocesses"
)
yield value.value
def rdel(self, source, target, args):
"""Delete an RSS feed.
Syntax: RDEL <n>
"""
if not args:
return "No feed number. specified."
n = args
feeds = self.parent.data["feeds"]
if target in feeds:
try:
n = int(n)
except Exception, error:
log("ERROR: {0:s}\n{1:s}", error, format_exc())
return "Invalid feed number specified."
if n > 0 and n <= len(feeds[target]):
del feeds[target][(n - 1)]
msg = "Feed {0:d} deleted.".format(n)
else:
msg = "Invalid feed number specified."
else:
msg = "No feeds to delete."
return msg
def read(self, source, target, args):
"""Read an RSS feed.
Syntax: READ <n>
"""
if not args:
return "No feed number. specified."
n = args
feeds = self.parent.data["feeds"]
if target in feeds:
try:
n = int(n)
except Exception, error:
log("ERROR: {0:s}\n{1:s}", error, format_exc())
return "Invalid feed number specified."
if n > 0 and n <= len(feeds[target]):
feed = feeds[target][n - 1]  # feed numbers shown to the user are 1-based
msg = feed()
feed.reset()
else:
msg = "Invalid feed number specified."
else:
msg = "No feeds to read."
return msg
def rlist(self, source, target, args):
"""List all active RSS feeds.
Syntax: RLIST
"""
feeds = self.parent.data["feeds"]
if target in feeds:
msg = ["RSS Feeds ({0:s}):".format(target)]
for i, feed in enumerate(feeds[target]):
msg.append((
" {0:d}. {1:s} ({2:s}) / {3:d}mins "
"(Next Update in {4:d}mins)").format(
(i + 1), feed.title, feed.url,
feed.interval, int((feed.next - time()) / 60)
)
)
else:
msg = "No feeds available."
return msg
class RSS(BasePlugin):
"""RSS Aggregator plugin
Provides RSS aggregation functions allowing you to
create public or private RSS feeds that are downloaded
at regular intervals, checked, and displayed.
See: COMMANDS rss
"""
__version__ = "0.0.8"
__author__ = "James Mills, prologic at shortcircuit dot net dot au"
def init(self, *args, **kwargs):
super(RSS, self).init(*args, **kwargs)
filename = self.config.get("rss", {}).get("filename", None)
if filename is not None:
self.data.init(
{
"feeds": self.load(filename)
}
)
else:
self.data.init(
{
"feeds": {}
}
)
Commands().register(self)
Timer(60, check_feeds(), persist=True).register(self)
def cleanup(self):
filename = self.config.get("rss", {}).get("filename", None)
if filename is not None:
self.save(filename)
def load(self, filename):
with open(filename, "rb") as f:
try:
return loads(f.read())
except Exception, error:
log("ERROR: {0:s}\n{1:s}", error, format_exc())
return {}
def save(self, filename):
with open(filename, "wb") as f:
f.write(dumps(self.data["feeds"]))
@handler("check_feeds")
def _on_check_feeds(self):
feeds = self.data["feeds"]
for feed in chain(*feeds.values()):
if feed.next < time():
for line in feed():
self.fire(PRIVMSG(feed.target, line))
feed.reset()
|
mit
| -5,629,226,765,630,201,000 | 24.884211 | 77 | 0.459672 | false |
briancurtin/python-openstacksdk
|
openstack/tests/functional/network/v2/test_auto_allocated_topology.py
|
1
|
2426
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.tests.functional import base
class TestAutoAllocatedTopology(base.BaseFunctionalTest):
NETWORK_NAME = 'auto_allocated_network'
NETWORK_ID = None
PROJECT_ID = None
@classmethod
def setUpClass(cls):
super(TestAutoAllocatedTopology, cls).setUpClass()
projects = [o.project_id for o in cls.conn.network.networks()]
cls.PROJECT_ID = projects[0]
@classmethod
def tearDownClass(cls):
res = cls.conn.network.delete_auto_allocated_topology(cls.PROJECT_ID)
cls.assertIs(None, res)
def test_dry_run_option_pass(self):
# Dry run will only pass if there is a public network
networks = self.conn.network.networks()
self._set_network_external(networks)
# Dry run option will return "dry-run=pass" in the 'id' resource
top = self.conn.network.validate_auto_allocated_topology(
self.PROJECT_ID)
self.assertEqual(self.PROJECT_ID, top.project)
self.assertEqual('dry-run=pass', top.id)
def test_show_no_project_option(self):
top = self.conn.network.get_auto_allocated_topology()
project = self.conn.session.get_project_id()
network = self.conn.network.get_network(top.id)
self.assertEqual(top.project_id, project)
self.assertEqual(top.id, network.id)
def test_show_project_option(self):
top = self.conn.network.get_auto_allocated_topology(self.PROJECT_ID)
network = self.conn.network.get_network(top.id)
self.assertEqual(top.project_id, network.project_id)
self.assertEqual(top.id, network.id)
self.assertEqual(network.name, 'auto_allocated_network')
def _set_network_external(self, networks):
for network in networks:
if network.name == 'public':
self.conn.network.update_network(network, is_default=True)
|
apache-2.0
| 3,718,609,966,521,981,400 | 38.770492 | 77 | 0.6892 | false |
gustavofonseca/packtools
|
packtools/domain.py
|
1
|
20489
|
# coding:utf-8
"""The domain-specific adapters.
An adapter is an object that provides domain-specific apis for another object,
called adaptee.
Examples are: XMLValidator and XMLPacker objects, which provide SciELO PS
validation behaviour and packaging functionality, respectively.
"""
from __future__ import unicode_literals
import logging
from copy import deepcopy
try:
import reprlib
except ImportError:
import repr as reprlib
from lxml import etree
from . import utils, catalogs, style_errors, exceptions
__all__ = ['XMLValidator', 'HTMLGenerator']
LOGGER = logging.getLogger(__name__)
def _get_public_ids(sps_version):
"""Returns the set of allowed public ids for the XML based on its version.
"""
if sps_version in ['pre-sps', 'sps-1.1']:
return frozenset(catalogs.ALLOWED_PUBLIC_IDS_LEGACY)
else:
return frozenset(catalogs.ALLOWED_PUBLIC_IDS)
def _init_sps_version(xml_et, supported_versions=None):
"""Returns the SPS spec version for `xml_et` or raises ValueError.
It also checks if the version is currently supported.
:param xml_et: etree instance.
:param supported_versions: (optional) the default value is set by env var `PACKTOOLS_SUPPORTED_SPS_VERSIONS`.
"""
if supported_versions is None:
supported_versions = catalogs.CURRENTLY_SUPPORTED_VERSIONS
doc_root = xml_et.getroot()
version_from_xml = doc_root.attrib.get('specific-use', None)
if version_from_xml is None:
raise exceptions.XMLSPSVersionError('cannot get the SPS version from /article/@specific-use')
if version_from_xml not in supported_versions:
raise exceptions.XMLSPSVersionError('version "%s" is not currently supported' % version_from_xml)
else:
return version_from_xml
def StdSchematron(schema_name):
"""Returns an instance of `isoschematron.Schematron`.
A standard schematron is one bundled with packtools.
The returned instance is cached due to performance reasons.
:param schema_name: The logical name of schematron file in the package `catalog`.
"""
cache = utils.setdefault(StdSchematron, 'cache', lambda: {})
if schema_name in cache:
return cache[schema_name]
else:
try:
schema_path = catalogs.SCHEMAS[schema_name]
except KeyError:
raise ValueError('unrecognized schema: "%s"' % schema_name)
schematron = utils.get_schematron_from_filepath(schema_path)
cache[schema_name] = schematron
return schematron
def XSLT(xslt_name):
"""Returns an instance of `etree.XSLT`.
The returned instance is cached due to performance reasons.
"""
cache = utils.setdefault(XSLT, 'cache', lambda: {})
if xslt_name in cache:
return cache[xslt_name]
else:
try:
xslt_doc = etree.parse(catalogs.HTML_GEN_XSLTS[xslt_name])
except KeyError:
raise ValueError('unrecognized xslt: "%s"' % xslt_name)
xslt = etree.XSLT(xslt_doc)
cache[xslt_name] = xslt
return xslt
#----------------------------------
# validators for etree._ElementTree
#
# a validator is an object that
# provides the method ``validate``,
# with the following signature:
# validate(xmlfile: etree._ElementTree) -> Tuple(bool, list)
#----------------------------------
class PyValidator(object):
"""Style validations implemented in Python.
"""
def __init__(self, pipeline=catalogs.StyleCheckingPipeline, label=u''):
self.ppl = pipeline()
self.label = label
def validate(self, xmlfile):
errors = next(self.ppl.run(xmlfile, rewrap=True))
for error in errors:
error.label = self.label
return bool(errors), errors
class DTDValidator(object):
"""DTD validations.
"""
def __init__(self, dtd, label=u''):
self.dtd = dtd
self.label = label
def validate(self, xmlfile):
"""Validate xmlfile against the given DTD.
Returns a tuple comprising the validation status and the errors list.
"""
result = self.dtd.validate(xmlfile)
errors = [style_errors.SchemaStyleError(err, label=self.label)
for err in self.dtd.error_log]
return result, errors
class SchematronValidator(object):
"""Style validations implemented in Schematron.
"""
def __init__(self, sch, label=u''):
self.sch = sch
self.label = label
@classmethod
def from_catalog(cls, ref, **kwargs):
"""Get an instance based on schema's reference name.
:param ref: The reference name for the schematron file in
:data:`packtools.catalogs.SCH_SCHEMAS`.
"""
return cls(StdSchematron(ref), **kwargs)
def validate(self, xmlfile):
"""Validate xmlfile against the given Schematron schema.
Returns a tuple comprising the validation status and the errors list.
"""
result = self.sch.validate(xmlfile)
errors = [style_errors.SchematronStyleError(err, label=self.label)
for err in self.sch.error_log]
return result, errors
def iter_schematronvalidators(iterable):
"""Returns a generator of :class:`packtools.domain.SchematronValidator`.
:param iterable: an iterable where each item follows one of the forms
``Iterable[isoschematron.Schematron]`` or
``Iterable[Tuple[isoschematron.Schematron, str]]``. The
latter sets the label attribute of the validator instance.
"""
for item in iterable:
try:
sch_obj, sch_label = item
except TypeError:
sch_obj = item
sch_label = u''
validator = SchematronValidator(sch_obj, label=sch_label)
yield validator
#--------------------------------
# adapters for etree._ElementTree
#--------------------------------
class XMLValidator(object):
"""Adapter that performs SPS validations.
SPS validation stages are:
- JATS 1.0 or PMC 3.0 (as bound by the doctype declaration or passed
explicitly)
- SciELO Style - ISO Schematron
- SciELO Style - Python based pipeline
:param file: etree._ElementTree instance.
:param sps_version: the version of the SPS that will be the basis for validation.
:param dtd: (optional) etree.DTD instance. If not provided, we try the external DTD.
:param style_validators: (optional) list of
:class:`packtools.domain.SchematronValidator`
objects.
"""
def __init__(self, file, dtd=None, style_validators=None):
assert isinstance(file, etree._ElementTree)
self.lxml = file
self.doctype = self.lxml.docinfo.doctype
self.dtd = dtd or self.lxml.docinfo.externalDTD
self.source_url = self.lxml.docinfo.URL
self.public_id = self.lxml.docinfo.public_id
self.encoding = self.lxml.docinfo.encoding
if style_validators:
self.style_validators = list(style_validators)
else:
self.style_validators = []
@classmethod
def parse(cls, file, no_doctype=False, sps_version=None,
supported_sps_versions=None, extra_sch_schemas=None, **kwargs):
"""Factory of XMLValidator instances.
If `file` is not an etree instance, it will be parsed using
:func:`packtools.utils.XML`.
If the DOCTYPE is declared, its public id is validated against a white list,
declared by :data:`ALLOWED_PUBLIC_IDS` module variable. The system id is ignored.
By default, the allowed values are:
- SciELO PS >= 1.2:
- ``-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.0 20120330//EN``
- SciELO PS 1.1:
- ``-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.0 20120330//EN``
- ``-//NLM//DTD Journal Publishing DTD v3.0 20080202//EN``
:param file: Path to the XML file, URL, etree or file-object.
:param no_doctype: (optional) if missing DOCTYPE declaration is accepted.
:param sps_version: (optional) force the style validation against a SPS version.
:param supported_sps_versions: (optional) list of supported versions. the
only way to bypass this restriction is by using the arg `sps_version`.
:param extra_sch_schemas: (optional) list of extra Schematron schemas.
"""
try:
et = utils.XML(file)
except TypeError:
# We hope it is an instance of etree.ElementTree. If it is not,
# it will fail in the next lines.
et = file
# can raise exception
sps_version = sps_version or _init_sps_version(et, supported_sps_versions)
# get the right Schematron validator based on the value of ``sps_version``
# and then mix it with the list of schemas supplied by the user.
LOGGER.info('auto-loading style validations for version "%s"', sps_version)
auto_loaded_sch_label = u'@' + sps_version
style_validators = [
SchematronValidator.from_catalog(sps_version,
label=auto_loaded_sch_label),
PyValidator(label=auto_loaded_sch_label), # the python based validation pipeline
]
if extra_sch_schemas:
style_validators += list(
iter_schematronvalidators(extra_sch_schemas))
allowed_public_ids = _get_public_ids(sps_version)
# DOCTYPE declaration must be present by default. This behaviour can
# be changed by the `no_doctype` arg.
LOGGER.info('fetching the DOCTYPE declaration')
doctype = et.docinfo.doctype
if not doctype and not no_doctype:
raise exceptions.XMLDoctypeError(
'cannot get the DOCTYPE declaration')
# if there exists a DOCTYPE declaration, ensure its PUBLIC-ID is
# supported.
LOGGER.info('fetching the PUBLIC-ID in DOCTYPE declaration')
public_id = et.docinfo.public_id
if doctype and public_id not in allowed_public_ids:
raise exceptions.XMLDoctypeError('invalid DOCTYPE public id')
return cls(et, style_validators=style_validators, **kwargs)
@property
def sps_version(self):
doc_root = self.lxml.getroot()
sps_version = doc_root.attrib.get('specific-use', None)
return sps_version
@property
def dtd_validator(self):
if self.dtd:
return DTDValidator(self.dtd)
else:
return None
@utils.cachedmethod
def validate(self):
"""Validate the source XML against JATS DTD.
Returns a tuple comprising the validation status and the errors list.
"""
if self.dtd_validator is None:
raise exceptions.UndefinedDTDError('cannot validate (DTD is not set)')
result_tuple = self.dtd_validator.validate(self.lxml)
return result_tuple
@utils.cachedmethod
def validate_style(self):
"""Validate the source XML against SPS-Style Tagging guidelines.
Returns a tuple comprising the validation status and the errors list.
"""
errors = []
for validator in self.style_validators:
LOGGER.info('running validator "%s"', repr(validator))
errors += validator.validate(self.lxml)[1]
result = not bool(errors)
return result, errors
def validate_all(self, fail_fast=False):
"""Runs all validations.
First, the XML is validated against the DTD (calling :meth:`validate`).
If no DTD is provided and the argument ``fail_fast == True``, a ``TypeError``
is raised. After that, the XML is validated against the SciELO style
(calling :meth:`validate_style`).
:param fail_fast: (optional) raise ``TypeError`` if the DTD has not been loaded.
"""
try:
v_result, v_errors = self.validate()
except exceptions.UndefinedDTDError:
if fail_fast:
raise
else:
v_result = None
v_errors = []
s_result, s_errors = self.validate_style()
val_status = False if s_result is False else v_result
val_errors = v_errors + s_errors
return val_status, val_errors
def _annotate_error(self, element, error):
"""Add an annotation prior to `element`, with `error` as the content.
The annotation is a comment added prior to `element`.
:param element: etree instance to be annotated.
:param error: string of the error.
"""
notice_element = etree.Element('SPS-ERROR')
notice_element.text = error
element.addprevious(etree.Comment('SPS-ERROR: %s' % error))
def annotate_errors(self, fail_fast=False):
"""Add notes on all elements that have errors.
The errors list is generated as the result of calling :meth:`validate_all`.
"""
status, errors = self.validate_all(fail_fast=fail_fast)
mutating_xml = deepcopy(self.lxml)
if status is True:
return mutating_xml
err_pairs = []
for error in errors:
try:
err_element = error.get_apparent_element(mutating_xml)
except ValueError:
err_element = mutating_xml.getroot()
err_pairs.append((err_element, error.message))
for el, em in err_pairs:
self._annotate_error(el, em)
return mutating_xml
def __repr__(self):
arg_names = [u'lxml', u'sps_version', u'dtd']
arg_values = [reprlib.repr(getattr(self, arg)) for arg in arg_names]
arg_names[0] = u'file'
args = zip(arg_names, arg_values)
attrib_args = (u'{}={}'.format(name, value) for name, value in args)
return '<XMLValidator object at 0x%x (%s)>' % (
id(self), u', '.join(attrib_args))
@property
def meta(self):
"""Article metadata.
"""
parsed_xml = self.lxml
xml_nodes = {
"journal_title": "front/journal-meta/journal-title-group/journal-title",
"journal_eissn": "front/journal-meta/issn[@pub-type='epub']",
"journal_pissn": "front/journal-meta/issn[@pub-type='ppub']",
"article_title": "front/article-meta/title-group/article-title",
"issue_year": "front/article-meta/pub-date/year",
"issue_volume": "front/article-meta/volume",
"issue_number": "front/article-meta/issue",
}
metadata = {'filename': self.source_url}
for node_k, node_v in xml_nodes.items():
node = parsed_xml.find(node_v)
metadata[node_k] = getattr(node, 'text', None)
return metadata
@property
def assets(self):
"""Lists all static assets referenced by the XML.
"""
return utils.get_static_assets(self.lxml)
def lookup_assets(self, base):
"""Look for each asset in `base`, and returns a list of tuples
with the asset name and its presence status.
:param base: any container that implements membership tests, i.e.
it must support the ``in`` operator.
"""
return [(asset, asset in base) for asset in self.assets]
class HTMLGenerator(object):
"""Adapter that generates HTML from SPS XML.
Basic usage:
.. code-block:: python
from lxml import etree
xml = etree.parse('valid-sps-file.xml')
generator = HTMLGenerator(xml)
html = generator.generate('pt')
html_string = etree.tostring(html, encoding='unicode', method='html')
:param file: etree._ElementTree instance.
:param xslt: (optional) etree.XSLT instance. If not provided, the default XSLT is used.
:param css: (optional) URI for a CSS file.
"""
def __init__(self, file, xslt=None, css=None, print_css=None, js=None, permlink=None, url_article_page=None, url_download_ris=None):
assert isinstance(file, etree._ElementTree)
self.lxml = file
self.xslt = xslt or XSLT('root-html-2.0.xslt')
self.css = css
self.print_css = print_css
self.js = js
self.permlink = permlink
self.url_article_page = url_article_page
self.url_download_ris = url_download_ris
@classmethod
def parse(cls, file, valid_only=True, **kwargs):
"""Factory of HTMLGenerator instances.
If `file` is not an etree instance, it will be parsed using
:func:`XML`.
:param file: Path to the XML file, URL, etree or file-object.
:param valid_only: (optional) prevents the generation of HTML for invalid XMLs.
"""
if isinstance(file, etree._ElementTree):
et = file
else:
et = utils.XML(file)
if valid_only:
is_valid, _ = XMLValidator.parse(et).validate_all()
if not is_valid:
raise ValueError('invalid XML')
return cls(et, **kwargs)
@property
def languages(self):
"""The language of the main document plus all translations.
"""
return self.lxml.xpath(
'/article/@xml:lang | //sub-article[@article-type="translation"]/@xml:lang')
@property
def language(self):
"""The language of the main document.
"""
try:
return self.lxml.xpath('/article/@xml:lang')[0]
except IndexError:
return None
def _is_aop(self):
""" Has the document been published ahead-of-print?
"""
volume = self.lxml.findtext('front/article-meta/volume')
number = self.lxml.findtext('front/article-meta/issue')
return volume == '00' and number == '00'
def _get_issue_label(self):
volume = self.lxml.findtext('front/article-meta/volume')
number = self.lxml.findtext('front/article-meta/issue')
return 'vol.%s n.%s' % (volume, number)
def _get_bibliographic_legend(self):
return '[#BIBLIOGRAPHIC LEGEND#]'
issue = 'ahead of print' if self._is_aop() else self._get_issue_label()
abrev_title = self.lxml.findtext(
'front/journal-meta/journal-title-group/abbrev-journal-title[@abbrev-type="publisher"]')
city = '[#CITY#]'
pubdate = self.lxml.xpath(
'/article/front/article-meta/pub-date[@pub-type="epub-ppub" or @pub-type="epub"][1]')[0]
pubtype = 'Epub' if pubdate.xpath('@pub-type')[0] == 'epub' else ''
day = pubdate.findtext('day')
month = pubdate.findtext('month')
year = pubdate.findtext('year')
dates = ' '.join([month, year]) if month else year
parts = [abrev_title, issue, city, pubtype, dates]
return ' '.join([part for part in parts if part])
def __iter__(self):
"""Iterates thru all languages and generates the HTML for each one.
"""
for lang in self.languages:
res_html = self.generate(lang)
yield lang, res_html
def generate(self, lang):
"""Generates the HTML in the language ``lang``.
:param lang: 2-digit ISO 639-1 text string.
"""
main_language = self.language
if main_language is None:
raise exceptions.HTMLGenerationError('main document language is '
'undefined.')
if lang not in self.languages:
raise ValueError('unrecognized language: "%s"' % lang)
is_translation = lang != main_language
return self.xslt(
self.lxml,
article_lang=etree.XSLT.strparam(lang),
is_translation=etree.XSLT.strparam(str(is_translation)),
bibliographic_legend=etree.XSLT.strparam(self._get_bibliographic_legend()),
issue_label=etree.XSLT.strparam(self._get_issue_label()),
styles_css_path=etree.XSLT.strparam(self.css or ''),
print_styles_css_path=etree.XSLT.strparam(self.print_css or ''),
js_path=etree.XSLT.strparam(self.js or ''),
permlink=etree.XSLT.strparam(self.permlink or ''),
url_article_page=etree.XSLT.strparam(self.url_article_page or ''),
url_download_ris=etree.XSLT.strparam(self.url_download_ris or ''),
)
|
bsd-2-clause
| -8,368,828,022,911,303,000 | 33.9046 | 136 | 0.61145 | false |
datamade/yournextmp-popit
|
candidates/tests/test_revert.py
|
1
|
8163
|
import json
from mock import patch
from string import Template
from django.db.models import F
from django_webtest import WebTest
from popolo.models import Identifier
from candidates.models import MembershipExtra, PersonExtra
from .auth import TestUserMixin
from . import factories
example_timestamp = '2014-09-29T10:11:59.216159'
example_version_id = '5aa6418325c1a0bb'
# FIXME: add a test to check that unauthorized people can't revert
class TestRevertPersonView(TestUserMixin, WebTest):
version_template = Template('''[
{
"username": "symroe",
"information_source": "Just adding example data",
"ip": "127.0.0.1",
"version_id": "35ec2d5821176ccc",
"timestamp": "2014-10-28T14:32:36.835429",
"data": {
"name": "Tessa Jowell",
"id": "2009",
"twitter_username": "",
"standing_in": {
"2010": {
"post_id": "65808",
"name": "Dulwich and West Norwood",
"mapit_url": "http://mapit.mysociety.org/area/65808"
},
"2015": {
"post_id": "65808",
"name": "Dulwich and West Norwood",
"mapit_url": "http://mapit.mysociety.org/area/65808"
}
},
"homepage_url": "",
"birth_date": null,
"wikipedia_url": "https://en.wikipedia.org/wiki/Tessa_Jowell",
"party_memberships": {
"2010": {
"id": "$slug",
"name": "Labour Party"
},
"2015": {
"id": "$slug",
"name": "Labour Party"
}
},
"email": "jowell@example.com"
}
},
{
"username": "mark",
"information_source": "An initial version",
"ip": "127.0.0.1",
"version_id": "5469de7db0cbd155",
"timestamp": "2014-10-01T15:12:34.732426",
"data": {
"name": "Tessa Jowell",
"id": "2009",
"twitter_username": "",
"standing_in": {
"2010": {
"post_id": "65808",
"name": "Dulwich and West Norwood",
"mapit_url": "http://mapit.mysociety.org/area/65808"
}
},
"homepage_url": "http://example.org/tessajowell",
"birth_date": "1947-09-17",
"wikipedia_url": "",
"party_memberships": {
"2010": {
"id": "$slug",
"name": "Labour Party"
}
},
"email": "tessa.jowell@example.com"
}
}
]
''')
def setUp(self):
wmc_area_type = factories.AreaTypeFactory.create()
gb_parties = factories.PartySetFactory.create(
slug='gb', name='Great Britain'
)
election = factories.ElectionFactory.create(
slug='2015',
name='2015 General Election',
area_types=(wmc_area_type,)
)
earlier_election = factories.EarlierElectionFactory.create(
slug='2010',
name='2010 General Election',
area_types=(wmc_area_type,)
)
commons = factories.ParliamentaryChamberFactory.create()
post_extra = factories.PostExtraFactory.create(
elections=(election, earlier_election),
base__organization=commons,
slug='65808',
base__label='Member of Parliament for Dulwich and West Norwood',
party_set=gb_parties,
)
factories.PartyFactory.reset_sequence()
party_extra = factories.PartyExtraFactory.create()
self.party_slug = party_extra.slug
person_extra = factories.PersonExtraFactory.create(
base__id=2009,
base__name='Tessa Jowell',
base__email='jowell@example.com',
versions=self.version_template.substitute(slug=self.party_slug)
)
person_extra.base.links.create(
url='',
note='wikipedia',
)
gb_parties.parties.add(party_extra.base)
factories.CandidacyExtraFactory.create(
election=election,
base__person=person_extra.base,
base__post=post_extra.base,
base__on_behalf_of=party_extra.base
)
factories.CandidacyExtraFactory.create(
election=earlier_election,
base__person=person_extra.base,
base__post=post_extra.base,
base__on_behalf_of=party_extra.base
)
@patch('candidates.views.version_data.get_current_timestamp')
@patch('candidates.views.version_data.create_version_id')
def test_revert_to_earlier_version(
self,
mock_create_version_id,
mock_get_current_timestamp,
):
mock_get_current_timestamp.return_value = example_timestamp
mock_create_version_id.return_value = example_version_id
response = self.app.get('/person/2009/update', user=self.user)
revert_form = response.forms['revert-form-5469de7db0cbd155']
revert_form['source'] = 'Reverting to version 5469de7db0cbd155 for testing purposes'
response = revert_form.submit()
self.assertEqual(response.status_code, 302)
self.assertEqual(response.location, 'http://localhost:80/person/2009')
# Now get the person from the database and check if the
# details are the same as the earlier version:
person_extra = PersonExtra.objects.get(base__id=2009)
# First check that a new version has been created:
new_versions = json.loads(person_extra.versions)
self.maxDiff = None
expected_new_version = {
'data': {
'facebook_page_url': '',
'facebook_personal_url': '',
'name': u'Tessa Jowell',
'honorific_suffix': '',
'party_ppc_page_url': '',
'gender': '',
'image': None,
'linkedin_url': '',
'id': u'2009',
'other_names': [],
'honorific_prefix': '',
'standing_in': {
u'2010':
{
u'post_id': u'65808',
u'name': u'Dulwich and West Norwood',
}
},
'homepage_url': 'http://example.org/tessajowell',
'twitter_username': '',
'wikipedia_url': '',
'party_memberships': {
u'2010': {
u'id': unicode(self.party_slug),
u'name': u'Labour Party'
}
},
'birth_date': '1947-09-17',
'email': u'tessa.jowell@example.com'
},
'information_source': u'Reverting to version 5469de7db0cbd155 for testing purposes',
'timestamp': '2014-09-29T10:11:59.216159',
'username': u'john',
'version_id': '5aa6418325c1a0bb'
}
self.assertEqual(new_versions[0], expected_new_version)
self.assertEqual(person_extra.base.birth_date, '1947-09-17')
self.assertEqual(person_extra.homepage_url, 'http://example.org/tessajowell')
candidacies = MembershipExtra.objects.filter(
base__person=person_extra.base,
base__role=F('election__candidate_membership_role')
).order_by('election__election_date')
self.assertEqual(len(candidacies), 1)
self.assertEqual(candidacies[0].election.slug, '2010')
# The homepage link should have been added and the Wikipedia
# one removed:
self.assertEqual(1, person_extra.base.links.count())
remaining_link = person_extra.base.links.first()
self.assertEqual(remaining_link.note, 'homepage')
|
agpl-3.0
| 2,455,165,964,185,914,400 | 35.28 | 96 | 0.515252 | false |
hoondongkim/syntaxnet-kr
|
SJtoUD_POS.py
|
1
|
10446
|
import re
import codecs
import os
"""
[1] Main Function
"""
def main():
directory = os.getcwd() + '/InputDataType1'
filename = 'BTHO0437.txt'
filename = os.path.join(directory, filename)
f = open(filename, 'r', encoding='utf-16')
is_inside = False
line_counter = 0
OUT_FILENAME = "OutputDataType1\kr-ud-dev.conllu"
with codecs.open(OUT_FILENAME, "w", "utf-8") as file:
"""
ํ ์ค์ ์ฝ์ ๋๋ง๋ค ๊ทธ ๊ฒฐ๊ณผ๋ฅผ sniparray, posarray์ ์ ์ฅํฉ๋๋ค.
ํ ์ค์์ ํํ์๋ฅผ ์ฝ์ ๋๋ง๋ค ๋ฐ๋ก๋ฐ๋ก ์ถ๋ ฅํ์ง ์๊ณ
๋ค ์ฝ๊ณ ๋ ๋ค์ ๊ทธ ๊ฒฐ๊ณผ๋ฅผ ์ถ๋ ฅํ๋๋ฐ ๊ทธ ์ด์ ๋ ๋ค์๊ณผ ๊ฐ์ต๋๋ค.
'์ฎ๊ฒจ์ก๋ค.' ์ ๊ฒฝ์ฐ
์ฎ๊ธฐ/VV + ์ด/EC + ์ง/VX + ์/EP + ๋ค/EF + ./SF
๋ก ๋ถ๋ฆฌ๋์ด ์ ๋จ์ด(์ฎ๊ฒจ์ก๋ค)์์ ์ค์ ๋ก ๋ณด์กด๋ ํํ์๋ '๋ค', '.'๋ง ๋จ๊ฒ ๋๋ฏ๋ก
์ฎ๊ธฐ/VV + ์ด/EC + ์ง/VX + ์/EP + ๋ค/EF + ./SF
๋ก ๋ถ๋ฆฌํ๋ ๋์
์ฎ๊ฒจ์ก/VV + EC + VX + EP + ๋ค/EF + ./SF
๋ก ๋ถ๋ฆฌํฉ๋๋ค.
์ด ๋ ์ ๋จ์ด๊ฐ ํํ์ ๊ธฐ๋ณธํ๋ค์ ์กฐํฉ์ธ์ง ์๋๋ฉด ๋ณํ๋์๋์ง๋ฅผ ํ์
ํ๊ธฐ ์ํด
๋ฒํผ๋ฅผ ์ฌ์ฉํ์ต๋๋ค (snipbuffer, posbuffer)
์ด์ ๋ฐ๋ผ ๊ฐ ์ฝ์ด๋ค์ผ ๋์ ์ํฉ์ 4๊ฐ์ง๋ก ๊ตฌ๋ถํ๊ณ
ํ์ฌ ์ฝ๊ณ ์๋ ์์น๋ฅผ ํ์
ํ๊ธฐ ์ํด wordcount๋ฅผ ๋์
ํ๊ณ
๋ฌธ์ฅ์ ๋์ธ์ง๋ฅผ ํ์
ํ๊ธฐ ์ํด end_of_sequence ๋ฅผ ๋์
ํ์ต๋๋ค.
"""
sniparray = []
posarray = []
for line in f:
#print (line)
#break
chomped_line = line.rstrip()
if chomped_line == "<p>":
pass
# is_inside = True
#print ("inside")
elif chomped_line == "</p>":
#print()
# is_inside = False
for i in range (0, len(sniparray)):
print (i+1,"\t", "".join(sniparray[i]),"\t", "+".join(posarray[i]))
sniparray = []
posarray = []
else:
m1 = re.match('^([^\t]+)\t([^\t]+)\t([^\t]+)$', chomped_line)
if m1:
#print ("linenumber", m1.group(1))
#print ("word", m1.group(2))
word = m1.group(2)
#print ("parsed", m1.group(3))
parsed = m1.group(3)
snip_pairs = re.split(' \+ ', parsed) # +sign needs to be escaped in regex #๋์ง/VV + ์ด/EC
# split_word = m1.group(2)
snip_pairs_2d = []
for snip_pair in snip_pairs:
# line_counter += 1
# print ("snip_pair = ", snip_pair) #๋์ง/VV
m2 = re.match('^([^\/]+)\/([^\/]+)$', snip_pair)
if m2:
snip = m2.group(1)
pos = m2.group(2)
#print ("line", line_counter)
#print ("snip", snip)
#print ("pos", pos)
#print (line_counter,"\t",snip,"\t",pos)
snip_pairs_2d.append([snip, pos])
#print (snip_pairs_2d)
#print (word)
buffer_start = 0
bufer_end = len(snip_pairs_2d)-1
snipbuffer = []
posbuffer = []
word = list(word)
#print(word)
word_counter = 0
end_of_sequence = False
buffer = False
for snip_pair in snip_pairs_2d:
if snip_pairs_2d[-1] == snip_pair:
end_of_sequence = True
# 4 cases
# 1) if snippet is inside the word & no buffer
# 2) if snippet is inside the word & there is buffer
# 3) if snippet is NOT inside the word & no buffer
# 4) if snippet is NOT inside the word & there is buffer
# 1) if snippet is inside the word & no buffer
# => Print current word
if (snip_pair[0] in word[word_counter:]) and (buffer == False):
# print(1)
sniparray.append([snip_pair[0]])
posarray.append([snip_pair[1]])
buffer_start += len(snip_pair[0])
buffer = False
word_counter +=1
# 2) if snippet is inside the word & there is buffer
# => Print Buffer and Print current word
elif (snip_pair[0] in word[word_counter:]) and (buffer == True):
# print(2)
#print("Where is corresponding word:" word.index(snip_pair[0]))
buffer_end = word.index(snip_pair[0])
snipbuffer = word[buffer_start:buffer_end]
sniparray.append(snipbuffer)
posarray.append(posbuffer)
buffer_start +=len(snip_pair[0])
sniparray.append([snip_pair[0]])
posarray.append([snip_pair[1]])
buffer = False
word_counter +=1
# 3) if snippet is NOT inside the word & no buffer
# if End of Sequence => Print current word
# if not end of sequence => Do Not Print Buffer, Buffer Start
elif not (snip_pair[0] in word[word_counter:]) and (buffer == False):
if end_of_sequence == True:
# print("3-1")
# Print Current word(=remaining part in the 'word')
snipbuffer = word[buffer_start:]
sniparray.append(snipbuffer)
posarray.append([snip_pair[1]])
word_counter +=1
else:
# print("3-2")
# Buffer Start!
# snip buffer will be formed right before when buffer is eliminated
# just don't change buffer_start
posbuffer=[]
posbuffer.append(snip_pair[1])
buffer = True
word_counter +=1
# 4) if snippet is NOT inside the word & there is buffer
# if End of Sequence => Print Buffer and print current word
# if not end of sequence => Add buffer
else:
if end_of_sequence == True:
# print("4-1")
# Print Buffer and print current word
# buffer_end = len(word)-1
snipbuffer = word[buffer_start:]
sniparray.append(snipbuffer)
posbuffer.append(snip_pair[1])
posarray.append(posbuffer)
word_counter +=1
else:
# print("4-2")
# Add buffer
posbuffer.append(snip_pair[1])
word_counter +=1
if end_of_sequence == True:
continue
"""
์๋ General Function, CoNLL-U Function ์ ๊ฑด๋๋ฆฌ์ง ์์์ต๋๋ค.
์ด๋ป๊ฒ ํฉ์น ์ ์์๊น์?
"""
"""
[2] General Function
"""
def retOutputStr(line_counter, snip, pos):
returnStr = str(line_counter) + "\t" + getFormStr(snip) + "\t" + getLemmaStr(snip) + "\t" + getUpostagStr(pos) + "\t" + \
getXpostagStr(pos) + "\t" + getFeatsStr(pos) + "\t" + getHeadStr(pos) + "\t" + getDeprelStr(pos) + "\t" + \
getDepsStr(pos)
return returnStr
def doPrintAndWrite(line_counter, snip, pos, file):
outputStr = retOutputStr(line_counter, snip, pos)
print(outputStr)
file.write(outputStr + "\r\n")
"""
[3] CoNLL-U Format Function
1.ID: Word index, integer starting at 1 for each new sentence; may be a range for tokens with multiple words.
2.FORM: Word form or punctuation symbol.
3.LEMMA: Lemma or stem of word form.
4.UPOSTAG: Universal part-of-speech tag drawn from our revised version of the Google universal POS tags.
5.XPOSTAG: Language-specific part-of-speech tag; underscore if not available.
6.FEATS: List of morphological features from the universal feature inventory or from a defined language-specific extension; underscore if not available.
7.HEAD: Head of the current token, which is either a value of ID or zero (0).
8.DEPREL: Universal Stanford dependency relation to the HEAD (root iff HEAD = 0) or a defined language-specific subtype of one.
9.DEPS: List of secondary dependencies (head-deprel pairs). i.MISC: Any other annotation.
"""
# 2.FORM - FORM is not strictly required (cf. Japna). Since snip_pairs currently splits one row into multiple rows, FORM is left empty.
def getFormStr(snip):
return "_"
# 3.LEMMA
def getLemmaStr(snip):
return snip
# 4.UPOSTAG - more mapping rules still need to be added to this dictionary.
def getUpostagStr(pos):
tagDic = dict()
tagDic['NNG'] = 'NOUN'
tagDic['VV'] = 'VERB'
tagDic['MM'] = 'DET'
tagDic['SF'] = 'PUNCT'
if pos in tagDic.keys():
return tagDic[pos]
else :
return pos
# 5.XPOSTAG
def getXpostagStr(pos):
return pos
# 6.FEATS
def getFeatsStr(pos):
return "_"
# 7.HEAD
def getHeadStr(pos):
return "_"
# 8.DEPREL
def getDeprelStr(pos):
return "_"
# 9.DEPS
def getDepsStr(pos):
return "_"
"""
[4] Main Function Call
"""
if __name__ == "__main__":
main()
|
apache-2.0
| -8,477,834,371,595,856,000 | 35.221402 | 152 | 0.45487 | false |
h3rucutu/weevely-old
|
core/vector.py
|
1
|
1305
|
import types
class VectorList(list):
def get_vectors_by_interpreters(self, shells):
vect=[]
for v in self:
if v.interpreter in shells:
vect.append(v)
return vect
def get_vector_by_name(self, name):
for v in self:
if v.name == name:
return v
def get_names_list(self):
return [v.name for v in self]
def order(self, names):
sorted = [v for v in self for n in names if n == v.name]
print sorted
def __repr__(self):
for v in self:
print v
class Vector:
def __init__(self, interpreter, name, payloads):
self.interpreter = interpreter
self.name = name
self.payloads = {}
if isinstance(payloads, types.DictionaryType):
self.payloads = payloads
elif isinstance(payloads, types.ListType):
pos = 0
for p in payloads:
self.payloads[pos] = p
pos+=1
elif isinstance (payloads, types.StringTypes):
self.payloads[0] = payloads
else:
print "[!] Error declaring attack vector"
def __repr__(self):
return '[%s, %s, %s]' % (self.name,self.interpreter, self.payloads)
|
gpl-3.0
| 1,101,729,514,562,507,800 | 25.632653 | 76 | 0.524904 | false |
ianastewart/cwltc-admin
|
members/application_processor.py
|
1
|
3702
|
# Handle application process
from datetime import datetime
from members.models import Person, Membership, Subscription, TextBlock, AdultApplication
from members.services import (
subscription_create,
subscription_activate,
subscription_create_invoiceitems,
invoice_create,
)
from cardless.services import latest_active_mandate
from members.mail import create_draft_invoice_mail_task
from members.tasks import schedule_mail_task, schedule_invoice_payment_task
from members.views.invoice_views import create_invoice_payment_task
class ApplicationProcesser:
def __init__(self, person, request):
self.sub_year, self.start_date, self.end_date = Subscription.default_dates()
self.person = person
self.request = request
def process(self):
while self.person.linked:
self.person = self.person.linked
if self.person.membership_id is None:
self.person.state = Person.ACTIVE
self.person.save()
self._create_children_subs()
elif self.person.membership_id == Membership.PARENT:
self._create_sub(self.person)
self._create_children_subs()
elif self.person.membership_id == Membership.OFF_PEAK:
self._create_sub(self.person)
self._create_children_subs()
# elif self.person.membership_id == Membership.FULL:
# profile = self.person.adultapplication_set.all()[0]
# if profile.ability == AdultApplication.BEGINNER:
# beginner = True
# elif profile.ability < AdultApplication.INTERMEDIATE:
# lessons = True
# play_in = True
# else:
# play_in = True
else:
return False, "Unknown membership category"
invoice = invoice_create(self.person, self.sub_year, datetime.now().date())
invoice.tags.add("New member")
if invoice is None:
return False, "No invoice created"
message = f"Invoice {invoice.id} for {self.person.fullname} created "
templates = TextBlock.objects.filter(type=TextBlock.INVOICE, name="New member")
if len(templates) != 1:
message += " but no invoice template"
return False, message
mail_task = create_draft_invoice_mail_task(self.request, [invoice.id], templates[0])
mail_task.save()
schedule_mail_task(mail_task.id)
message += "and mailed "
if invoice.person.cardless_id:
mandate = latest_active_mandate(invoice.person.cardless_id)
if mandate and not mandate.payments_require_approval:
create_invoice_payment_task([invoice.id])
message += "and scheduled for automatic payment"
return True, message
def _create_children_subs(self):
for child in self.person.person_set.all():
self._create_sub(child)
def _create_sub(self, person):
""" Create active sub with invoice item"""
if person.state == Person.APPLIED:
person.state = Person.ACTIVE
person.save()
if person.membership_id:
sub = subscription_create(
person,
sub_year=self.sub_year,
start_month=self.start_date.month,
end_month=self.end_date.month,
membership_id=person.membership_id,
period=Subscription.ANNUAL,
new_member=True,
age_list=None,
)
subscription_activate(sub)
subscription_create_invoiceitems(sub, sub.start_date.month)
|
mit
| 7,603,111,895,722,632,000 | 36.77551 | 92 | 0.606699 | false |
iitis/PyLTEs
|
pyltes/network.py
|
1
|
9084
|
__author__ = 'Mariusz Slabicki, Konrad Polys'
from pyltes import devices
from pyltes import generator
from pyltes import printer
import math
import random
import pickle
import copy
class CellularNetwork:
"""Class describing cellular network"""
def __init__(self):
self.ue = []
self.bs = []
self.obstacles = []
self.constraintAreaMaxX = []
self.constraintAreaMaxY = []
self.radius = []
self.minTxPower = 10
self.maxTxPower = 40
self.minFemtoTxPower = 3
self.maxFemtoTxPower = 10
self.optimizationFunctionResults = None
self.Generator = generator.Generator(self)
self.Printer = printer.Printer(self)
self.powerConfigurator = []
self.colorConfigurator = []
def loadPowerConfigurator(self):
from pyltes import powerConfigurator
self.powerConfigurator = powerConfigurator.pygmoPowerConfigurator(self)
def loadColorConfigurator(self):
from modules import colorConfigurator
self.colorConfigurator = colorConfigurator.pygmoColorConfigurator(self)
def saveNetworkToFile(self, filename):
with open(filename+".pnf", 'wb') as f:
pickle.dump(self, f)
@classmethod
def loadNetworkFromFile(cls, filename):
with open(filename+".pnf", 'rb') as f:
return pickle.load(f)
def addOneBSTower(self, x_pos, y_pos, omnidirectional = False):
if omnidirectional == False:
for i in range(3):
bs = devices.BS()
bs.x = x_pos
bs.y = y_pos
bs.insidePower = 37
bs.outsidePower = 40
bs.angle = i * 120
bs.ID = len(self.bs)
bs.turnedOn = True
self.bs.append(copy.deepcopy(bs))
def printPowersInBS(self):
powers = []
for bs in self.bs:
powers.append(bs.outsidePower)
print(powers)
def connectUsersToNearestBS(self):
for ue in self.ue:
ue.connectToNearestBS(self.bs)
def connectUsersToTheBestBS(self):
for ue in self.ue:
ue.connectToTheBestBS(self.bs)
def setPowerInAllBS(self, outsidePowerLevel, insidePowerLevel=None):
if (insidePowerLevel==None):
insidePowerLevel = outsidePowerLevel - 3
for bs in self.bs:
if bs.useSFR:
bs.insidePower = insidePowerLevel
bs.outsidePower = outsidePowerLevel
else:
bs.insidePower = outsidePowerLevel
bs.outsidePower = outsidePowerLevel
def setRandomPowerInAllBS(self, powerLevel):
for bs in self.bs:
if bs.useSFR:
bs.insidePower = random.randint(0, powerLevel) - 3
bs.outsidePower = bs.insidePower + 3
else:
bs.outsidePower = random.randint(0, powerLevel)
bs.insidePower = bs.outsidePower
def setSmallestPossiblePowerInAllBS(self):
for bs in self.bs:
if bs.type == "MakroCell":
if bs.useSFR:
bs.insidePower = self.minTxPower - 3
bs.outsidePower = self.minTxPower
else:
bs.insidePower = self.minTxPower
bs.outsidePower = self.minTxPower
if bs.type == "FemtoCell":
bs.power = self.minFemtoTxPower
def setHighestPossiblePowerInAllBS(self):
for bs in self.bs:
if bs.type == "MakroCell":
bs.outsidePower = self.maxTxPower
if bs.type == "FemtoCell":
bs.power = self.maxFemtoTxPower
def setMiInAllBS(self, mi):
for bs in self.bs:
bs.mi = mi
def setColorRandomlyInAllBS(self):
for bs in self.bs:
bs.color = random.randint(1,3)
def setColorInAllBS(self, color):
for bs in self.bs:
bs.color = color
def getColorInAllBS(self):
for bs in self.bs:
print(bs.ID, bs.color)
def setColorInBS(self, bs, color):
self.bs[bs].color = color
def setRcInAllBS(self, Rc):
for bs in self.bs:
bs.Rc = Rc
def calculateSINRVectorForAllUE(self):
temp_measured_vector = []
for ue in self.ue:
for bs in self.bs:
if bs.ID == ue.connectedToBS:
calculatedSINR = ue.calculateSINR(self.bs)
temp_measured_vector.append(calculatedSINR)
return temp_measured_vector
def returnRealUEThroughputVectorRR(self):
numberOfConnectedUEToBS = []
max_UE_throughput_vector = []
real_UE_throughput_vector = []
for i in range(len(self.bs)):
numberOfConnectedUEToBS.append([0,0])
for ue in self.ue:
max_UE_throughput = ue.calculateMaxThroughputOfTheNode(self.bs) # need to be first to know where UE is
if (ue.inside):
numberOfConnectedUEToBS[ue.connectedToBS][0] += 1
else:
numberOfConnectedUEToBS[ue.connectedToBS][1] += 1
max_UE_throughput_vector.append(max_UE_throughput)
real_UE_throughput_vector.append(max_UE_throughput)
for i in range(len(self.ue)):
if (self.ue[i].inside):
real_UE_throughput_vector[i] = max_UE_throughput_vector[i] / numberOfConnectedUEToBS[self.ue[i].connectedToBS][0]
else:
real_UE_throughput_vector[i] = max_UE_throughput_vector[i] / numberOfConnectedUEToBS[self.ue[i].connectedToBS][1]
return real_UE_throughput_vector
def returnRealUEThroughputVectorFS(self):
sumOfInvThroughputPerBS = []
real_UE_throughput_vector = []
for i in range(len(self.bs)):
sumOfInvThroughputPerBS.append([0,0])
for ue in self.ue:
ue_throughput = ue.calculateMaxThroughputOfTheNode(self.bs)
if ue_throughput == 0:
if (ue.inside):
sumOfInvThroughputPerBS[ue.connectedToBS][0] += 1
else:
sumOfInvThroughputPerBS[ue.connectedToBS][1] += 1
else:
if (ue.inside):
sumOfInvThroughputPerBS[ue.connectedToBS][0] += 1.0 / ue_throughput
else:
sumOfInvThroughputPerBS[ue.connectedToBS][1] += 1.0 / ue_throughput
for ue in self.ue:
ue_throughput = ue.calculateMaxThroughputOfTheNode(self.bs)
if ue_throughput == 0:
if (ue.inside):
weight = 1.0 / sumOfInvThroughputPerBS[ue.connectedToBS][0]
else:
weight = 1.0 / sumOfInvThroughputPerBS[ue.connectedToBS][1]
else:
if (ue.inside):
weight = ((1.0 / ue_throughput) / sumOfInvThroughputPerBS[ue.connectedToBS][0])
else:
weight = ((1.0 / ue_throughput) / sumOfInvThroughputPerBS[ue.connectedToBS][1])
real_UE_throughput_vector.append(weight * ue_throughput)
return real_UE_throughput_vector
def returnNumberOfUEperBS(self):
numberOfConnectedUEToBS = []
for i in range(len(self.bs)):
zero = 0
numberOfConnectedUEToBS.append(zero)
for ue in self.ue:
numberOfConnectedUEToBS[ue.connectedToBS] += 1
return numberOfConnectedUEToBS
def returnAllBSinRange(self, x, y, txrange):
choosen_BS_vector = []
for bs in self.bs:
if math.sqrt((x-bs.x)**2+(y-bs.y)**2) <= txrange:
choosen_BS_vector.append(copy.deepcopy(bs.ID))
return choosen_BS_vector
def returnSumOfThroughput(self, bsnumber, step):
ue = devices.UE()
sumOfInternalThroughput = 0
internalBS = 0
sumOfExternalThroughput = 0
externalBS = 0
for x in range(0, round(self.constraintAreaMaxX), step):
for y in range(0, round(self.constraintAreaMaxY), step):
ue.x = x
ue.y = y
ue.connectToNearestBS(self.bs)
if ue.connectedToBS == bsnumber:
#if ue.distanceToBS(self.bs[bsnumber]) < self.bs[bsnumber].mi * self.bs[bsnumber].Rc:
if ue.inside:
sumOfInternalThroughput = sumOfInternalThroughput + ue.calculateMaxThroughputOfTheNode(self.bs)
internalBS = internalBS + 1
else:
sumOfExternalThroughput = sumOfExternalThroughput + ue.calculateMaxThroughputOfTheNode(self.bs)
externalBS = externalBS + 1
if externalBS != 0:
sumOfExternalThroughput = sumOfExternalThroughput/externalBS
if internalBS != 0:
sumOfInternalThroughput = sumOfInternalThroughput/internalBS
sumOfThroughput = sumOfExternalThroughput + sumOfInternalThroughput
return sumOfThroughput
|
mit
| -4,915,131,659,041,831,000 | 36.85 | 129 | 0.57882 | false |
hknyldz/pisitools
|
pisilinux/pisilinux/db/filesldb.py
|
1
|
2686
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014, Marcin Bojara
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
import os
import re
import plyvel
import hashlib
import gettext
__trans = gettext.translation('pisilinux', fallback=True)
_ = __trans.ugettext
import pisilinux
import pisilinux.context as ctx
class FilesLDB ():
def __init__(self):
self.files_ldb_path = os.path.join(ctx.config.info_dir(), ctx.const.files_ldb)
self.filesdb = plyvel.DB(self.files_ldb_path, create_if_missing=True)
if not [f for f in os.listdir(self.files_ldb_path) if f.endswith('.ldb')]:
if ctx.comar: self.destroy()
self.create_filesdb()
def __del__(self):
self.close()
def create_filesdb(self):
ctx.ui.info(pisilinux.util.colorize(_('Creating files database...'), 'green'))
installdb = pisilinux.db.installdb.InstallDB()
for pkg in installdb.list_installed():
ctx.ui.info(_('Adding \'%s\' to db... ') % pkg, noln=True)
files = installdb.get_files(pkg)
self.add_files(pkg, files)
ctx.ui.info(_('OK.'))
ctx.ui.info(pisilinux.util.colorize(_('done.'), 'green'))
def get_file(self, path):
return self.filesdb.get(hashlib.md5(path).digest()), path
def search_file(self, term):
pkg, path = self.get_file(term)
if pkg:
return [(pkg,[path])]
installdb = pisilinux.db.installdb.InstallDB()
found = []
for pkg in installdb.list_installed():
files_xml = open(os.path.join(installdb.package_path(pkg), ctx.const.files_xml)).read()
paths = re.compile('<Path>(.*?%s.*?)</Path>' % re.escape(term), re.I).findall(files_xml)
if paths:
found.append((pkg, paths))
return found
def add_files(self, pkg, files):
for f in files.list:
self.filesdb.put(hashlib.md5(f.path).digest(), pkg)
def remove_files(self, files):
for f in files:
self.filesdb.delete(hashlib.md5(f.path).digest())
def destroy(self):
ctx.ui.info(pisilinux.util.colorize(_('Cleaning files database folder... '), 'green'), noln=True)
for f in os.listdir(self.files_ldb_path): os.unlink(os.path.join(self.files_ldb_path, f))
ctx.ui.info(pisilinux.util.colorize(_('done.'), 'green'))
def close(self):
if not self.filesdb.closed: self.filesdb.close()
|
gpl-3.0
| -1,854,080,393,484,700,000 | 33.883117 | 105 | 0.616902 | false |
mouton5000/DiscreteEventApplicationEditor
|
test/testsArithmeticExpressions/MathFunctions/testATan.py
|
1
|
2813
|
__author__ = 'mouton'
from triggerExpressions import Evaluation
from unittest import TestCase
from math import pi, sqrt, atan
from arithmeticExpressions import ALitteral, Func, UndefinedLitteral, SelfLitteral
from database import Variable
class TestAtan(TestCase):
@classmethod
def setUpClass(cls):
import grammar.grammars
grammar.grammars.compileGrammars()
def setUp(self):
self.eval1 = Evaluation()
self.eval2 = Evaluation()
self.eval2[Variable('X')] = pi
self.eval2[Variable('T')] = 'abc'
self.eval2[Variable('Z')] = 12.0
def test_integer_atan_with_empty_evaluation(self):
a1 = ALitteral(1)
expr = Func(a1, atan)
self.assertEqual(expr.value(self.eval1), atan(1))
def test_integer_atan_with_non_empty_evaluation(self):
a1 = ALitteral(1)
expr = Func(a1, atan)
self.assertEqual(expr.value(self.eval2), atan(1))
def test_float_atan_with_empty_evaluation(self):
a1 = ALitteral(pi)
expr = Func(a1, atan)
self.assertEqual(expr.value(self.eval1), atan(pi))
def test_float_atan_with_non_empty_evaluation(self):
a1 = ALitteral(pi)
expr = Func(a1, atan)
self.assertEqual(expr.value(self.eval2), atan(pi))
def test_string_atan_with_empty_evaluation(self):
a1 = ALitteral('abc')
expr = Func(a1, atan)
with self.assertRaises(TypeError):
expr.value(self.eval1)
def test_string_atan_with_non_empty_evaluation(self):
a1 = ALitteral('abc')
expr = Func(a1, atan)
with self.assertRaises(TypeError):
expr.value(self.eval2)
def test_undefined_atan_with_empty_evaluation(self):
a1 = UndefinedLitteral()
expr = Func(a1, atan)
with self.assertRaises(TypeError):
expr.value(self.eval1)
def test_undefined_atan_with_non_empty_evaluation(self):
a1 = UndefinedLitteral()
expr = Func(a1, atan)
with self.assertRaises(TypeError):
expr.value(self.eval2)
def test_evaluated_variable_atan(self):
a1 = ALitteral(Variable('X'))
expr = Func(a1, atan)
self.assertEqual(expr.value(self.eval2), atan(pi))
def test_unevaluated_variable_atan(self):
a1 = ALitteral(Variable('Y'))
expr = Func(a1, atan)
with self.assertRaises(ValueError):
expr.value(self.eval2)
def test_self_litteral_atan_with_empty_evaluation(self):
a1 = SelfLitteral()
expr = Func(a1, atan)
self.assertEqual(expr.value(self.eval1, pi), atan(pi))
def test_self_litteral_atan_with_non_empty_evaluation(self):
a1 = SelfLitteral()
expr = Func(a1, atan)
self.assertEqual(expr.value(self.eval2, pi), atan(pi))
|
mit
| 893,855,080,066,188,400 | 31.344828 | 82 | 0.629932 | false |
eliasrg/SURF2017
|
code/separate/coding/PAM.py
|
1
|
2567
|
# Copyright (c) 2017 Elias Riedel Gårding
# Licensed under the MIT License
from utilities import to_column_vector, int_to_bits, bits_to_int
import numpy as np
class Constellation:
"""A mapping m: โค2^n โ โ^K."""
@classmethod
def uniform(cls, n):
"Equidistant points (K = 1)."
# {-(2^n - 1), -(2^n - 3), ..., 2^n - 3, 2^n - 1} (2^n integers)
ints = 2 * np.arange(2**n) - (2**n - 1)
return cls(n, 1, [np.array([x]) for x in ints])
@classmethod
def cartesian_product(cls, *constellations, repeat=None):
# Cartesian power
if repeat is not None:
(constellation,) = constellations
return cls.cartesian_product(*(repeat * [constellation]))
if len(constellations) == 1:
return constellations[0]
else:
last = constellations[-1]
init = constellations[:-1]
inner = cls.cartesian_product(*init)
points = [np.concatenate((p, q))
for p in inner.points for q in last.points]
return cls(inner.n + last.n, inner.K + last.K, points)
def __init__(self, n, K, points):
for p in points:
if type(p) != np.ndarray:
raise ValueError(
"Point not of type numpy.ndarray: {}" .format(p))
self.n = n
self.K = K
self.points = points
def modulate(self, bits):
return list(self.points[bits_to_int(bits)])
def demodulate(self, point):
index = min(range(2**self.n),
key=lambda i: norm_sq(point - self.points[i]))
return int_to_bits(index, self.n)
def metric_increment(self, SNR, bias, received, codeword):
# Real-valued codewords
assert all(isinstance(z, float) for z in received.flatten())
return (1 - bias) * self.n \
- SNR / (2 * np.log(2)) \
* norm_sq(received -
to_column_vector(self.modulate(codeword))) \
- np.log2(sum(np.exp(-SNR/2 * norm_sq(
received - to_column_vector(point)))
for point in self.points))
def normalize(self, new_power=1):
"""Normalize so that the average power is 1."""
power = np.mean([norm_sq(p) for p in self.points])
factor = np.sqrt(new_power / power)
new_points = [factor * p for p in self.points]
return self.__class__(self.n, self.K, new_points)
def norm_sq(x):
return np.sum(x**2)
|
mit
| -9,088,709,460,113,332,000 | 31.405063 | 73 | 0.532813 | false |
popazerty/dvbapp2-gui
|
lib/python/Plugins/SystemPlugins/IniFanSetup/plugin.py
|
1
|
3437
|
# -*- coding: utf-8 -*-
from Screens.Screen import Screen
from Components.ConfigList import ConfigListScreen
from Components.config import config, ConfigSubsection, ConfigInteger, ConfigSelection, getConfigListEntry
modelist = {"0": _("Off"), "2": _("On"), "1": _("Auto")}
config.plugins.FanSetup = ConfigSubsection()
config.plugins.FanSetup.mode = ConfigSelection(choices = modelist, default = "2")
class FanSetupScreen(Screen, ConfigListScreen):
skin = """
<screen position="center,center" size="400,200" title="Fan setup">
<widget name="config" position="10,10" size="350,150" />
<ePixmap pixmap="skin_default/buttons/green.png" position="145,45" zPosition="0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/red.png" position="5,45" zPosition="0" size="140,40" alphatest="on" />
<widget name="ok" position="145,45" size="140,40" valign="center" halign="center" zPosition="1" font="Regular;20" transparent="1" backgroundColor="green" />
<widget name="cancel" position="5,45" size="140,40" valign="center" halign="center" zPosition="1" font="Regular;20" transparent="1" backgroundColor="red" />
</screen>"""
def __init__(self, session):
self.skin = FanSetupScreen.skin
Screen.__init__(self, session)
from Components.ActionMap import ActionMap
from Components.Button import Button
self["ok"] = Button(_("OK"))
self["cancel"] = Button(_("Cancel"))
self["actions"] = ActionMap(["OkCancelActions", "ColorActions", "CiSelectionActions"],
{
"ok": self.Go,
"save": self.Go,
"cancel": self.Cancel,
"red": self.Go,
"green": self.Cancel
}, -2)
self.list = []
ConfigListScreen.__init__(self, self.list, session = self.session)
mode = config.plugins.FanSetup.mode.value
self.mode = ConfigSelection(choices = modelist, default = mode)
self.list.append(getConfigListEntry(_("Fan mode"), self.mode))
self["config"].list = self.list
self["config"].l.setList(self.list)
def keyLeft(self):
ConfigListScreen.keyLeft(self)
self.setPreviewSettings()
def keyRight(self):
ConfigListScreen.keyRight(self)
self.setPreviewSettings()
def setPreviewSettings(self):
applySettings(int(self.mode.value))
def Go(self):
config.plugins.FanSetup.mode.value = self.mode.value
config.plugins.FanSetup.save()
setConfiguredSettings()
self.close()
def Cancel(self):
setConfiguredSettings()
self.close()
def applySettings(mode):
setMode = ""
if mode == 1:
setMode = "1"
elif mode == 2:
setMode = "2"
else:
setMode = "0"
try:
file = open("/proc/stb/fp/fan_pwm", "w")
file.write('%s' % setMode)
file.close()
except:
return
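# Illustrative mapping (comment added for clarity): applySettings(2) writes "2"
# ("on") to /proc/stb/fp/fan_pwm, applySettings(1) writes "1" ("auto"), and any
# other value falls back to "0" ("off"), matching modelist above.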
def setConfiguredSettings():
applySettings(int(config.plugins.FanSetup.mode.value))
def main(session, **kwargs):
session.open(FanSetupScreen)
def startup(reason, **kwargs):
setConfiguredSettings()
def FanMain(session, **kwargs):
session.open(FanSetupScreen)
def FanSetup(menuid, **kwargs):
if menuid == "setup":
return [(_("FAN Setup"), FanMain, "fan_setup", None)]
else:
return []
def Plugins(**kwargs):
from os import path
if path.exists("/proc/stb/fp/fan_pwm"):
from Plugins.Plugin import PluginDescriptor
return [PluginDescriptor(name = _("Fan Setup"), description = _("switch Fan On/Off"), where = PluginDescriptor.WHERE_MENU, fnc = FanSetup),
PluginDescriptor(name = "Fan Setup", description = "", where = PluginDescriptor.WHERE_SESSIONSTART, fnc = startup)]
return []
|
gpl-2.0
| 8,741,970,078,657,134,000 | 29.415929 | 158 | 0.697701 | false |
matrixjoeq/timus_solutions
|
1327/slu.py
|
1
|
2005
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
1327. Fuses
Time limit: 1.0 second
Memory limit: 64 MB
[Description]
Sasha Privalov, a young programmer working in the SRITS (Scientific Research
Institute for Thaumaturgy and Spellcraft), finds his job rather enjoyable.
Indeed, he is the only programmer of such a wonderful machine as Aldan-3 -
that's a refreshing shift from a dull job in Leningrad. There is just a single
problem, and the problem's name is Janus Poluektovich.
On Privalov's first workday, Janus burdened Aldan with the task of four-
dimensional convolution in the conjuration space. Aldan worked for a while,
flashing its lights and rewinding tapes, then a fuse blew and the machine shut
down. Well, replacing fuses is something even a programmer can do. But Janus is
rather absent-minded, and he, being lost in thoughts about his convolution
problem, forgot about the weak fuse next day. So, on a third day Janus launched
his program again, blowing another fuse. The fourth day went calmly, but on a
fifth day one more fuse had to be replaced. And Janus is still not going to give
up…
Nevertheless, these accidents don't bother Sasha, as long as he has enough spare
fuses. Your task is to help Sasha in making the requisition for spare parts. The
requisition is made for a specific period - from the A-th workday to the B-th
workday inclusive. You should calculate, how many fuses Janus is going to blow
with his programs in the specified period of time.
[Input]
The first line contains an integer A. The second line contains an integer B.
1 ≤ A ≤ B ≤ 10000.
[Output]
The output should contain one number - the amount of fuses that will be blown by
Janus in the interval from day A until day B.
'''
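# Worked example (added for clarity, not part of the original submission):
# fuses blow on odd-numbered workdays 1, 3, 5, ..., so the answer is the number
# of odd integers in [A, B]. With c = B - A + 1:
#   A=1, B=5 -> days 1, 3, 5 -> 3 fuses; c=5 odd, A odd  -> c // 2 + 1 = 3
#   A=2, B=5 -> days 3, 5    -> 2 fuses; c=4 even        -> c // 2     = 2
#   A=2, B=6 -> days 3, 5    -> 2 fuses; c=5 odd, A even -> c // 2     = 2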
import sys;
import math;
def calc():
a = int(sys.stdin.readline())
b = int(sys.stdin.readline())
c = int(b - a + 1)
if (c % 2 == 1 and a % 2 == 1):
c = int(c / 2) + 1
else:
c = int(c / 2)
print c
if __name__ == '__main__':
calc()
|
mit
| 8,483,938,644,643,850,000 | 35.981481 | 80 | 0.728092 | false |
elimence/edx-platform
|
lms/djangoapps/courseware/features/smart-accordion.py
|
1
|
5762
|
#pylint: disable=C0111
#pylint: disable=W0621
from lettuce import world, step
from re import sub
from nose.tools import assert_equals
from xmodule.modulestore.django import modulestore
from common import *
from logging import getLogger
logger = getLogger(__name__)
def check_for_errors():
e = world.browser.find_by_css('.outside-app')
if len(e) > 0:
assert False, 'there was a server error at %s' % (world.browser.url)
else:
assert True
@step(u'I verify all the content of each course')
def i_verify_all_the_content_of_each_course(step):
all_possible_courses = get_courses()
logger.debug('Courses found:')
for c in all_possible_courses:
logger.debug(c.id)
ids = [c.id for c in all_possible_courses]
# Get a list of all the registered courses
registered_courses = world.browser.find_by_css('article.my-course')
if len(all_possible_courses) < len(registered_courses):
assert False, "user is registered for more courses than are uniquely posssible"
else:
pass
for test_course in registered_courses:
test_course.css_click('a')
check_for_errors()
# Get the course. E.g. 'MITx/6.002x/2012_Fall'
current_course = sub('/info', '', sub('.*/courses/', '', world.browser.url))
validate_course(current_course, ids)
world.click_link('Courseware')
assert world.is_css_present('accordion')
check_for_errors()
browse_course(current_course)
# clicking the user link gets you back to the user's home page
world.css_click('.user-link')
check_for_errors()
def browse_course(course_id):
## count chapters from xml and page and compare
chapters = get_courseware_with_tabs(course_id)
num_chapters = len(chapters)
rendered_chapters = world.browser.find_by_css('#accordion > nav > div')
num_rendered_chapters = len(rendered_chapters)
msg = '%d chapters expected, %d chapters found on page for %s' % (num_chapters, num_rendered_chapters, course_id)
#logger.debug(msg)
assert num_chapters == num_rendered_chapters, msg
chapter_it = 0
## Iterate the chapters
while chapter_it < num_chapters:
## click into a chapter
world.browser.find_by_css('#accordion > nav > div')[chapter_it].find_by_tag('h3').click()
## look for the "there was a server error" div
check_for_errors()
## count sections from xml and page and compare
sections = chapters[chapter_it]['sections']
num_sections = len(sections)
rendered_sections = world.browser.find_by_css('#accordion > nav > div')[chapter_it].find_by_tag('li')
num_rendered_sections = len(rendered_sections)
msg = ('%d sections expected, %d sections found on page, %s - %d - %s' %
(num_sections, num_rendered_sections, course_id, chapter_it, chapters[chapter_it]['chapter_name']))
#logger.debug(msg)
assert num_sections == num_rendered_sections, msg
section_it = 0
## Iterate the sections
while section_it < num_sections:
## click on a section
world.browser.find_by_css('#accordion > nav > div')[chapter_it].find_by_tag('li')[section_it].find_by_tag('a').click()
## sometimes the course-content takes a long time to load
assert world.is_css_present('.course-content')
## look for server error div
check_for_errors()
## count tabs from xml and page and compare
## count the number of tabs. If number of tabs is 0, there won't be anything rendered
## so we explicitly set rendered_tabs because otherwise find_elements returns a None object with no length
num_tabs = sections[section_it]['clickable_tab_count']
if num_tabs != 0:
rendered_tabs = world.browser.find_by_css('ol#sequence-list > li')
num_rendered_tabs = len(rendered_tabs)
else:
rendered_tabs = 0
num_rendered_tabs = 0
msg = ('%d tabs expected, %d tabs found, %s - %d - %s' %
(num_tabs, num_rendered_tabs, course_id, section_it, sections[section_it]['section_name']))
#logger.debug(msg)
# Save the HTML to a file for later comparison
world.save_the_course_content('/tmp/%s' % course_id)
assert num_tabs == num_rendered_tabs, msg
tabs = sections[section_it]['tabs']
tab_it = 0
## Iterate the tabs
while tab_it < num_tabs:
rendered_tabs[tab_it].find_by_tag('a').click()
## do something with the tab sections[section_it]
# e = world.browser.find_by_css('section.course-content section')
# process_section(e)
tab_children = tabs[tab_it]['children_count']
tab_class = tabs[tab_it]['class']
if tab_children != 0:
rendered_items = world.browser.find_by_css('div#seq_content > section > ol > li > section')
num_rendered_items = len(rendered_items)
msg = ('%d items expected, %d items found, %s - %d - %s - tab %d' %
(tab_children, num_rendered_items, course_id, section_it, sections[section_it]['section_name'], tab_it))
#logger.debug(msg)
assert tab_children == num_rendered_items, msg
tab_it += 1
section_it += 1
chapter_it += 1
def validate_course(current_course, ids):
try:
ids.index(current_course)
except:
assert False, "invalid course id %s" % current_course
|
agpl-3.0
| -6,453,196,963,020,202,000 | 35.468354 | 131 | 0.599098 | false |
WalkingMachine/sara_commun
|
wm_robocup2016/src/inspection.py
|
1
|
8680
|
#!/usr/bin/env python
import rospy
import smach
from smach_ros import SimpleActionState
import wm_supervisor.srv
from move_base_msgs.msg import MoveBaseAction
from geometry_msgs.msg import PoseStamped, PoseWithCovarianceStamped
import threading
from std_msgs.msg import Float64, String, Bool
class InitRobot(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['init_done'])
self.neck_pub = rospy.Publisher('neckHead_controller/command', Float64, queue_size=1, latch=True)
self.tts_pub = rospy.Publisher('sara_tts', String, queue_size=1, latch=True)
self.amcl_initial_pose_pub = rospy.Publisher('initialpose', PoseWithCovarianceStamped, queue_size=1, latch=True)
def execute(self, ud):
initial_pose = PoseWithCovarianceStamped()
initial_pose.header.frame_id = 'map'
initial_pose.pose.pose.position.x = 0.0
initial_pose.pose.pose.position.y = 0.0
initial_pose.pose.pose.orientation.x = 0.0
initial_pose.pose.pose.orientation.y = 0.0
initial_pose.pose.pose.orientation.z = 0.0
initial_pose.pose.pose.orientation.w = 1.0
self.amcl_initial_pose_pub.publish(initial_pose)
neck_cmd = Float64()
neck_cmd.data = -2.0
self.neck_pub.publish(neck_cmd)
rospy.sleep(rospy.Duration(2))
neck_cmd.data = -0.7
self.neck_pub.publish(neck_cmd)
rospy.sleep(rospy.Duration(4))
tts_msg = String()
tts_msg.data = "I am ready to begin the inspection."
self.tts_pub.publish(tts_msg)
return 'init_done'
class WaitForStart(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['begin_inspection'])
# TODO subscribe to start button topic
self.start_sub = rospy.Subscriber('start_button_msg', Bool, self.sub_cb, queue_size=4)
self.mutex = threading.Lock()
self.start_signal_received = False
def sub_cb(self, msg):
self.mutex.acquire()
if msg.data:
self.start_signal_received = True
self.mutex.release()
def execute(self, ud):
while True:
self.mutex.acquire()
if self.start_signal_received:
self.mutex.release()
break
self.mutex.release()
rospy.sleep(rospy.Duration(1))
return 'begin_inspection'
class InspectionPoseSupervisor(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['inspection_pose_estop', 'inspection_pose_ok'])
self.supervisor_srv = rospy.ServiceProxy('robot_status', wm_supervisor.srv.robotStatus)
self.tts_pub = rospy.Publisher('sara_tts', String, queue_size=1, latch=True)
def execute(self, ud):
try:
res = self.supervisor_srv()
if res.status == wm_supervisor.srv.robotStatusResponse.STATUS_OK:
tts_msg = String()
tts_msg.data = "I am moving toward the inspection point."
self.tts_pub.publish(tts_msg)
return 'inspection_pose_ok'
except rospy.ServiceException:
rospy.logerr("Failed to connect to the supervising service.")
rospy.sleep(5.0)
return 'inspection_pose_estop'
class WaitForContinue(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['continue_inspection'])
self.barcode_sub = rospy.Subscriber('barcode', String, self.sub_cb, queue_size=4)
self.tts_pub = rospy.Publisher('sara_tts', String, queue_size=1, latch=True)
self.mutex = threading.Lock()
self.continue_code_received = False
def sub_cb(self, msg):
self.mutex.acquire()
if msg.data.lower().find('continue') != -1:
self.continue_code_received = True
self.mutex.release()
def execute(self, ud):
tts_msg = String()
tts_msg.data = "I have arrived at the inspection pose."
self.tts_pub.publish(tts_msg)
while True:
self.mutex.acquire()
if self.continue_code_received:
self.mutex.release()
break
self.mutex.release()
rospy.sleep(rospy.Duration(1))
return 'continue_inspection'
class ExitPoseSupervisor(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['exit_pose_estop', 'exit_pose_ok'])
self.supervisor_srv = rospy.ServiceProxy('robot_status', wm_supervisor.srv.robotStatus)
self.tts_pub = rospy.Publisher('sara_tts', String, queue_size=1, latch=True)
def execute(self, ud):
try:
res = self.supervisor_srv()
if res.status == wm_supervisor.srv.robotStatusResponse.STATUS_OK:
tts_msg = String()
tts_msg.data = "I am moving toward the exit point."
self.tts_pub.publish(tts_msg)
return 'exit_pose_ok'
except rospy.ServiceException:
rospy.logerr("Failed to connect to the supervising service.")
rospy.sleep(5.0)
return 'exit_pose_estop'
if __name__ == '__main__':
rospy.init_node('stage1_navigation_node')
sm = smach.StateMachine(outcomes=['exit'])
sm.userdata.inspection_pose = PoseStamped()
sm.userdata.inspection_pose.header.frame_id = 'map'
sm.userdata.inspection_pose.header.stamp = rospy.Time.now()
sm.userdata.inspection_pose.pose.position.x = 9.16863
sm.userdata.inspection_pose.pose.position.y = -1.37198
sm.userdata.inspection_pose.pose.position.z = 0.0
sm.userdata.inspection_pose.pose.orientation.x = 0.0
sm.userdata.inspection_pose.pose.orientation.y = 0.0
sm.userdata.inspection_pose.pose.orientation.z = -0.0479196
sm.userdata.inspection_pose.pose.orientation.w = 0.998851
sm.userdata.exit_pose = PoseStamped()
sm.userdata.exit_pose.header.frame_id = 'map'
sm.userdata.exit_pose.header.stamp = rospy.Time.now()
sm.userdata.exit_pose.pose.position.x = 15.4889
sm.userdata.exit_pose.pose.position.y = 5.22395
sm.userdata.exit_pose.pose.position.z = 0.0
sm.userdata.exit_pose.pose.orientation.x = 0.0
sm.userdata.exit_pose.pose.orientation.y = 0.0
sm.userdata.exit_pose.pose.orientation.z = 0.689411
sm.userdata.exit_pose.pose.orientation.w = 0.72437
with sm:
smach.StateMachine.add('INIT',
InitRobot(),
transitions={'init_done': 'WAIT_FOR_START'})
smach.StateMachine.add('WAIT_FOR_START',
WaitForStart(),
transitions={'begin_inspection': 'INSPECTION_POSE_SUPERVISOR'})
smach.StateMachine.add('INSPECTION_POSE_SUPERVISOR',
InspectionPoseSupervisor(),
transitions={'inspection_pose_estop': 'INSPECTION_POSE_SUPERVISOR',
'inspection_pose_ok': 'MOVE_INSPECTION_POSE'})
smach.StateMachine.add('MOVE_INSPECTION_POSE',
SimpleActionState('move_base',
MoveBaseAction,
goal_slots=['target_pose']),
transitions={'succeeded': 'WAIT_FOR_CONTINUE',
'aborted': 'INSPECTION_POSE_SUPERVISOR',
'preempted': 'INSPECTION_POSE_SUPERVISOR'},
remapping={'target_pose': 'inspection_pose'})
smach.StateMachine.add('WAIT_FOR_CONTINUE',
WaitForContinue(),
transitions={'continue_inspection': 'EXIT_POSE_SUPERVISOR'})
smach.StateMachine.add('EXIT_POSE_SUPERVISOR',
ExitPoseSupervisor(),
transitions={'exit_pose_estop': 'EXIT_POSE_SUPERVISOR',
'exit_pose_ok': 'MOVE_EXIT_POSE'})
smach.StateMachine.add('MOVE_EXIT_POSE',
SimpleActionState('move_base',
MoveBaseAction,
goal_slots=['target_pose']),
transitions={'succeeded': 'exit',
'aborted': 'EXIT_POSE_SUPERVISOR',
'preempted': 'EXIT_POSE_SUPERVISOR'},
remapping={'target_pose': 'exit_pose'})
outcome = sm.execute()
|
apache-2.0
| -6,698,372,797,924,644,000 | 36.094017 | 120 | 0.575922 | false |
ZeusWPI/Haldis
|
app/models/order.py
|
1
|
3454
|
"Script for everything Order related in the database"
import typing
from datetime import datetime
from utils import first
from hlds.definitions import location_definitions
from .database import db
from .user import User
class Order(db.Model):
"Class used for configuring the Order model in the database"
id = db.Column(db.Integer, primary_key=True)
courier_id = db.Column(db.Integer, nullable=True)
location_id = db.Column(db.String(64))
location_name = db.Column(db.String(128))
starttime = db.Column(db.DateTime)
stoptime = db.Column(db.DateTime)
public = db.Column(db.Boolean, default=True)
items = db.relationship("OrderItem", backref="order", lazy="dynamic")
def __getattr__(self, name):
if name == "location":
return first(
filter(lambda l: l.id == self.location_id, location_definitions)
)
raise AttributeError()
def __repr__(self) -> str:
# pylint: disable=R1705
if self.location:
return "Order %d @ %s" % (self.id, self.location.name or "None")
else:
return "Order %d" % (self.id)
def update_from_hlds(self) -> None:
"""
Update the location name from the HLDS definition.
User should commit after running this to make the change persistent.
"""
assert (
self.location_id
), "location_id must be configured before updating from HLDS"
self.location_name = self.location.name
def for_user(self, anon=None, user=None) -> typing.List:
return list(
filter(
(lambda i: i.user == user)
if user is not None
else (lambda i: i.user_name == anon),
self.items
)
)
def group_by_user(self) -> typing.List[typing.Tuple[str, typing.List]]:
"Group items of an Order by user"
group: typing.Dict[str, typing.List] = dict()
for item in self.items:
if item.for_name not in group:
group[item.for_name] = []
group[item.for_name].append(item)
for _user_name, order_items in group.items():
order_items.sort(key=lambda order_item: order_item.comment or "")
return list(sorted(group.items(), key=lambda t: (t[0] or "", t[1] or "")))
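    # Illustrative result shape (comment added for clarity): an order with two
    # items for "alice" and one for "bob" yields roughly
    # [("alice", [item_a1, item_a2]), ("bob", [item_b1])], grouped by each
    # item's for_name and with every user's items sorted by comment.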
def group_by_dish(self) -> typing.List[typing.Tuple[str, typing.List]]:
"Group items of an Order by dish"
group: typing.Dict[str, typing.List] = dict()
for item in self.items:
if item.dish_name not in group:
group[item.dish_name] = []
group[item.dish_name].append(item)
for _dish_name, order_items in group.items():
order_items.sort(key=lambda order_item: (
(order_item.comment or " No comment") +
(order_item.for_name or "")
))
return list(sorted(group.items()))
def is_closed(self) -> bool:
return self.stoptime and datetime.now() > self.stoptime
def can_close(self, user_id: int) -> bool:
"Check if a user can close the Order"
if self.stoptime and self.stoptime < datetime.now():
return False
user = None
if user_id:
user = User.query.filter_by(id=user_id).first()
if self.courier_id == user_id or (user and user.is_admin()):
return True
return False
|
mit
| -4,438,506,992,852,992,500 | 32.862745 | 82 | 0.583092 | false |
justinvforvendetta/electrum-pkb
|
lib/network.py
|
1
|
18396
|
import time
import Queue
import os
import sys
import random
import re
import traceback
import socks
import socket
import json
import util
from bitcoin import *
import interface
from blockchain import Blockchain
DEFAULT_PORTS = {'t':'50001', 's':'50002', 'h':'8081', 'g':'8082'}
DEFAULT_SERVERS = {
'electrum-pkb.net': DEFAULT_PORTS,
}
NODES_RETRY_INTERVAL = 60
SERVER_RETRY_INTERVAL = 10
def parse_servers(result):
""" parse servers list into dict format"""
from version import PROTOCOL_VERSION
servers = {}
for item in result:
host = item[1]
out = {}
version = None
pruning_level = '-'
if len(item) > 2:
for v in item[2]:
if re.match("[stgh]\d*", v):
protocol, port = v[0], v[1:]
if port == '': port = DEFAULT_PORTS[protocol]
out[protocol] = port
elif re.match("v(.?)+", v):
version = v[1:]
elif re.match("p\d*", v):
pruning_level = v[1:]
if pruning_level == '': pruning_level = '0'
try:
is_recent = float(version)>=float(PROTOCOL_VERSION)
except Exception:
is_recent = False
if out and is_recent:
out['pruning'] = pruning_level
servers[host] = out
return servers
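# Example (comment added for illustration): an entry such as
# ['ignored', 'electrum.example.org', ['v1.0', 'p100', 't50001', 's50002']]
# yields {'electrum.example.org': {'t': '50001', 's': '50002', 'pruning': '100'}},
# provided the advertised version is at least PROTOCOL_VERSION.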
def filter_protocol(servers, p):
l = []
for k, protocols in servers.items():
if p in protocols:
s = serialize_server(k, protocols[p], p)
l.append(s)
return l
def pick_random_server(p='s'):
return random.choice( filter_protocol(DEFAULT_SERVERS,p) )
from simple_config import SimpleConfig
proxy_modes = ['socks4', 'socks5', 'http']
def serialize_proxy(p):
if type(p) != dict:
return None
return ':'.join([p.get('mode'),p.get('host'), p.get('port')])
def deserialize_proxy(s):
if s is None:
return None
if s.lower() == 'none':
return None
proxy = { "mode":"socks5", "host":"localhost" }
args = s.split(':')
n = 0
if proxy_modes.count(args[n]) == 1:
proxy["mode"] = args[n]
n += 1
if len(args) > n:
proxy["host"] = args[n]
n += 1
if len(args) > n:
proxy["port"] = args[n]
else:
proxy["port"] = "8080" if proxy["mode"] == "http" else "1080"
return proxy
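# Example (comment added for illustration): "socks5:localhost:9050" becomes
# {'mode': 'socks5', 'host': 'localhost', 'port': '9050'}, while "http:myproxy"
# falls back to the default port: {'mode': 'http', 'host': 'myproxy', 'port': '8080'}.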
def deserialize_server(server_str):
host, port, protocol = str(server_str).split(':')
assert protocol in 'st'
int(port)
return host, port, protocol
def serialize_server(host, port, protocol):
return str(':'.join([host, port, protocol]))
class Network(util.DaemonThread):
"""The Network class manages a set of connections to remote
    electrum servers; each connection is handled by its own
thread object returned from Interface(). Its external API:
- Member functions get_header(), get_parameters(), get_status_value(),
new_blockchain_height(), set_parameters(), start(),
stop()
"""
def __init__(self, pipe, config=None):
if config is None:
config = {} # Do not use mutables as default values!
util.DaemonThread.__init__(self)
self.config = SimpleConfig(config) if type(config) == type({}) else config
self.num_server = 8 if not self.config.get('oneserver') else 0
self.blockchain = Blockchain(self.config, self)
self.queue = Queue.Queue()
self.requests_queue = pipe.send_queue
self.response_queue = pipe.get_queue
# Server for addresses and transactions
self.default_server = self.config.get('server')
# Sanitize default server
try:
deserialize_server(self.default_server)
except:
self.default_server = None
if not self.default_server:
self.default_server = pick_random_server('s')
self.irc_servers = {} # returned by interface (list from irc)
self.recent_servers = self.read_recent_servers()
self.pending_servers = set()
self.banner = ''
self.heights = {}
self.merkle_roots = {}
self.utxo_roots = {}
dir_path = os.path.join( self.config.path, 'certs')
if not os.path.exists(dir_path):
os.mkdir(dir_path)
# subscriptions and requests
self.subscribed_addresses = set()
# cached address status
self.addr_responses = {}
# unanswered requests
self.unanswered_requests = {}
# retry times
self.server_retry_time = time.time()
self.nodes_retry_time = time.time()
# kick off the network
self.interface = None
self.interfaces = {}
self.start_network(deserialize_server(self.default_server)[2],
deserialize_proxy(self.config.get('proxy')))
def read_recent_servers(self):
if not self.config.path:
return []
path = os.path.join(self.config.path, "recent_servers")
try:
with open(path, "r") as f:
data = f.read()
return json.loads(data)
except:
return []
def save_recent_servers(self):
if not self.config.path:
return
path = os.path.join(self.config.path, "recent_servers")
s = json.dumps(self.recent_servers, indent=4, sort_keys=True)
try:
with open(path, "w") as f:
f.write(s)
except:
pass
def get_server_height(self):
return self.heights.get(self.default_server, 0)
def server_is_lagging(self):
h = self.get_server_height()
if not h:
self.print_error('no height for main interface')
return False
lag = self.get_local_height() - self.get_server_height()
return lag > 1
def set_status(self, status):
self.connection_status = status
self.notify('status')
def is_connected(self):
return self.interface and self.interface.is_connected()
def send_subscriptions(self):
# clear cache
self.cached_responses = {}
self.print_error('sending subscriptions to', self.interface.server, len(self.unanswered_requests), len(self.subscribed_addresses))
for r in self.unanswered_requests.values():
self.interface.send_request(r)
for addr in self.subscribed_addresses:
self.interface.send_request({'method':'blockchain.address.subscribe','params':[addr]})
self.interface.send_request({'method':'server.banner','params':[]})
self.interface.send_request({'method':'server.peers.subscribe','params':[]})
def get_status_value(self, key):
if key == 'status':
value = self.connection_status
elif key == 'banner':
value = self.banner
elif key == 'updated':
value = (self.get_local_height(), self.get_server_height())
elif key == 'servers':
value = self.get_servers()
elif key == 'interfaces':
value = self.get_interfaces()
return value
def notify(self, key):
value = self.get_status_value(key)
self.response_queue.put({'method':'network.status', 'params':[key, value]})
def random_server(self):
choice_list = []
l = filter_protocol(self.get_servers(), self.protocol)
for s in l:
if s in self.pending_servers or s in self.disconnected_servers or s in self.interfaces.keys():
continue
else:
choice_list.append(s)
if not choice_list:
return
server = random.choice( choice_list )
return server
def get_parameters(self):
host, port, protocol = deserialize_server(self.default_server)
auto_connect = self.config.get('auto_cycle', True)
return host, port, protocol, self.proxy, auto_connect
def get_interfaces(self):
return self.interfaces.keys()
def get_servers(self):
if self.irc_servers:
out = self.irc_servers
else:
out = DEFAULT_SERVERS
for s in self.recent_servers:
try:
host, port, protocol = deserialize_server(s)
except:
continue
if host not in out:
out[host] = { protocol:port }
return out
def start_interface(self, server):
if not server in self.interfaces.keys():
if server == self.default_server:
self.set_status('connecting')
i = interface.Interface(server, self.queue, self.config)
self.pending_servers.add(server)
i.start()
def start_random_interface(self):
server = self.random_server()
if server:
self.start_interface(server)
def start_interfaces(self):
self.start_interface(self.default_server)
for i in range(self.num_server - 1):
self.start_random_interface()
def start(self):
self.running = True
self.blockchain.start()
util.DaemonThread.start(self)
def set_proxy(self, proxy):
self.proxy = proxy
if proxy:
proxy_mode = proxy_modes.index(proxy["mode"]) + 1
socks.setdefaultproxy(proxy_mode, proxy["host"], int(proxy["port"]))
socket.socket = socks.socksocket
# prevent dns leaks, see http://stackoverflow.com/questions/13184205/dns-over-proxy
socket.getaddrinfo = lambda *args: [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
else:
socket.socket = socket._socketobject
socket.getaddrinfo = socket._socket.getaddrinfo
def start_network(self, protocol, proxy):
assert not self.interface and not self.interfaces
self.print_error('starting network')
self.disconnected_servers = set([])
self.protocol = protocol
self.set_proxy(proxy)
self.start_interfaces()
def stop_network(self):
# FIXME: this forgets to handle pending servers...
self.print_error("stopping network")
for i in self.interfaces.values():
i.stop()
self.interface = None
self.interfaces = {}
def set_parameters(self, host, port, protocol, proxy, auto_connect):
server = serialize_server(host, port, protocol)
if self.proxy != proxy or self.protocol != protocol:
# Restart the network defaulting to the given server
self.stop_network()
self.default_server = server
self.start_network(protocol, proxy)
elif self.default_server != server:
self.switch_to_interface(server)
elif auto_connect and (not self.is_connected() or self.server_is_lagging()):
self.switch_to_random_interface()
def switch_to_random_interface(self):
if self.interfaces:
server = random.choice(self.interfaces.keys())
self.switch_to_interface(server)
def switch_to_interface(self, server):
'''Switch to server as our interface. If not already connected, start a
connection - we will switch on receipt of the connection notification'''
self.default_server = server
if server in self.interfaces:
self.print_error("switching to", server)
# stop any current interface in order to terminate subscriptions
self.stop_interface()
self.interface = self.interfaces[server]
self.send_subscriptions()
self.set_status('connected')
self.notify('updated')
elif server not in self.pending_servers:
self.print_error("starting %s; will switch once connected" % server)
self.start_interface(server)
def stop_interface(self):
if self.interface:
self.interface.stop()
self.interface = None
def set_server(self, server):
if self.default_server == server and self.is_connected():
return
if self.protocol != deserialize_server(server)[2]:
return
self.switch_to_interface(server)
def add_recent_server(self, i):
# list is ordered
s = i.server
if s in self.recent_servers:
self.recent_servers.remove(s)
self.recent_servers.insert(0,s)
self.recent_servers = self.recent_servers[0:20]
self.save_recent_servers()
def new_blockchain_height(self, blockchain_height, i):
if self.is_connected():
if self.server_is_lagging():
self.print_error("Server is lagging", blockchain_height, self.get_server_height())
if self.config.get('auto_cycle'):
self.set_server(i.server)
self.notify('updated')
def process_if_notification(self, i):
'''Handle interface addition and removal through notifications'''
if i.server in self.pending_servers:
self.pending_servers.remove(i.server)
if i.is_connected():
self.interfaces[i.server] = i
self.add_recent_server(i)
i.send_request({'method':'blockchain.headers.subscribe','params':[]})
if i.server == self.default_server:
self.switch_to_interface(i.server)
else:
self.interfaces.pop(i.server, None)
self.heights.pop(i.server, None)
if i == self.interface:
self.set_status('disconnected')
self.disconnected_servers.add(i.server)
# Our set of interfaces changed
self.notify('interfaces')
def process_response(self, i, response):
# the id comes from the daemon or the network proxy
_id = response.get('id')
if _id is not None:
if i != self.interface:
return
self.unanswered_requests.pop(_id)
method = response.get('method')
result = response.get('result')
if method == 'blockchain.headers.subscribe':
self.on_header(i, response)
elif method == 'server.peers.subscribe':
self.irc_servers = parse_servers(result)
self.notify('servers')
elif method == 'server.banner':
self.banner = result
self.notify('banner')
elif method == 'blockchain.address.subscribe':
addr = response.get('params')[0]
self.addr_responses[addr] = result
self.response_queue.put(response)
else:
self.response_queue.put(response)
def handle_requests(self):
while True:
try:
request = self.requests_queue.get_nowait()
except Queue.Empty:
break
self.process_request(request)
def process_request(self, request):
method = request['method']
params = request['params']
_id = request['id']
if method.startswith('network.'):
out = {'id':_id}
try:
f = getattr(self, method[8:])
out['result'] = f(*params)
except AttributeError:
out['error'] = "unknown method"
except BaseException as e:
out['error'] = str(e)
traceback.print_exc(file=sys.stdout)
self.print_error("network error", str(e))
self.response_queue.put(out)
return
if method == 'blockchain.address.subscribe':
addr = params[0]
self.subscribed_addresses.add(addr)
if addr in self.addr_responses:
self.response_queue.put({'id':_id, 'result':self.addr_responses[addr]})
return
# store unanswered request
self.unanswered_requests[_id] = request
self.interface.send_request(request)
def check_interfaces(self):
now = time.time()
# nodes
if len(self.interfaces) + len(self.pending_servers) < self.num_server:
self.start_random_interface()
if now - self.nodes_retry_time > NODES_RETRY_INTERVAL:
self.print_error('network: retrying connections')
self.disconnected_servers = set([])
self.nodes_retry_time = now
# main interface
if not self.is_connected():
if self.config.get('auto_cycle'):
self.switch_to_random_interface()
else:
if self.default_server in self.disconnected_servers:
if now - self.server_retry_time > SERVER_RETRY_INTERVAL:
self.disconnected_servers.remove(self.default_server)
self.server_retry_time = now
else:
self.switch_to_interface(self.default_server)
def run(self):
while self.is_running():
self.check_interfaces()
self.handle_requests()
try:
i, response = self.queue.get(timeout=0.1)
except Queue.Empty:
continue
# if response is None it is a notification about the interface
if response is None:
self.process_if_notification(i)
else:
self.process_response(i, response)
self.stop_network()
self.print_error("stopped")
def on_header(self, i, r):
result = r.get('result')
if not result:
return
height = result.get('block_height')
if not height:
return
self.heights[i.server] = height
self.merkle_roots[i.server] = result.get('merkle_root')
self.utxo_roots[i.server] = result.get('utxo_root')
# notify blockchain about the new height
self.blockchain.queue.put((i,result))
if i == self.interface:
if self.server_is_lagging() and self.config.get('auto_cycle'):
self.print_error("Server lagging, stopping interface")
self.stop_interface()
self.notify('updated')
def get_header(self, tx_height):
return self.blockchain.read_header(tx_height)
def get_local_height(self):
return self.blockchain.height()
|
gpl-3.0
| 8,183,106,280,867,124,000 | 33.193309 | 138 | 0.570613 | false |
dstrohl/advanced_config_manager
|
AdvConfigMgr/utils/flag_manager.py
|
1
|
2469
|
__author__ = 'dstrohl'
from AdvConfigMgr.utils.base_utils import list_in_list
# ===============================================================================
# Flag Manager
# ===============================================================================
class FlagList(object):
def __init__(self):
self._flags = []
def add(self, flag):
if flag not in self._flags:
self._flags.append(flag)
def rem(self, flag):
if flag in self._flags:
self._flags.remove(flag)
def __contains__(self, item):
return item in self._flags
def __bool__(self):
if self._flags:
return True
else:
return False
    def __call__(self, add_rem_flags=None):
        # Only process the argument when one was actually given; calling the
        # list with no argument simply returns the current flags.
        if add_rem_flags:
            add_rem_flags = add_rem_flags.replace(',', ' ')
            add_rem_flags = add_rem_flags.replace(';', ' ')
            tmp_flags = add_rem_flags.split()
            for f in tmp_flags:
                if f[0] == '-':
                    self.rem(f[1:])
                elif f[0] == '+':
                    self.add(f[1:])
                else:
                    self.add(f)
        return self._flags
def __str__(self):
return ', '.join(self._flags)
def __iter__(self):
for r in self._flags:
yield r
def __len__(self):
return len(self._flags)
class Flagger(object):
def __init__(self):
self._include = FlagList()
self._exclude = FlagList()
self._current = FlagList()
@property
def inc(self):
return self._include
@property
def exc(self):
return self._exclude
@property
def cur(self):
return self._current
@property
def check(self):
tmp_ret = False
if self.inc:
if list_in_list(self.cur, self.inc):
tmp_ret = True
else:
tmp_ret = True
if self.exc:
if list_in_list(self.cur, self.exc):
tmp_ret = False
return tmp_ret
def __call__(self, current=None, include=None, exclude=None, **kwargs):
if kwargs:
current = kwargs.get('c', current)
include = kwargs.get('i', include)
exclude = kwargs.get('e', exclude)
if current:
self.cur(current)
if include:
self.inc(include)
if exclude:
self.exc(exclude)
return self.check
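# Illustrative usage only (added sketch, not part of the original module).
if __name__ == '__main__':
    flags = FlagList()
    flags('debug, +verbose')   # bare names and '+name' add flags
    flags('-debug')            # '-name' removes a flag
    print(str(flags))          # -> verbose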
|
gpl-2.0
| -7,637,230,000,685,972,000 | 23.445545 | 81 | 0.45808 | false |
mapillary/OpenSfM
|
opensfm/test/test_rig.py
|
1
|
4622
|
"""Test the rig module."""
import numpy as np
from opensfm import pygeometry, rig, types
def test_create_instances_with_patterns() -> None:
# A first rig model defined as left/right/top/bottom
# A complete instance
instance1 = [
"12345_left.jpg",
"12345_bottom.jpg",
"12345_top.jpg",
"12345_right.jpg",
]
# An incomplete one
instance2 = [
"1234567_left.jpg",
"1234567_bottom.jpg",
"1234567_top.jpg",
]
patterns_12 = {
"camera_left": "(left)",
"camera_right": "(right)",
"camera_top": "(top)",
"camera_bottom": "(bottom)",
}
# A second one as RED/GREEN/BLUE
instance3 = [
"RED_SENSOR_001-12345678.jpg",
"GREEN_SENSOR_002-12345678.jpg",
"BLUE_SENSOR_003-12345678.jpg",
]
patterns_3 = {
"red": "(RED_SENSOR_001)",
"green": "(GREEN_SENSOR_002)",
"blue": "(BLUE_SENSOR_003)",
}
# Two single shots
instance4 = [
"RED_toto.jpg",
"tata.jpg",
]
# Run detection with these two rig model patterns
rig_patterns = patterns_12
rig_patterns.update(patterns_3)
instances, single_shots = rig.create_instances_with_patterns(
instance1 + instance2 + instance3 + instance4, rig_patterns
)
# Ensure we have 2 instance for the first rig, and 1 for the second
assert len(instances) == 3
# Ensure the two single shots
assert len(single_shots) == 2
recovered_instance1 = instances["12345_.jpg"]
assert [x[0] for x in recovered_instance1] == instance1
recovered_instance2 = instances["1234567_.jpg"]
assert [x[0] for x in recovered_instance2] == instance2
recovered_instance3 = instances["-12345678.jpg"]
assert [x[0] for x in recovered_instance3] == instance3
def test_compute_relative_pose() -> None:
# 4-cameras rig
camera1 = pygeometry.Camera.create_spherical()
camera1.id = "camera1"
camera2 = pygeometry.Camera.create_spherical()
camera2.id = "camera2"
camera3 = pygeometry.Camera.create_spherical()
camera3.id = "camera3"
camera4 = pygeometry.Camera.create_spherical()
camera4.id = "camera4"
# a bit cumbersome that we need to have some reconstruction
rec = types.Reconstruction()
rec.add_camera(camera1)
rec.add_camera(camera2)
rec.add_camera(camera3)
rec.add_camera(camera4)
# First rig instance
rec.create_shot("shot1", "camera1", pygeometry.Pose([0, 0, 0], [-2, -2, 0]))
rec.create_shot("shot2", "camera2", pygeometry.Pose([0, 0, 0], [-3, -3, 0]))
rec.create_shot("shot3", "camera3", pygeometry.Pose([0, 0, 0], [-1, -3, 0]))
rec.create_shot("shot4", "camera4", pygeometry.Pose([0, 0, 0], [-2, -4, 0]))
# Second rig instance (rotated by pi/2 around Z)
pose_instance = pygeometry.Pose([0, 0, -1.5707963])
pose_instance.set_origin([-6, 0, 0])
rec.create_shot("shot5", "camera1", pose_instance)
pose_instance.set_origin([-7, 1, 0])
rec.create_shot("shot6", "camera2", pose_instance)
pose_instance.set_origin([-7, -1, 0])
rec.create_shot("shot7", "camera3", pose_instance)
pose_instance.set_origin([-8, 0, 0])
rec.create_shot("shot8", "camera4", pose_instance)
pose_instances = [
[
(
rec.shots["shot1"],
"camera_id_1",
),
(
rec.shots["shot2"],
"camera_id_2",
),
(
rec.shots["shot3"],
"camera_id_3",
),
(
rec.shots["shot4"],
"camera_id_4",
),
],
[
(
rec.shots["shot5"],
"camera_id_1",
),
(
rec.shots["shot6"],
"camera_id_2",
),
(
rec.shots["shot7"],
"camera_id_3",
),
(
rec.shots["shot8"],
"camera_id_4",
),
],
]
# Compute rig cameras poses
rig_cameras = rig.compute_relative_pose(pose_instances)
assert np.allclose(
[0, -1, 0], rig_cameras["camera_id_1"].pose.get_origin(), atol=1e-7
)
assert np.allclose(
[1, 0, 0], rig_cameras["camera_id_2"].pose.get_origin(), atol=1e-7
)
assert np.allclose(
[-1, 0, 0], rig_cameras["camera_id_3"].pose.get_origin(), atol=1e-7
)
assert np.allclose(
[0, 1, 0], rig_cameras["camera_id_4"].pose.get_origin(), atol=1e-7
)
|
bsd-2-clause
| -3,292,371,213,176,436,700 | 28.069182 | 80 | 0.540242 | false |
jnoortheen/nigandu
|
src/managebkm.py
|
2
|
1697
|
__author__ = 'noor'
from PySide.QtGui import (QDialog, QLabel, QIcon, QWidget, QVBoxLayout, QPushButton, QListWidget, QFont)
from PySide.QtCore import QRect
class managebkm(QDialog):
def __init__(self, parent=None):
super(managebkm, self).__init__(parent)
appicom = QIcon(":/icons/njnlogo.png")
self.setWindowIcon(appicom)
self.setWindowTitle("Nigandu | Manage Book Marks")
self.setFixedSize(463, 242)
self.verticalLayoutWidget = QWidget(self)
self.verticalLayoutWidget.setGeometry(QRect(350, 30, 101, 201))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.sortbtn = QPushButton(self.verticalLayoutWidget)
self.sortbtn.setText("&Sort")
self.verticalLayout.addWidget(self.sortbtn)
self.deletebtn = QPushButton(self.verticalLayoutWidget)
self.deletebtn.setText("&Delete")
self.verticalLayout.addWidget(self.deletebtn)
self.deleteallbtn = QPushButton(self.verticalLayoutWidget)
self.deleteallbtn.setText("Delete &All")
self.verticalLayout.addWidget(self.deleteallbtn)
self.closebtn = QPushButton(self.verticalLayoutWidget)
self.closebtn.setText("&Close")
self.verticalLayout.addWidget(self.closebtn)
self.listWidget = QListWidget(self)
self.listWidget.setGeometry(QRect(10, 30, 331, 201))
self.label = QLabel(self)
self.label.setGeometry(QRect(20, 10, 91, 25))
font = QFont()
font.setPointSize(10)
self.label.setFont(font)
self.label.setBuddy(self.listWidget)
self.label.setText("Book Mark List:")
|
gpl-2.0
| 2,093,781,256,533,425,200 | 35.711111 | 104 | 0.747201 | false |
sebastiansIT/RPI-Display-Backlight-Controller-for-Kodi
|
sources/screensaver.rpi-backlight-disabler/screensaver.py
|
1
|
2345
|
# Copyright 2016 Sebastian Spautz <sebastian@human-injection.de>
#
# This file is part of "RPI Display Backlight Control for Kodi".
#
# "RPI Display Backlight Control for Kodi" is free software: you can
# redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either
# version 3 of the License, or any later version.
#
# "RPI Display Backlight Control for Kodi" is distributed in the hope
# that it will be useful, but WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
import xbmcaddon
import xbmcgui
import xbmc
import os
import sys
addon = xbmcaddon.Addon()
addonName = addon.getAddonInfo('name')
addonPath = addon.getAddonInfo('path')
class Screensaver(xbmcgui.WindowXMLDialog):
class ExitMonitor(xbmc.Monitor):
def __init__(self, exit_callback):
self.exit_callback = exit_callback
def onScreensaverDeactivated(self):
self.exit_callback()
def prepareShellCommand(self, command):
        if os.geteuid() != 0:
            self.log('Not running as root; using sudo to toggle the backlight.')
            return 'sudo bash -c \'' + command + '\''
        else:
            return command
def onInit(self):
self.log('Start Screensaver')
self.exit_monitor = self.ExitMonitor(self.exit)
shellCommand = self.prepareShellCommand('echo 1 > /sys/class/backlight/rpi_backlight/bl_power')
os.system(shellCommand)
def exit(self):
self.exit_monitor = None
shellCommand = self.prepareShellCommand('echo 0 > /sys/class/backlight/rpi_backlight/bl_power')
os.system(shellCommand)
self.close()
self.log('Stopped Screensaver')
def log(self, msg):
xbmc.log(u'%(name)s: %(message)s' % {'name': addonName, 'message': msg})
if __name__ == '__main__':
screensaver = Screensaver(
'screensaver-%s-Main.xml' % addonName.replace(' ', ''),
addonPath,
'default',
)
screensaver.doModal()
del screensaver
sys.modules.clear()
|
gpl-3.0
| -6,250,596,263,587,048,000 | 33.5 | 103 | 0.660128 | false |
mcanthony/oiio
|
testsuite/oiiotool-spi/run.py
|
1
|
2476
|
#!/usr/bin/env python
imagedir = parent + "spi-oiio-tests/"
refdir = imagedir + "ref/"
# Define a handy function that runs an oiiotool command, and
# also diffs the result against a reference image.
def oiiotool_and_test (inputfile, ops, outputfile, precommand="") :
cmd = oiiotool (precommand + " " + imagedir + inputfile +
" " + ops + " -o " + outputfile)
cmd += diff_command (outputfile, refdir+outputfile)
return cmd
# Test fit with pad on DPX
command += oiiotool_and_test ("testFullFrame_2kfa_lg10.0006.dpx",
"--fit:pad=1 512x512", "fit_lg10.dpx")
# Conversion of linear half exr to vd16 uint16 TIFF
# at very high resolution used for marketing stills.
command += oiiotool_and_test ("mkt019_comp_wayn_fullres_s3d_lf_v51_misc_lnh.1001.exr",
"--croptofull --unpremult --colorconvert lnh vd16 --premult --ch R,G,B,A -d uint16",
"mkt019_comp_wayn_fullres_s3d_lf_v51_alpha_misc_vd16.1001.tif",
precommand = "--colorconfig " + imagedir + "ht2.ocio/config.ocio")
# Test fit/cut on JPEG
command += oiiotool_and_test ("ffr0830_avid_ref_v3_hd_ref8.1024.jpg",
"--fit 2154x0 --cut 2154x1137+0+38 --cut 2154x1136",
"ffr0830_avid_ref_match_v3_2kdcip_ref8.1024.jpg")
# Test fit + color conversion + DPX->JPEG
command += oiiotool_and_test ("ep0400_bg1_v101_3kalxog_alogc16.1001.dpx",
"--fit 1028x662 --colorconvert alogc16 vd8",
"ep0400_bg1_v101_1kalxog_vd8.1001.jpg",
precommand = "--colorconfig " + imagedir + "pxl.ocio/config.ocio")
# Test ociofiletransform
command += oiiotool_and_test ("os0225_110_lightingfix_v002.0101.dpx",
"--colorconvert lm10 lnf --ociofiletransform srgb_look.csp --colorconvert lnf vd8 -d uint8",
"os0225_110_lightingfix_v002.0101.png",
precommand = "--colorconfig " + imagedir + "os4.ocio/config.ocio")
# Regression test on dealing with DPX with overscan
# REMOVED -- DPX spec doesn't support overscan!
#command += oiiotool_and_test ("dpxoverscan_hg0700_fg1_v2_2kdciufa_lg16.1014.dpx",
# "--iscolorspace lg16 --crop -2,0,2401,911 --fullpixels",
# "dpxoverscan_lg16.dpx")
outputs = [ "out.txt"]
|
bsd-3-clause
| 8,015,243,802,899,688,000 | 44.851852 | 122 | 0.588853 | false |
arcean/qtcontacts-tracker
|
tests/teststats.py
|
1
|
10828
|
#!/usr/bin/python
# This file is part of QtContacts tracker storage plugin
#
# Copyright (c) 2011 Nokia Corporation and/or its subsidiary(-ies).
#
# Contact: Nokia Corporation (info@qt.nokia.com)
#
# GNU Lesser General Public License Usage
# This file may be used under the terms of the GNU Lesser General Public License
# version 2.1 as published by the Free Software Foundation and appearing in the
# file LICENSE.LGPL included in the packaging of this file. Please review the
# following information to ensure the GNU Lesser General Public License version
# 2.1 requirements will be met:
# http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
#
# In addition, as a special exception, Nokia gives you certain additional rights.
# These rights are described in the Nokia Qt LGPL Exception version 1.1, included
# in the file LGPL_EXCEPTION.txt in this package.
#
# Other Usage
# Alternatively, this file may be used in accordance with the terms and
# conditions contained in a signed written agreement between you and Nokia.
class Version(object):
def __init__(self, text):
from re import sub, split
self.__text = text
text = text.replace('~', '_0_')
text = text.replace('-', '_1_')
text = text.replace('+', '_2_') + '_1_'
self.__numbers = map(int, filter(None, split(r'[^0-9]+', text)))
def __cmp__(self, other):
return cmp(self.numbers, other.numbers)
def __repr__(self):
return '[Version: %r - %r]' % (self.text, self.numbers)
def __str__(self):
return self.text
text = property(fget=lambda self: self.__text)
numbers = property(fget=lambda self: self.__numbers)
class TestSuite(object):
'''Model object describing a test suite'''
def __init__(self, name, description, timestamp, hostinfo, results):
from hashlib import sha1
self.__name = name
self.__description = description
self.__timestamp = timestamp
self.__package = hostinfo.get('package')
self.__version = Version(hostinfo.get('version', '0'))
self.__hostname = hostinfo.get('hostname')
self.__username = hostinfo.get('username')
self.__cpumodel = hostinfo.get('cpumodel')
self.__results = results
hostkey = self.username, self.hostname, self.cpumodel
self.__hostkey = sha1('\0'.join(map(str, hostkey))).hexdigest()
self.__fullkey = '-'.join([self.name, self.version.text, self.hostkey,
self.timestamp.strftime('%s')])
name = property(fget=lambda self: self.__name)
description = property(fget=lambda self: self.__description)
timestamp = property(fget=lambda self: self.__timestamp)
package = property(fget=lambda self: self.__package)
version = property(fget=lambda self: self.__version)
hostname = property(fget=lambda self: self.__hostname)
username = property(fget=lambda self: self.__username)
cpumodel = property(fget=lambda self: self.__cpumodel)
results = property(fget=lambda self: self.__results)
hostkey = property(fget=lambda self: self.__hostkey)
fullkey = property(fget=lambda self: self.__fullkey)
class TestResult(object):
'''Model object describing a test result'''
def __init__(self, fixture, case, start, end):
self.__fixture = fixture
self.__case = case
self.__duration = end - start
def __cmp__(self, other):
return cmp(self.duration, other.duration)
fixture = property(fget=lambda self: self.__fixture)
case = property(fget=lambda self: self.__case)
duration = property(fget=lambda self: self.__duration)
name = property(fget=lambda self: self.fixture + '::' + self.case)
class Category(object):
'''Model object describing performance categories'''
def __init__(self, name, timeout):
self.__name = name
self.__timeout = timeout
self.__results = []
name = property(fget=lambda self: self.__name)
timeout = property(fget=lambda self: self.__timeout)
results = property(fget=lambda self: self.__results)
class ResultCache(object):
'''Cache for previous test results'''
def __init__(self):
from os.path import expanduser, join
self.__cachedir = expanduser('~/.cache/contacts')
self.__filename = join(self.__cachedir, 'performance.dat')
self.__suites = {}
def add(self, suite):
self.__suites[suite.fullkey] = suite
def load(self):
from cPickle import load
try: self.__suites = load(file(self.__filename, 'rb'))
except IOError: return False
return True
def save(self):
from cPickle import HIGHEST_PROTOCOL, dump
from tempfile import NamedTemporaryFile
from os import rename
tmp = NamedTemporaryFile(delete=False, dir=self.__cachedir, prefix='performance.')
dump(self.__suites, tmp, HIGHEST_PROTOCOL)
rename(tmp.name, self.filename)
def findcurr(self, suite):
return [s for s in self.__suites.values() if (s.fullkey != suite.fullkey and
s.name == suite.name and
s.hostkey == suite.hostkey and
s.version == suite.version)]
def findprev(self, suite):
matches, pv = [], None
for s in self.__suites.values():
if s.name == suite.name and s.hostkey == suite.hostkey:
if s.version < suite.version:
if not pv or s.version > pv:
pv = s.version
matches = []
if s.version == pv:
matches.append(s)
continue
return matches
filename = property(fget=lambda self: self.__filename)
suites = property(fget=lambda self: self.__suites)
def parselog(source):
'''Parses a test results log generated by Harmattan's testrunner'''
from datetime import datetime
from xml.etree import ElementTree
from re import MULTILINE, compile
re_hostinfo = compile(r'^(\w+)=(.*)$', MULTILINE)
suites, results = [], {}
fixture, case = None, None
start, end = None, None
timestamp = None
hostinfo = {}
for event, element in ElementTree.iterparse(source, events=('start', 'end')):
if 'start' == event:
if 'case' == element.tag:
case = element.attrib['name']
continue
if 'set' == element.tag:
fixture, case = element.attrib['name'], None
continue
continue
if 'end' == event:
if 'suite' == element.tag:
suites.append(TestSuite(element.attrib['name'],
element.find('description').text,
timestamp, hostinfo, results))
timestamp, hostinfo, results = None, {}, {}
continue
if 'case' == element.tag:
r = TestResult(fixture, case, start, end)
results[r.name] = r
start, end = None, None
continue
if 'stdout' == element.tag:
if 'testHostInfo' == case:
hostinfo = dict(re_hostinfo.findall(element.text or ''))
continue
if 'start' == element.tag:
start = datetime.strptime(element.text, '%Y-%m-%d %H:%M:%S')
timestamp = timestamp and min(timestamp, start) or start
continue
if 'end' == element.tag:
end = datetime.strptime(element.text, '%Y-%m-%d %H:%M:%S')
continue
continue
return suites
def checkperf(suite, reference):
from os import isatty
fmtfail = isatty(1) and '\033[31mFAILURE: %s\033[0m' or 'FAILURE: %s'
fmtwarn = isatty(1) and '\033[33mWARNING: %s\033[0m' or 'WARNING: %s'
def fail(msg, *args): print fmtfail % msg % args
def warn(msg, *args): print fmtwarn % msg % args
for r in suite.results.values():
durations = []
for rs in reference:
rr = rs.results.get(r.name)
if rr: durations.append(float(rr.duration.seconds))
if len(durations) == 0:
continue
dmax = max(durations)
davg = sum(durations)/len(durations)
if r.duration.seconds > (dmax * 1.01 + 1):
fail('%s %ds needed, but maximum duration was %ds in %s',
r.name, r.duration.seconds, dmax, rs.version)
continue
if r.duration.seconds > (davg * 1.05 + 1):
warn('%s run for %ds, but average duration was %ds in %s',
r.name, r.duration.seconds, davg, rs.version)
continue
def main(source):
'''Main routine of the script'''
from datetime import timedelta
# load the result cache
cache = ResultCache()
cache.load()
# read test log and update the result cache
suites = parselog(source)
for s in suites:
cache.add(s)
cache.save()
# compare current test performance with performance from previous runs
for s in suites:
checkperf(s, cache.findcurr(s))
checkperf(s, cache.findprev(s))
# declare performance categories
categories = [Category(name, timedelta(seconds=timeout)) for name, timeout
in zip(['fast', 'slow', 'glacial'], [0, 5, 30])]
# put results into categories
for s in suites:
for r in s.results.values():
for c in reversed(categories):
if r.duration >= c.timeout:
c.results.append(r)
break
# print general performance stats
print '# number of %s tests: %s' % ('/'.join([c.name for c in categories]),
'/'.join([str(len(c.results)) for c in categories]))
print
# print all tests not qualifying for the 'fast' category
for c in [c for c in categories if c.name != 'fast']:
for r in c.results:
print '%s: %s (%s seconds)' % (r.name, c.name, r.duration.seconds)
print
def test_versions():
assert Version('1.0') == Version('1.0')
assert Version('1.0') < Version('2.0')
assert Version('1.0') < Version('1.1')
assert Version('1.0') < Version('1.0-1')
assert Version('1.0') < Version('1.0+1')
assert Version('1.0') > Version('1.0~git1')
assert Version('1.0~git1') < Version('1.0~git2')
assert Version('2.0') > Version('1.0')
if '__main__' == __name__:
from sys import argv
if len(argv) != 2:
raise SystemExit('Usage: %s FILENAME' % argv[0])
test_versions()
main(file(argv[1]))
|
lgpl-2.1
| 5,286,435,379,430,205,000 | 32.627329 | 92 | 0.579608 | false |
spywhere/Javatar
|
commands/creates/create_class.py
|
1
|
13833
|
import sublime
import sublime_plugin
import os.path
from ...core import (
JavaClass,
JavaClassPath,
JavaUtils,
RE,
SnippetsManager,
StateProperty
)
from ...utils import (
ActionHistory,
StatusManager
)
EXTENDS_IMPLEMENTS_RE = "([:<])"
MAIN_TEMPLATE = "public static void main(String[] args) {\n\t\t${1}\n\t}"
VISIBILITY_MAP = {
"public": "public ",
"default": "",
"private": "private ",
"protected": "protected "
}
MODIFIER_MAP = {
"abstract": "abstract ",
"final": "final "
}
class JavatarCreateCommand(sublime_plugin.WindowCommand):
"""
Command to show menu which use to create a new Java file
"""
def find_keyword(self, jclass, keywords, default=None):
"""
Returns a 3-tuple consists of (trimmed class, keyword, value) by
remove the matched keyword
@param jclass: a class path to trimmed
@param keywords: a keyword dictionary
@param default: a 2-tuple/list for default keyword and value when no
keyword matched
"""
default = default or (None, None)
for key, value in keywords.items():
if jclass.get().lower().startswith(key):
return (JavaClass(jclass.get()[len(key):]), key, value)
return (jclass,) + tuple(default)
def parse_class_info(self, text):
"""
Returns class informations by analyse the input text
@param text: text to be analysed
"""
relative_path = True
if text.startswith("~"):
text = text[1:]
relative_path = False
parts = RE().get(
"extends_implements",
EXTENDS_IMPLEMENTS_RE
).split(text)
class_path = JavaClassPath(parts.pop(0))
jclass = class_path.get_class()
jclass, visibility_keyword, visibility = self.find_keyword(
jclass,
VISIBILITY_MAP,
["public", VISIBILITY_MAP["public"]]
)
jclass, modifier_keyword, modifier = self.find_keyword(
jclass,
MODIFIER_MAP,
["", ""]
)
class_name = jclass.get()
extends = []
implements = []
while parts:
part = parts.pop(0)
if part == "<":
implements = [
cl.strip() for cl in parts.pop(0).split(",") if cl.strip()
]
elif part == ":":
extends = [
cl.strip() for cl in parts.pop(0).split(",") if cl.strip()
]
as_main = False
if class_name.lower().endswith("asmain"):
as_main = True
class_name = class_name[:-6]
body = MAIN_TEMPLATE
else:
body = "${1}"
return {
"relative_path": relative_path,
"class_name": class_name,
"package": class_path.get_package(),
"as_main": as_main,
"body": body,
"implements": implements,
"extends": extends,
"visibility_keyword": visibility_keyword,
"visibility": visibility,
"modifier_keyword": modifier_keyword,
"modifier": modifier
}
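    # Illustration (comment added for clarity, assuming JavaClassPath semantics):
    # an input such as "~com.example.MainClassAsMain:Base<Runnable" yields
    # relative_path=False, package "com.example", class_name "MainClass",
    # as_main=True, extends=["Base"] and implements=["Runnable"].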
def parse_create(self, text):
"""
If success, returns class informations, package path and file path
from input text, otherwise, returns a string described an error
@param text: text to be analysed
"""
if not StateProperty().is_project() and not StateProperty().is_file():
return "Cannot specify package location"
if not JavaUtils().is_class_path(text.strip("~"), special=True):
return "Invalid class naming"
class_info = self.parse_class_info(text)
if not class_info["class_name"]:
return "Invalid class naming"
if class_info["relative_path"] and StateProperty().get_dir():
create_directory = os.path.join(
StateProperty().get_dir(),
class_info["package"].as_path()
)
else:
create_directory = os.path.join(
StateProperty().get_source_folder(),
class_info["package"].as_path()
)
class_info["package"] = JavaUtils().to_package(create_directory)
class_info["directory"] = create_directory
class_info["file"] = os.path.join(
create_directory,
class_info["class_name"] + ".java"
)
return class_info
def build_prefix(self, info):
"""
        Returns a string describing the class that will be used as a prefix
        @param info: class information
"""
prefix = ""
if info["visibility_keyword"]:
prefix += info["visibility_keyword"]
if info["modifier_keyword"]:
prefix += " " + info["modifier_keyword"]
if info["as_main"]:
prefix += " main"
prefix += " " + self.args["create_type"]
prefix = prefix.strip()
return prefix[:1].upper() + prefix[1:].lower()
def quote_list(self, lst):
"""
        Returns a joined string in which each item of the list is quoted
@param lst: a list to be joined
"""
return ", ".join(
["\"{}\"".format(item) for item in lst]
)
def build_additional_text(self, info):
"""
        Returns a string describing additional class information, such as class
        inheritance or warnings, that will be appended to the end of
        the line
        @param info: class information
"""
additional_text = ""
if info["extends"]:
additional_text += ", extends {}".format(
self.quote_list(info["extends"][:2])
)
if len(info["extends"]) > 2:
additional_text += " and {} more classes".format(
len(info["extends"]) - 2
)
if info["implements"]:
additional_text += ", implements {}".format(
self.quote_list(info["implements"][:2])
)
if len(info["implements"]) > 2:
additional_text += " and {} more classes".format(
len(info["implements"]) - 2
)
if self.args["create_type"] == "Class" and len(info["extends"]) > 1:
additional_text += " [Warning! Class can be extent only once]"
elif self.args["create_type"] == "Enumerator" and info["extends"]:
additional_text += (
" [Warning! Enumerator use \"implements\"" +
" instead of \"extends\"]"
)
elif self.args["create_type"] == "Interface" and info["implements"]:
additional_text += (
" [Warning! Interface use \"extends\"" +
" instead of \"implements\"]"
)
return additional_text
def get_file_contents(self, info):
"""
        Returns the snippet contents if found,
        otherwise returns None
        @param info: class information
"""
class_type = self.args["create_type"]
snippet = SnippetsManager().get_snippet(class_type)
if snippet is None:
sublime.error_message(
"Snippet \"{snippet_name}\" is not found".format_map(
{
"snippet_name": class_type
}
)
)
return None
data = snippet["data"]
data = data.replace(
"%package%",
(
"package " + info["package"].as_class_path() + ";"
if info["package"].as_class_path()
else ""
)
)
inheritance = ""
# Enum can only implements interfaces
# Interface can only extends another interface
if class_type != "Enumerator" and info["extends"]:
if class_type == "Class" and len(info["extends"]) > 1:
inheritance = " extends " + info["extends"][0]
else:
inheritance = " extends " + ", ".join(info["extends"])
if class_type != "Interface" and info["implements"]:
inheritance += " implements " + ", ".join(info["implements"])
data = (
data.replace("%class%", info["class_name"])
.replace("%file%", info["file"])
.replace("%file_name%", os.path.basename(info["file"]))
.replace("%package_path%", info["package"].as_class_path())
.replace("%visibility%", info["visibility"])
.replace("%inheritance%", inheritance)
.replace("%body%", info["body"])
)
if class_type == "Class":
data = data.replace("%modifier%", info["modifier"])
return data
def insert_and_save(self, view, contents, info):
"""
        Insert contents into the specified view and save it, also organizing
        the imports if required
        @param view: a target view
        @param contents: contents to add into the view
        @param info: class information
"""
view.run_command("insert_snippet", {"contents": contents})
if info["extends"] or info["implements"]:
view.run_command("javatar_organize_imports")
view.run_command("save")
def create_class_file(self, info):
"""
        Create the specified Java class and return the status
        @param info: class information
"""
contents = self.get_file_contents(info)
if contents is None:
return False
if os.path.exists(info["file"]):
sublime.error_message(
"{class_type} \"{class_name}\" already exists".format_map({
"class_type": self.args["create_type"],
"class_name": info["class_name"]
})
)
return False
open(info["file"], "w").close()
view = sublime.active_window().open_file(info["file"])
view.set_syntax_file("Packages/Java/Java.tmLanguage")
# File Header override
view.settings().set("enable_add_template_to_empty_file", False)
sublime.set_timeout(
lambda: self.insert_and_save(view, contents, info),
100
)
return True
def on_done(self, text=""):
"""
        Create a class with information from the input text
@param text: text from input panel
"""
self.hide_status()
info = self.parse_create(text)
if isinstance(info, str):
sublime.error_message(info)
return
ActionHistory().add_action(
"javatar.commands.create.create_class.on_done",
"Create [info={info}]".format_map({
"info": info
})
)
if JavaUtils().create_package_path(
info["directory"], True) == JavaUtils().CREATE_ERROR:
return
if self.create_class_file(info):
sublime.set_timeout(lambda: StatusManager().show_status(
"{class_type} \"{class_name}\" is created within".format_map({
"class_type": self.args["create_type"],
"class_name": info["class_name"]
}) + " package \"{readable_package_path}\"".format_map({
"readable_package_path": JavaUtils().to_readable_class_path(
info["package"].as_class_path(),
as_class_path=True
)
})
), 500)
def on_change(self, text=""):
"""
        Shows information in the status bar about how the class will be created
@param text: text from input panel
"""
status = ""
info = self.parse_create(text)
if isinstance(info, str):
status = info
elif os.path.exists(info["file"]):
status = "{class_type} \"{class_name}\" already exists".format_map({
"class_type": self.args["create_type"],
"class_name": info["class_name"]
})
else:
prefix = self.build_prefix(info)
status = "{prefix} \"{class_name}\" will be created".format_map({
"prefix": prefix,
"class_name": info["class_name"]
})
status += " within package \"{readable_package_path}\"".format_map({
"readable_package_path": JavaUtils().to_readable_class_path(
info["package"].as_class_path(),
as_class_path=True
)
})
status += " {additional_text}".format_map({
"additional_text": self.build_additional_text(info)
})
StatusManager().show_status(
status,
delay=-1,
ref="create_description"
)
def hide_status(self):
"""
        Hides the text that is shown by on_change
"""
StatusManager().hide_status("create_description")
def run(self, create_type=None):
"""
        Create the specified Java file
@param create_type: a snippet type to create
"""
self.args = {
"create_type": create_type
}
ActionHistory().add_action(
"javatar.commands.create.create_class.run",
"Create [create_type={create_type}]".format_map(self.args)
)
sublime.active_window().show_input_panel(
"{create_type} Name:".format_map(self.args),
"",
self.on_done,
self.on_change,
self.hide_status
)
|
mit
| 719,629,061,234,050,600 | 32.01432 | 80 | 0.51247 | false |
meejah/pyobfsproxy
|
obfsproxy/common/log.py
|
1
|
1094
|
# obfsproxy logging code
import logging
import sys
# XXX Add Formatter!!!
our_logger = logging.getLogger('our_logger')
default_handler = logging.StreamHandler(sys.stdout)
our_logger.addHandler(default_handler)
our_logger.propagate = False
def set_log_file(filename):
"""Set up our logger so that it starts logging to file in 'filename' instead."""
# remove the default handler, and add the FileHandler:
our_logger.removeHandler(default_handler)
log_handler = logging.FileHandler(filename)
our_logger.addHandler(log_handler)
def set_log_severity(sev_string):
"""Update our minimum logging severity to 'sev_string'."""
# Turn it into a numeric level that logging understands first.
numeric_level = getattr(logging, sev_string.upper(), None)
our_logger.setLevel(numeric_level)
def disable_logs():
"""Disable all logging."""
logging.disable(logging.CRITICAL)
# Redirect logging functions to our custom logger.
debug = our_logger.debug
info = our_logger.info
warning = our_logger.warning
error = our_logger.error
critical = our_logger.critical
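# Hedged usage sketch (not part of the original module; the file path and the
# import form are illustrative):
#
#     from obfsproxy.common import log
#     log.set_log_file('/tmp/obfsproxy.log')
#     log.set_log_severity('info')
#     log.info("obfsproxy started")
#
# Note that set_log_severity() relies on getattr(), which falls back to None
# for an unrecognized severity name, so callers should validate the string.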
|
bsd-3-clause
| 8,584,981,672,234,394,000 | 27.051282 | 84 | 0.738574 | false |
buzz/volctl
|
volctl/osd.py
|
1
|
7911
|
"""
OSD volume overlay
A transparent OSD volume indicator for the bottom-right corner.
Various code snippets taken from https://github.com/kozec/sc-controller
"""
import math
import cairo
from gi.repository import Gdk, Gtk, GdkX11, GLib
import volctl.xwrappers as X
class VolumeOverlay(Gtk.Window):
"""Window that displays volume sliders."""
BASE_WIDTH = 200
BASE_HEIGHT = 200
BASE_FONT_SIZE = 42
BASE_LINE_WIDTH = 5
SCREEN_MARGIN = 64
BASE_PADDING = 24
BG_OPACITY = 0.85
BG_CORNER_RADIUS = 8
MUTE_OPACITY = 0.2
TEXT_OPACITY = 0.8
NUM_BARS = 16
def __init__(self, volctl):
super().__init__()
self._volctl = volctl
self.position = (-self.SCREEN_MARGIN, -self.SCREEN_MARGIN)
scale = self._volctl.settings.get_int("osd-scale") / 100
self._width = int(self.BASE_WIDTH * scale)
self._height = int(self.BASE_HEIGHT * scale)
self._font_size = int(self.BASE_FONT_SIZE * scale)
self._line_width = self.BASE_LINE_WIDTH * scale
self._padding = int(self.BASE_PADDING * scale)
self._corner_radius = int(self.BG_CORNER_RADIUS * scale)
self.set_default_size(self._width, self._height)
self._volume = 0
self._mute = False
self._hide_timeout = None
self._fadeout_timeout = None
self._opacity = 1.0
self.set_decorated(False)
self.stick()
self.set_skip_taskbar_hint(True)
self.set_skip_pager_hint(True)
self.set_keep_above(True)
self.set_type_hint(Gdk.WindowTypeHint.NOTIFICATION)
self.set_resizable(False)
self.screen = self.get_screen()
self.visual = self.screen.get_rgba_visual()
if self.visual is not None and self.screen.is_composited():
self._compositing = True
self.set_visual(self.visual)
else:
self._compositing = False
self.set_app_paintable(True)
self.connect("draw", self._draw_osd)
self.realize()
self.get_window().set_override_redirect(True)
self._move_to_corner()
Gtk.Window.show(self)
self._make_window_clickthrough()
def update_values(self, volume, mute):
"""Remember current volume and mute values."""
self._volume = volume
self._mute = mute
self._unhide()
if self._hide_timeout is not None:
GLib.Source.remove(self._hide_timeout)
self._hide_timeout = GLib.timeout_add(
self._volctl.settings.get_int("osd-timeout"), self._cb_hide_timeout
)
def _move_to_corner(self):
xpos, ypos = self._compute_position()
if xpos < 0: # Negative X position is counted from right border
xpos = Gdk.Screen.width() - self.get_allocated_width() + xpos + 1
if ypos < 0: # Negative Y position is counted from bottom border
ypos = Gdk.Screen.height() - self.get_allocated_height() + ypos + 1
self.move(xpos, ypos)
def _draw_osd(self, _, cairo_r):
"""Draw on-screen volume display."""
mute_opacity = self.MUTE_OPACITY if self._mute else 1.0
xcenter = self._width / 2
# Background
deg = math.pi / 180.0
cairo_r.new_sub_path()
cairo_r.arc(
self._width - self._corner_radius,
self._corner_radius,
self._corner_radius,
-90 * deg,
0,
)
cairo_r.arc(
self._width - self._corner_radius,
self._height - self._corner_radius,
self._corner_radius,
0,
90 * deg,
)
cairo_r.arc(
self._corner_radius,
self._height - self._corner_radius,
self._corner_radius,
90 * deg,
180 * deg,
)
cairo_r.arc(
self._corner_radius,
self._corner_radius,
self._corner_radius,
180 * deg,
270 * deg,
)
cairo_r.close_path()
cairo_r.set_source_rgba(0.1, 0.1, 0.1, self.BG_OPACITY * self._opacity)
cairo_r.set_operator(cairo.OPERATOR_SOURCE)
cairo_r.fill()
cairo_r.set_operator(cairo.OPERATOR_OVER)
# Color
cairo_r.set_source_rgba(
1.0, 1.0, 1.0, self.TEXT_OPACITY * mute_opacity * self._opacity
)
# Text
text = "{:d} %".format(round(100 * self._volume))
cairo_r.select_font_face("sans-serif")
cairo_r.set_font_size(self._font_size)
_, _, text_width, text_height, _, _ = cairo_r.text_extents(text)
cairo_r.move_to(xcenter - text_width / 2, self._height - self._padding)
cairo_r.show_text(text)
# Volume indicator
ind_height = self._height - 3 * self._padding - text_height
outer_radius = ind_height / 2
inner_radius = outer_radius / 1.618
bars = min(round(self.NUM_BARS * self._volume), self.NUM_BARS)
cairo_r.set_line_width(self._line_width)
cairo_r.set_line_cap(cairo.LINE_CAP_ROUND)
for i in range(bars):
cairo_r.identity_matrix()
cairo_r.translate(xcenter, self._padding + ind_height / 2)
cairo_r.rotate(math.pi + 2 * math.pi / self.NUM_BARS * i)
cairo_r.move_to(0.0, -inner_radius)
cairo_r.line_to(0.0, -outer_radius)
cairo_r.stroke()
def _compute_position(self):
"""Adjusts position for currently active screen (display)."""
xpos, ypos = self.position
width, height = self._get_window_size()
geometry = self._get_active_screen_geometry()
if geometry:
if xpos < 0:
xpos = xpos + geometry.x + geometry.width - width
else:
xpos = xpos + geometry.x
if ypos < 0:
ypos = ypos + geometry.y + geometry.height - height
else:
ypos = geometry.y + ypos
return xpos, ypos
def _make_window_clickthrough(self):
"""Make events pass through window."""
dpy = X.Display(hash(GdkX11.x11_get_default_xdisplay()))
try:
xid = self.get_window().get_xid()
except AttributeError:
# Probably on Wayland
return
win = X.XID(xid)
reg = X.create_region(dpy, None, 0)
X.set_window_shape_region(dpy, win, X.SHAPE_BOUNDING, 0, 0, 0)
X.set_window_shape_region(dpy, win, X.SHAPE_INPUT, 0, 0, reg)
X.destroy_region(dpy, reg)
def _get_active_screen_geometry(self):
"""
Returns geometry of active screen or None if active screen
cannot be determined.
"""
screen = self.get_window().get_screen()
active_window = screen.get_active_window()
if active_window:
monitor = screen.get_monitor_at_window(active_window)
if monitor is not None:
return screen.get_monitor_geometry(monitor)
return None
def _get_window_size(self):
return self.get_window().get_width(), self.get_window().get_height()
def _hide(self):
if self._compositing:
self._fadeout_timeout = GLib.timeout_add(30, self._cb_fadeout_timeout)
else:
self.destroy()
def _unhide(self):
if self._fadeout_timeout is not None:
GLib.Source.remove(self._fadeout_timeout)
self._fadeout_timeout = None
self._move_to_corner()
self._opacity = 1.0
self.queue_draw()
def _cb_fadeout_timeout(self):
self._opacity -= 0.05
self.queue_draw()
if self._opacity >= 0:
return True
self._opacity = 0.0
self._fadeout_timeout = None
self.destroy()
return False
def _cb_hide_timeout(self):
self._hide_timeout = None
self._hide()
|
gpl-2.0
| 1,739,896,336,297,428,500 | 32.100418 | 82 | 0.562761 | false |
classcat/cc-prism-nsm
|
main/webapp/ccp_conf.py
|
1
|
1509
|
#############################################################
# ClassCat(R) Prism for HIDS
# Copyright (C) 2015 ClassCat Co.,Ltd. All rights reserved.
##############################################################
# === Notice ===
# all python scripts were written by masao (@classcat.com)
#
# === History ===
# 02-aug-15 : fixed for beta.
#
import os.path
ccp_conf = {
'lang' : 'ja',
'username' : 'cc-admin',
'password' : 'ClassCat',
'myip' : '192.168.0.50',
'bro_dir' : "/usr/local/bro",
# Ossec directory
'ossec_dir' : "/var/ossec",
# Maximum alerts per page
'ossec_max_alerts_per_page' : 1000,
# Default search values
'ossec_search_level' : 7,
'ossec_search_time' : 14400,
'ossec_refresh_time' : 90
}
class CCPConf (object):
def __init__(self):
self.lang = ccp_conf['lang']
self.username = ccp_conf['username']
self.password = ccp_conf['password']
self.myip = ccp_conf['myip']
self.bro_dir = ccp_conf['bro_dir']
"""
self.ossec_dir = ccp_conf['ossec_dir']
self.ossec_max_alerts_per_page = ccp_conf['ossec_max_alerts_per_page']
self.ossec_search_level = ccp_conf['ossec_search_level' ]
self.ossec_search_time = ccp_conf['ossec_search_time']
self.ossec_refresh_time = ccp_conf['ossec_refresh_time']
"""
    def check_dir(self):
        # Note: self.ossec_dir is only assigned when the OSSEC block in
        # __init__ (currently commented out) is re-enabled.
        if os.path.exists(self.ossec_dir):
return True
else:
return False
|
agpl-3.0
| 7,328,809,626,072,071,000 | 22.578125 | 78 | 0.526839 | false |
shoopio/shoop
|
shuup/core/pricing/_price_display_options.py
|
2
|
1631
|
# This file is part of Shuup.
#
# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
class PriceDisplayOptions(object):
"""
Price display options.
Parameters on how prices should be rendered.
"""
def __init__(self, include_taxes=None, show_prices=True):
"""
Initialize price display options.
:type include_taxes: bool|None
:param include_taxes:
            Whether to include taxes in rendered prices or not. If None,
            show prices in their original taxness.
        :type show_prices: bool
        :param show_prices:
            Whether to show prices at all.
"""
self.include_taxes = include_taxes
self.show_prices = show_prices
@property
def hide_prices(self):
return not self.show_prices
@classmethod
def from_context(cls, context):
"""
Get price display options from context.
:type context: jinja2.runtime.Context|dict
:rtype: PriceDisplayOptions
"""
options = context.get('price_display_options')
if options is None:
request = context.get('request') # type: django.http.HttpRequest
options = getattr(request, 'price_display_options', None)
if options is None:
options = cls()
return options
def set_for_request(self, request):
"""
Set price display options of given request to self.
"""
request.price_display_options = self
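# Hedged usage sketch (illustrative only, not part of the original module):
#
#     options = PriceDisplayOptions(include_taxes=False)
#     options.set_for_request(request)
#     ...
#     options = PriceDisplayOptions.from_context({"request": request})
#
# from_context() first looks for a "price_display_options" key in the given
# context, then falls back to request.price_display_options, and finally to
# default options (prices shown, taxes left in their original state).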
|
agpl-3.0
| 3,897,043,345,657,898,500 | 27.614035 | 77 | 0.618639 | false |
mohjaba/Health-Activity-Monitoring
|
1_code/interactive_map/Demo1/twitter_stream.py
|
1
|
1207
|
import tweepy
import json
from pymongo import Connection
from bson import json_util
from tweepy.utils import import_simplejson
json = import_simplejson()
mongocon = Connection()
db = mongocon.twiter_data
col = db.tweets_stream
consumer_key = 'AYh6x5HIt5ubprSXeEGVqmLnT'
consumer_secret = 'wx4wC2ttf3hS34iGPMeL6VAifwIZ7AOCbqT9Z4Vri0ZQPDhQrF'
access_token_key = '92291123-Wysxd5FnzMAKZWBQzFlExWRrq6kSduWd78J9TYlpk'
access_token_secret = 'stP7FrAwohkVmo3HYZ3oRUqm1jcgvVK9rZubEVX7Tlreq'
auth1 = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth1.set_access_token(access_token_key, access_token_secret)
class StreamListener(tweepy.StreamListener):
mongocon = Connection()
db = mongocon.tstream
col = db.tweets
json = import_simplejson()
def on_status(self, tweet):
print 'Ran on_status'
def on_error(self, status_code):
return False
def on_data(self, data):
if data[0].isdigit():
pass
else:
col.insert(json.loads(data))
print(json.loads(data))
l = StreamListener()
streamer = tweepy.Stream(auth=auth1, listener=l)
setTerms = ['#CrossFit', '#loseit', 'twitter']
streamer.filter(track = setTerms)
|
gpl-2.0
| -1,814,583,310,914,595,000 | 25.822222 | 71 | 0.720795 | false |
EmrysChe/My_Python_practice
|
codes/extra/backup_script.py
|
1
|
1043
|
#!/usr/bin/python3
###################################
# for Linux #
###################################
import os
import time
source_dir = ['/root/data']
target_dir = '/root/backup'
today_log_dir = target_dir + os.sep + time.strftime('%y%m%d')
success_count = 0
fail_count = 0
if not os.path.exists(today_log_dir):
os.makedirs(today_log_dir)
for path in source_dir:
if not os.path.exists(path):
print('{0} does not exist'.format(path))
fail_count = fail_count + 1
continue
else:
tmp = path.split('/')
backup_file = today_log_dir + os.sep + tmp[len(tmp) - 1] + '_' + time.strftime('%H%M%S') + '.tar.gz'
cmd = 'tar -zcPf ' + backup_file + ' ' + path
if os.system(cmd) == 0:
print('%r done' % cmd)
success_count = success_count + 1
else:
print('%r failed' % cmd)
fail_count = fail_count + 1
print('backup:{0} have done.{1} succeeded,{2} failed.'.format(len(source_dir),success_count,fail_count))
|
gpl-2.0
| -5,788,582,095,096,189,000 | 31.59375 | 108 | 0.520614 | false |
theaeolianmachine/hokiefinder
|
hokiefinder/geolocator/google_geocoding.py
|
1
|
1103
|
import json
import requests
GEOCODING_URL = 'http://maps.googleapis.com/maps/api/geocode/json'
ZERO_RESULTS = 'ZERO_RESULTS'
OVER_QUERY_LIMIT = 'OVER_QUERY_LIMIT'
REQUEST_DENIED = 'REQUEST_DENIED'
INVALID_REQUEST = 'INVALID_REQUEST'
ERRORS = (ZERO_RESULTS, OVER_QUERY_LIMIT, REQUEST_DENIED, INVALID_REQUEST)
def get_location(address):
params = {'address': address, 'sensor': 'false'}
headers = {'accept-encoding': 'gzip'}
response = requests.get(GEOCODING_URL, params=params, headers=headers)
json_dict = response.json()
status = json_dict['status']
if status in ERRORS:
print status
return None
else:
if len(json_dict['results']) > 0:
result = json_dict['results'][0] # Always use first result
try:
addr = result['formatted_address']
lat = result['geometry']['location']['lat']
lng = result['geometry']['location']['lng']
except KeyError:
return None
else:
return (addr, lat, lng)
else:
return None
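# Hedged usage sketch (added for illustration; the address is arbitrary and
# the call hits the live Google Geocoding API, so treat it as a manual smoke
# test rather than part of the module):
if __name__ == '__main__':
    result = get_location('Blacksburg, VA')
    if result is None:
        print('No geocoding result')
    else:
        formatted_address, latitude, longitude = result
        print(formatted_address)
        print((latitude, longitude))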
|
mit
| 1,528,149,819,978,163,500 | 32.424242 | 74 | 0.596555 | false |
naturali/tensorflow
|
tensorflow/python/framework/tensor_shape.py
|
1
|
26539
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper classes for tensor shape inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python.util import compat
class Dimension(object):
"""Represents the value of one dimension in a TensorShape."""
def __init__(self, value):
"""Creates a new Dimension with the given value."""
if value is None:
self._value = None
else:
self._value = int(value)
if (not isinstance(value, compat.bytes_or_text_types)
and self._value != value):
raise ValueError("Ambiguous dimension: %s" % value)
if self._value < 0:
raise ValueError("Dimension %d must be >= 0" % self._value)
def __repr__(self):
return "Dimension(%s)" % repr(self._value)
def __str__(self):
value = self._value
return "?" if value is None else str(value)
def __eq__(self, other):
"""Returns true if `other` has the same known value as this Dimension."""
try:
other = as_dimension(other)
except ValueError:
return NotImplemented
if self._value is None or other.value is None:
return None
return self._value == other.value
def __ne__(self, other):
"""Returns true if `other` has a different known value from `self`."""
try:
other = as_dimension(other)
except ValueError:
return NotImplemented
if self._value is None or other.value is None:
return None
return self._value != other.value
def __int__(self):
return self._value
def __index__(self):
# Allow use in Python 3 range
return self._value
@property
def value(self):
"""The value of this dimension, or None if it is unknown."""
return self._value
def is_compatible_with(self, other):
"""Returns true if `other` is compatible with this Dimension.
Two known Dimensions are compatible if they have the same value.
An unknown Dimension is compatible with all other Dimensions.
Args:
other: Another Dimension.
Returns:
True if this Dimension and `other` are compatible.
"""
other = as_dimension(other)
return (self._value is None
or other.value is None
or self._value == other.value)
def assert_is_compatible_with(self, other):
"""Raises an exception if `other` is not compatible with this Dimension.
Args:
other: Another Dimension.
Raises:
ValueError: If `self` and `other` are not compatible (see
is_compatible_with).
"""
if not self.is_compatible_with(other):
raise ValueError("Dimensions %s and %s are not compatible"
% (self, other))
def merge_with(self, other):
"""Returns a Dimension that combines the information in `self` and `other`.
Dimensions are combined as follows:
Dimension(n) .merge_with(Dimension(n)) == Dimension(n)
Dimension(n) .merge_with(Dimension(None)) == Dimension(n)
Dimension(None).merge_with(Dimension(n)) == Dimension(n)
Dimension(None).merge_with(Dimension(None)) == Dimension(None)
Dimension(n) .merge_with(Dimension(m)) raises ValueError for n != m
Args:
other: Another Dimension.
Returns:
A Dimension containing the combined information of `self` and
`other`.
Raises:
ValueError: If `self` and `other` are not compatible (see
is_compatible_with).
"""
other = as_dimension(other)
self.assert_is_compatible_with(other)
if self._value is None:
return Dimension(other.value)
else:
return Dimension(self._value)
def __add__(self, other):
"""Returns the sum of `self` and `other`.
Dimensions are summed as follows:
Dimension(m) + Dimension(n) == Dimension(m + n)
Dimension(m) + Dimension(None) == Dimension(None)
Dimension(None) + Dimension(n) == Dimension(None)
Dimension(None) + Dimension(None) == Dimension(None)
Args:
other: Another Dimension.
Returns:
A Dimension whose value is the sum of `self` and `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value + other.value)
def __sub__(self, other):
"""Returns the subtraction of `other` from `self`.
Dimensions are subtracted as follows:
Dimension(m) - Dimension(n) == Dimension(m - n)
Dimension(m) - Dimension(None) == Dimension(None)
Dimension(None) - Dimension(n) == Dimension(None)
Dimension(None) - Dimension(None) == Dimension(None)
Args:
other: Another Dimension.
Returns:
      A Dimension whose value is the subtraction of `other` from `self`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value - other.value)
def __mul__(self, other):
"""Returns the product of `self` and `other`.
    Dimensions are multiplied as follows:
Dimension(m) * Dimension(n) == Dimension(m * n)
Dimension(m) * Dimension(None) == Dimension(None)
Dimension(None) * Dimension(n) == Dimension(None)
Dimension(None) * Dimension(None) == Dimension(None)
Args:
other: Another Dimension.
Returns:
A Dimension whose value is the product of `self` and `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value * other.value)
def __floordiv__(self, other):
"""Returns the quotient of `self` and `other` rounded down.
Dimensions are divided as follows:
Dimension(m) // Dimension(n) == Dimension(m // n)
Dimension(m) // Dimension(None) == Dimension(None)
Dimension(None) // Dimension(n) == Dimension(None)
Dimension(None) // Dimension(None) == Dimension(None)
Args:
other: Another `Dimension`.
Returns:
A `Dimension` whose value is the integer quotient of `self` and `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value // other.value)
def __div__(self, other):
"""DEPRECATED: Use `__floordiv__` via `x // y` instead.
This function exists only for backwards compatibility purposes; new code
should use `__floordiv__` via the syntax `x // y`. Using `x // y`
communicates clearly that the result rounds down, and is forward compatible
to Python 3.
Args:
other: Another `Dimension`.
Returns:
A `Dimension` whose value is the integer quotient of `self` and `other`.
"""
return self // other
def __mod__(self, other):
"""Returns `self` modulo `other.
Dimension moduli are computed as follows:
Dimension(m) % Dimension(n) == Dimension(m % n)
Dimension(m) % Dimension(None) == Dimension(None)
Dimension(None) % Dimension(n) == Dimension(None)
Dimension(None) % Dimension(None) == Dimension(None)
Args:
other: Another Dimension.
Returns:
A Dimension whose value is `self` modulo `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value % other.value)
def __lt__(self, other):
"""Returns True if `self` is known to be less than `other`.
Dimensions are compared as follows:
Dimension(m) < Dimension(n) == m < n
Dimension(m) < Dimension(None) == None
Dimension(None) < Dimension(n) == None
Dimension(None) < Dimension(None) == None
Args:
other: Another Dimension.
Returns:
The value of `self.value < other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value < other.value
def __le__(self, other):
"""Returns True if `self` is known to be less than or equal to `other`.
Dimensions are compared as follows:
Dimension(m) <= Dimension(n) == m <= n
Dimension(m) <= Dimension(None) == None
Dimension(None) <= Dimension(n) == None
Dimension(None) <= Dimension(None) == None
Args:
other: Another Dimension.
Returns:
The value of `self.value <= other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value <= other.value
def __gt__(self, other):
"""Returns True if `self` is known to be greater than `other`.
Dimensions are compared as follows:
Dimension(m) > Dimension(n) == m > n
Dimension(m) > Dimension(None) == None
Dimension(None) > Dimension(n) == None
Dimension(None) > Dimension(None) == None
Args:
other: Another Dimension.
Returns:
The value of `self.value > other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value > other.value
def __ge__(self, other):
"""Returns True if `self` is known to be greater than or equal to `other`.
Dimensions are compared as follows:
Dimension(m) >= Dimension(n) == m >= n
Dimension(m) >= Dimension(None) == None
Dimension(None) >= Dimension(n) == None
Dimension(None) >= Dimension(None) == None
Args:
other: Another Dimension.
Returns:
The value of `self.value >= other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value >= other.value
def as_dimension(value):
"""Converts the given value to a Dimension.
  A Dimension input will be returned unmodified.
An input of `None` will be converted to an unknown Dimension.
An integer input will be converted to a Dimension with that value.
Args:
value: The value to be converted.
Returns:
A Dimension corresponding to the given value.
"""
if isinstance(value, Dimension):
return value
else:
return Dimension(value)
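# Hedged examples of Dimension arithmetic (illustrative, not part of the
# original module); an unknown dimension propagates through every operator:
#
#     Dimension(2) + Dimension(3)      # ==> Dimension(5)
#     Dimension(2) * Dimension(None)   # ==> Dimension(None)
#     Dimension(7) // Dimension(2)     # ==> Dimension(3)
#     Dimension(2) == Dimension(None)  # ==> None, not False
#     as_dimension(None).value         # ==> None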
class TensorShape(object):
"""Represents the shape of a `Tensor`.
A `TensorShape` represents a possibly-partial shape specification for a
`Tensor`. It may be one of the following:
* *Fully-known shape:* has a known number of dimensions and a known size
for each dimension.
* *Partially-known shape:* has a known number of dimensions, and an unknown
size for one or more dimension.
* *Unknown shape:* has an unknown number of dimensions, and an unknown
size in all dimensions.
If a tensor is produced by an operation of type `"Foo"`, its shape
may be inferred if there is a registered shape function for
`"Foo"`. See [`tf.RegisterShape()`](../../api_docs/python/framework.md#RegisterShape)
for details of shape
functions and how to register them. Alternatively, the shape may be set
explicitly using [`Tensor.set_shape()`](../../api_docs/python/framework.md#Tensor.set_shape).
@@merge_with
@@concatenate
@@ndims
@@dims
@@as_list
@@as_proto
@@is_compatible_with
@@is_fully_defined
@@with_rank
@@with_rank_at_least
@@with_rank_at_most
@@assert_has_rank
@@assert_same_rank
@@assert_is_compatible_with
@@assert_is_fully_defined
"""
def __init__(self, dims):
"""Creates a new TensorShape with the given dimensions.
Args:
dims: A list of Dimensions, or None if the shape is unspecified.
DEPRECATED: A single integer is treated as a singleton list.
Raises:
TypeError: If dims cannot be converted to a list of dimensions.
"""
# TODO(irving): Eliminate the single integer special case.
if dims is None:
self._dims = None
elif isinstance(dims, compat.bytes_or_text_types):
raise TypeError("A string has ambiguous TensorShape, please wrap in a "
"list or convert to an int: %s" % dims)
elif isinstance(dims, tensor_shape_pb2.TensorShapeProto):
if dims.unknown_rank:
self._dims = None
else:
self._dims = [
# Protos store variable-size dimensions as -1
as_dimension(dim.size if dim.size != -1 else None)
for dim in dims.dim]
elif isinstance(dims, TensorShape):
self._dims = dims.dims
else:
try:
dims_iter = iter(dims)
except TypeError:
# Treat as a singleton dimension
self._dims = [as_dimension(dims)]
else:
# Got a list of dimensions
self._dims = [as_dimension(d) for d in dims_iter]
def __repr__(self):
return "TensorShape(%r)" % self._dims
def __str__(self):
if self.ndims is None:
return "<unknown>"
elif self.ndims == 1:
return "(%s,)" % self._dims[0]
else:
return "(%s)" % ", ".join(str(d) for d in self._dims)
@property
def dims(self):
"""Returns a list of Dimensions, or None if the shape is unspecified."""
return self._dims
@property
def ndims(self):
"""Returns the rank of this shape, or None if it is unspecified."""
if self._dims is None:
return None
else:
return len(self._dims)
def __len__(self):
"""Returns the rank of this shape, or raises ValueError if unspecified."""
if self._dims is None:
raise ValueError("Cannot take the length of Shape with unknown rank.")
return len(self._dims)
def __bool__(self):
"""Returns True if this shape contains non-zero information."""
return self._dims is not None
# Python 3 wants __bool__, Python 2.7 wants __nonzero__
__nonzero__ = __bool__
def __iter__(self):
"""Returns `self.dims` if the rank is known, otherwise raises ValueError."""
if self._dims is None:
raise ValueError("Cannot iterate over a shape with unknown rank.")
else:
return iter(self._dims)
def __getitem__(self, key):
"""Returns the value of a dimension or a shape, depending on the key.
Args:
key: If `key` is an integer, returns the dimension at that index;
otherwise if `key` is a slice, returns a TensorShape whose
dimensions are those selected by the slice from `self`.
Returns:
A dimension if `key` is an integer, or a `TensorShape` if `key` is a
slice.
Raises:
ValueError: If `key` is a slice, and any of its elements are negative, or
if `self` is completely unknown and the step is set.
"""
if self._dims is not None:
if isinstance(key, slice):
return TensorShape(self._dims[key])
else:
return self._dims[key]
else:
if isinstance(key, slice):
start = key.start if key.start is not None else 0
stop = key.stop
if key.step is not None:
# TODO(mrry): Handle these maybe.
raise ValueError("Steps are not yet handled")
if stop is None:
# NOTE(mrry): This implies that TensorShape(None) is compatible with
# TensorShape(None)[1:], which is obviously not true. It would be
# possible to track the number of dimensions symbolically,
# and perhaps we should do that.
return unknown_shape()
elif start < 0 or stop < 0:
# TODO(mrry): Handle this better, as it will be useful for handling
# suffixes of otherwise unknown shapes.
return unknown_shape()
else:
return unknown_shape(ndims=stop-start)
else:
return Dimension(None)
def num_elements(self):
"""Returns the total number of elements, or none for incomplete shapes."""
if self.is_fully_defined():
size = 1
for dim in self._dims:
size *= dim.value
return size
else:
return None
def merge_with(self, other):
"""Returns a `TensorShape` combining the information in `self` and `other`.
The dimensions in `self` and `other` are merged elementwise,
according to the rules defined for `Dimension.merge_with()`.
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` containing the combined information of `self` and
`other`.
Raises:
ValueError: If `self` and `other` are not compatible.
"""
other = as_shape(other)
if self._dims is None:
return other
else:
try:
self.assert_same_rank(other)
new_dims = []
for i, dim in enumerate(self._dims):
new_dims.append(dim.merge_with(other[i]))
return TensorShape(new_dims)
except ValueError:
raise ValueError("Shapes %s and %s are not compatible" %
(self, other))
def concatenate(self, other):
"""Returns the concatenation of the dimension in `self` and `other`.
*N.B.* If either `self` or `other` is completely unknown,
concatenation will discard information about the other shape. In
future, we might support concatenation that preserves this
information for use with slicing.
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` whose dimensions are the concatenation of the
dimensions in `self` and `other`.
"""
# TODO(mrry): Handle the case where we concatenate a known shape with a
# completely unknown shape, so that we can use the partial information.
other = as_shape(other)
if self._dims is None or other.dims is None:
return unknown_shape()
else:
return TensorShape(self._dims + other.dims)
def assert_same_rank(self, other):
"""Raises an exception if `self` and `other` do not have compatible ranks.
Args:
other: Another `TensorShape`.
Raises:
ValueError: If `self` and `other` do not represent shapes with the
same rank.
"""
other = as_shape(other)
if self.ndims is not None and other.ndims is not None:
if self.ndims != other.ndims:
raise ValueError(
"Shapes %s and %s must have the same rank" % (self, other))
def assert_has_rank(self, rank):
"""Raises an exception if `self` is not compatible with the given `rank`.
Args:
rank: An integer.
Raises:
ValueError: If `self` does not represent a shape with the given `rank`.
"""
if self.ndims not in (None, rank):
raise ValueError("Shape %s must have rank %d" % (self, rank))
def with_rank(self, rank):
"""Returns a shape based on `self` with the given rank.
This method promotes a completely unknown shape to one with a
known rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with the given rank.
Raises:
ValueError: If `self` does not represent a shape with the given `rank`.
"""
try:
return self.merge_with(unknown_shape(ndims=rank))
except ValueError:
raise ValueError("Shape %s must have rank %d" % (self, rank))
def with_rank_at_least(self, rank):
"""Returns a shape based on `self` with at least the given rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with at least the given
rank.
Raises:
ValueError: If `self` does not represent a shape with at least the given
`rank`.
"""
if self.ndims is not None and self.ndims < rank:
raise ValueError("Shape %s must have rank at least %d" % (self, rank))
else:
return self
def with_rank_at_most(self, rank):
"""Returns a shape based on `self` with at most the given rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with at most the given
rank.
Raises:
ValueError: If `self` does not represent a shape with at most the given
`rank`.
"""
if self.ndims is not None and self.ndims > rank:
raise ValueError("Shape %s must have rank at most %d" % (self, rank))
else:
return self
def is_compatible_with(self, other):
"""Returns True iff `self` is compatible with `other`.
Two possibly-partially-defined shapes are compatible if there
exists a fully-defined shape that both shapes can represent. Thus,
compatibility allows the shape inference code to reason about
partially-defined shapes. For example:
* TensorShape(None) is compatible with all shapes.
* TensorShape([None, None]) is compatible with all two-dimensional
shapes, such as TensorShape([32, 784]), and also TensorShape(None). It is
not compatible with, for example, TensorShape([None]) or
TensorShape([None, None, None]).
* TensorShape([32, None]) is compatible with all two-dimensional shapes
with size 32 in the 0th dimension, and also TensorShape([None, None])
and TensorShape(None). It is not compatible with, for example,
TensorShape([32]), TensorShape([32, None, 1]) or TensorShape([64, None]).
* TensorShape([32, 784]) is compatible with itself, and also
TensorShape([32, None]), TensorShape([None, 784]), TensorShape([None,
None]) and TensorShape(None). It is not compatible with, for example,
TensorShape([32, 1, 784]) or TensorShape([None]).
The compatibility relation is reflexive and symmetric, but not
transitive. For example, TensorShape([32, 784]) is compatible with
TensorShape(None), and TensorShape(None) is compatible with
TensorShape([4, 4]), but TensorShape([32, 784]) is not compatible with
TensorShape([4, 4]).
Args:
other: Another TensorShape.
Returns:
True iff `self` is compatible with `other`.
"""
other = as_shape(other)
if self._dims is not None and other.dims is not None:
if self.ndims != other.ndims:
return False
for x_dim, y_dim in zip(self._dims, other.dims):
if not x_dim.is_compatible_with(y_dim):
return False
return True
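  # Hedged examples of the compatibility relation described above
  # (illustrative only, not part of the original module):
  #
  #     TensorShape([32, None]).is_compatible_with(TensorShape([32, 784]))
  #         # ==> True
  #     TensorShape([32, None]).is_compatible_with(TensorShape([64, 784]))
  #         # ==> False
  #     TensorShape(None).is_compatible_with(TensorShape([4, 4]))
  #         # ==> True (a completely unknown shape is compatible with any shape)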
def assert_is_compatible_with(self, other):
"""Raises exception if `self` and `other` do not represent the same shape.
This method can be used to assert that there exists a shape that both
`self` and `other` represent.
Args:
other: Another TensorShape.
Raises:
ValueError: If `self` and `other` do not represent the same shape.
"""
if not self.is_compatible_with(other):
raise ValueError("Shapes %s and %s are incompatible" % (self, other))
def is_fully_defined(self):
"""Returns True iff `self` is fully defined in every dimension."""
return (self._dims is not None
and all(dim.value is not None for dim in self._dims))
def assert_is_fully_defined(self):
"""Raises an exception if `self` is not fully defined in every dimension.
Raises:
ValueError: If `self` does not have a known value for every dimension.
"""
if not self.is_fully_defined():
raise ValueError("Shape %s is not fully defined" % self)
def as_list(self):
"""Returns a list of integers or `None` for each dimension.
Returns:
A list of integers or `None` for each dimension.
Raises:
ValueError: If `self` is an unknown shape with an unknown rank.
"""
if self._dims is None:
raise ValueError("as_list() is not defined on an unknown TensorShape.")
return [dim.value for dim in self._dims]
def as_proto(self):
"""Returns this shape as a `TensorShapeProto`."""
if self._dims is None:
return tensor_shape_pb2.TensorShapeProto(unknown_rank=True)
else:
return tensor_shape_pb2.TensorShapeProto(dim=[
tensor_shape_pb2.TensorShapeProto.Dim(
size=-1 if d.value is None else d.value)
for d in self._dims])
def __eq__(self, other):
"""Returns True if `self` is equivalent to `other`."""
try:
other = as_shape(other)
except TypeError:
return NotImplemented
return self._dims == other.dims
def __ne__(self, other):
"""Returns True if `self` is known to be different from `other`."""
try:
other = as_shape(other)
except TypeError:
return NotImplemented
if self.ndims is None or other.ndims is None:
raise ValueError("The inequality of unknown TensorShapes is undefined.")
if self.ndims != other.ndims:
return True
return self._dims != other.dims
def as_shape(shape):
"""Converts the given object to a TensorShape."""
if isinstance(shape, TensorShape):
return shape
else:
return TensorShape(shape)
def unknown_shape(ndims=None):
"""Returns an unknown TensorShape, optionally with a known rank.
Args:
ndims: (Optional) If specified, the number of dimensions in the shape.
Returns:
An unknown TensorShape.
"""
if ndims is None:
return TensorShape(None)
else:
return TensorShape([Dimension(None)] * ndims)
def scalar():
"""Returns a shape representing a scalar."""
return TensorShape([])
def vector(length):
"""Returns a shape representing a vector.
Args:
length: The length of the vector, which may be None if unknown.
Returns:
A TensorShape representing a vector of the given length.
"""
return TensorShape([length])
def matrix(rows, cols):
"""Returns a shape representing a matrix.
Args:
rows: The number of rows in the matrix, which may be None if unknown.
cols: The number of columns in the matrix, which may be None if unknown.
Returns:
A TensorShape representing a matrix of the given size.
"""
return TensorShape([rows, cols])
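# Hedged examples of the shape constructors above (illustrative only):
#
#     scalar()                # ==> TensorShape([]), i.e. "()"
#     vector(None)            # ==> a rank-1 shape of unknown length, "(?,)"
#     matrix(32, None)        # ==> "(32, ?)"
#     unknown_shape(ndims=2)  # ==> TensorShape([Dimension(None), Dimension(None)])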
|
apache-2.0
| -1,721,125,840,426,754,000 | 29.859302 | 95 | 0.637778 | false |
eharney/cinder
|
cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py
|
1
|
289881
|
# Copyright (c) 2017 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
from copy import deepcopy
import datetime
import tempfile
import time
from xml.dom import minidom
import mock
import requests
import six
from cinder import context
from cinder import exception
from cinder.objects import fields
from cinder.objects import group
from cinder.objects import group_snapshot
from cinder.objects import volume_type
from cinder import test
from cinder.tests.unit import fake_group
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.volume.drivers.dell_emc.vmax import common
from cinder.volume.drivers.dell_emc.vmax import fc
from cinder.volume.drivers.dell_emc.vmax import iscsi
from cinder.volume.drivers.dell_emc.vmax import masking
from cinder.volume.drivers.dell_emc.vmax import provision
from cinder.volume.drivers.dell_emc.vmax import rest
from cinder.volume.drivers.dell_emc.vmax import utils
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
from cinder.zonemanager import utils as fczm_utils
CINDER_EMC_CONFIG_DIR = '/etc/cinder/'
class VMAXCommonData(object):
# array info
array = '000197800123'
srp = 'SRP_1'
srp2 = 'SRP_2'
slo = 'Diamond'
workload = 'DSS'
port_group_name_f = 'OS-fibre-PG'
port_group_name_i = 'OS-iscsi-PG'
masking_view_name_f = 'OS-HostX-F-OS-fibre-PG-MV'
masking_view_name_i = 'OS-HostX-SRP_1-I-OS-iscsi-PG-MV'
initiatorgroup_name_f = 'OS-HostX-F-IG'
initiatorgroup_name_i = 'OS-HostX-I-IG'
parent_sg_f = 'OS-HostX-F-OS-fibre-PG-SG'
parent_sg_i = 'OS-HostX-I-OS-iscsi-PG-SG'
storagegroup_name_f = 'OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG'
storagegroup_name_i = 'OS-HostX-SRP_1-Diamond-DSS-OS-iscsi-PG'
defaultstoragegroup_name = 'OS-SRP_1-Diamond-DSS-SG'
default_sg_no_slo = 'OS-no_SLO-SG'
default_sg_compr_disabled = 'OS-SRP_1-Diamond-DSS-CD-SG'
default_sg_re_enabled = 'OS-SRP_1-Diamond-DSS-RE-SG'
failed_resource = 'OS-failed-resource'
fake_host = 'HostX@Backend#Diamond+DSS+SRP_1+000197800123'
new_host = 'HostX@Backend#Silver+OLTP+SRP_1+000197800123'
version = '3.0.0'
volume_wwn = '600000345'
remote_array = '000197800124'
device_id = '00001'
device_id2 = '00002'
rdf_group_name = '23_24_007'
rdf_group_no = '70'
u4v_version = '84'
storagegroup_name_source = 'Grp_source_sg'
storagegroup_name_target = 'Grp_target_sg'
group_snapshot_name = 'Grp_snapshot'
target_group_name = 'Grp_target'
storagegroup_name_with_id = 'GrpId_group_name'
# connector info
wwpn1 = "123456789012345"
wwpn2 = "123456789054321"
wwnn1 = "223456789012345"
initiator = 'iqn.1993-08.org.debian: 01: 222'
ip = u'123.456.7.8'
iqn = u'iqn.1992-04.com.emc:600009700bca30c01e3e012e00000001,t,0x0001'
connector = {'ip': ip,
'initiator': initiator,
'wwpns': [wwpn1, wwpn2],
'wwnns': [wwnn1],
'host': 'HostX'}
fabric_name_prefix = "fakeFabric"
end_point_map = {connector['wwpns'][0]: [wwnn1],
connector['wwpns'][1]: [wwnn1]}
target_wwns = [wwnn1]
zoning_mappings = {
'array': u'000197800123',
'init_targ_map': end_point_map,
'initiator_group': initiatorgroup_name_f,
'port_group': port_group_name_f,
'target_wwns': target_wwns}
device_map = {}
for wwn in connector['wwpns']:
fabric_name = ''.join([fabric_name_prefix,
wwn[-2:]])
target_wwn = wwn[::-1]
fabric_map = {'initiator_port_wwn_list': [wwn],
'target_port_wwn_list': [target_wwn]
}
device_map[fabric_name] = fabric_map
iscsi_device_info = {'maskingview': masking_view_name_i,
'ip_and_iqn': [{'ip': ip,
'iqn': initiator}],
'is_multipath': True,
'array': array,
'controller': {'host': '10.00.00.00'},
'hostlunid': 3}
fc_device_info = {'maskingview': masking_view_name_f,
'array': array,
'controller': {'host': '10.00.00.00'},
'hostlunid': 3}
# cinder volume info
ctx = context.RequestContext('admin', 'fake', True)
provider_location = {'array': six.text_type(array),
'device_id': device_id}
provider_location2 = {'array': six.text_type(array),
'device_id': device_id2}
provider_location3 = {'array': six.text_type(remote_array),
'device_id': device_id2}
legacy_provider_location = {
'classname': 'Symm_StorageVolume',
'keybindings': {'CreationClassName': u'Symm_StorageVolume',
'SystemName': u'SYMMETRIX+000197800123',
'DeviceID': device_id,
'SystemCreationClassName': u'Symm_StorageSystem'}}
legacy_provider_location2 = {
'classname': 'Symm_StorageVolume',
'keybindings': {'CreationClassName': u'Symm_StorageVolume',
'SystemName': u'SYMMETRIX+000197800123',
'DeviceID': device_id2,
'SystemCreationClassName': u'Symm_StorageSystem'}}
test_volume_type = fake_volume.fake_volume_type_obj(
context=ctx
)
test_volume = fake_volume.fake_volume_obj(
context=ctx, name='vol1', size=2, provider_auth=None,
provider_location=six.text_type(provider_location),
volume_type=test_volume_type, host=fake_host,
replication_driver_data=six.text_type(provider_location3))
test_attached_volume = fake_volume.fake_volume_obj(
context=ctx, name='vol1', size=2, provider_auth=None,
provider_location=six.text_type(provider_location), host=fake_host,
volume_type=test_volume_type, attach_status="attached",
replication_driver_data=six.text_type(provider_location3))
test_legacy_vol = fake_volume.fake_volume_obj(
context=ctx, name='vol1', size=2, provider_auth=None,
provider_location=six.text_type(legacy_provider_location),
replication_driver_data=six.text_type(legacy_provider_location2),
host=fake_host, volume_type=test_volume_type)
test_clone_volume = fake_volume.fake_volume_obj(
context=ctx, name='vol1', size=2, provider_auth=None,
provider_location=six.text_type(provider_location2),
host=fake_host)
snapshot_id = '390eeb4d-0f56-4a02-ba14-167167967014'
test_snapshot_snap_name = 'OS-' + snapshot_id[:6] + snapshot_id[-9:]
snap_location = {'snap_name': test_snapshot_snap_name,
'source_id': device_id}
test_snapshot = fake_snapshot.fake_snapshot_obj(
context=ctx, id=snapshot_id,
name='my_snap', size=2,
provider_location=six.text_type(snap_location),
host=fake_host, volume=test_volume)
test_legacy_snapshot = fake_snapshot.fake_snapshot_obj(
context=ctx, id='8d38ccfc-3d29-454c-858b-8348a8f9cc95',
name='my_snap', size=2,
provider_location=six.text_type(legacy_provider_location),
host=fake_host, volume=test_volume)
test_failed_snap = fake_snapshot.fake_snapshot_obj(
context=ctx,
id='4732de9b-98a4-4b6d-ae4b-3cafb3d34220',
name=failed_resource,
size=2,
provider_location=six.text_type(snap_location),
host=fake_host, volume=test_volume)
location_info = {'location_info': '000197800123#SRP_1#Diamond#DSS',
'storage_protocol': 'FC'}
test_host = {'capabilities': location_info,
'host': fake_host}
# extra-specs
vol_type_extra_specs = {'pool_name': u'Diamond+DSS+SRP_1+000197800123'}
vol_type_extra_specs_compr_disabled = {
'pool_name': u'Diamond+DSS+SRP_1+000197800123',
'storagetype:disablecompression': "true"}
vol_type_extra_specs_rep_enabled = {
'pool_name': u'Diamond+DSS+SRP_1+000197800123',
'replication_enabled': '<is> True'}
extra_specs = {'pool_name': u'Diamond+DSS+SRP_1+000197800123',
'slo': slo,
'workload': workload,
'srp': srp,
'array': array,
'interval': 3,
'retries': 120}
extra_specs_disable_compression = deepcopy(extra_specs)
extra_specs_disable_compression[utils.DISABLECOMPRESSION] = "true"
extra_specs_intervals_set = deepcopy(extra_specs)
extra_specs_intervals_set['interval'] = 1
extra_specs_intervals_set['retries'] = 1
extra_specs_rep_enabled = deepcopy(extra_specs)
extra_specs_rep_enabled['replication_enabled'] = True
rep_extra_specs = deepcopy(extra_specs_rep_enabled)
rep_extra_specs['array'] = remote_array
rep_extra_specs['interval'] = 0
rep_extra_specs['retries'] = 0
rep_extra_specs['srp'] = srp2
test_volume_type_1 = volume_type.VolumeType(
id='2b06255d-f5f0-4520-a953-b029196add6a', name='abc',
extra_specs=extra_specs
)
test_volume_type_list = volume_type.VolumeTypeList(
objects=[test_volume_type_1])
test_vol_grp_name_id_only = 'ec870a2f-6bf7-4152-aa41-75aad8e2ea96'
test_vol_grp_name = 'Grp_source_sg_%s' % test_vol_grp_name_id_only
test_group_1 = group.Group(
context=None, name=storagegroup_name_source,
group_id='abc', size=1,
id=test_vol_grp_name_id_only,
status='available',
provider_auth=None, volume_type_ids=['abc'],
group_type_id='grptypeid',
volume_types=test_volume_type_list,
host=fake_host, provider_location=six.text_type(provider_location))
test_group_failed = group.Group(
context=None, name=failed_resource,
group_id='14b8894e-54ec-450a-b168-c172a16ed166',
size=1,
id='318c721c-51ad-4160-bfe1-ebde2273836f',
status='available',
provider_auth=None, volume_type_ids=['abc'],
group_type_id='grptypeid',
volume_types=test_volume_type_list,
host=fake_host, provider_location=six.text_type(provider_location))
test_group = fake_group.fake_group_obj(
context=ctx, name=storagegroup_name_source,
id='7634bda4-6950-436f-998c-37c3e01bad30', host=fake_host)
test_group_without_name = fake_group.fake_group_obj(
context=ctx,
name=None,
id=test_vol_grp_name_id_only,
host=fake_host)
test_group_snapshot_1 = group_snapshot.GroupSnapshot(
context=None, id='6560405d-b89a-4f79-9e81-ad1752f5a139',
group_id='876d9fbb-de48-4948-9f82-15c913ed05e7',
name=group_snapshot_name,
group_type_id='c6934c26-dde8-4bf8-a765-82b3d0130e9f',
status='available',
group=test_group_1)
test_group_snapshot_failed = group_snapshot.GroupSnapshot(
context=None, id='0819dd5e-9aa1-4ec7-9dda-c78e51b2ad76',
group_id='1fc735cb-d36c-4352-8aa6-dc1e16b5a0a7',
name=failed_resource,
group_type_id='6b70de13-98c5-46b2-8f24-e4e96a8988fa',
status='available',
group=test_group_failed)
# masking view dict
masking_view_dict = {
'array': array,
'connector': connector,
'device_id': device_id,
'init_group_name': initiatorgroup_name_f,
'initiator_check': False,
'maskingview_name': masking_view_name_f,
'parent_sg_name': parent_sg_f,
'srp': srp,
'storagetype:disablecompression': False,
utils.PORTGROUPNAME: port_group_name_f,
'slo': slo,
'storagegroup_name': storagegroup_name_f,
'volume_name': test_volume.name,
'workload': workload,
'replication_enabled': False}
masking_view_dict_no_slo = deepcopy(masking_view_dict)
masking_view_dict_no_slo.update(
{'slo': None, 'workload': None,
'storagegroup_name': 'OS-HostX-No_SLO-OS-fibre-PG'})
masking_view_dict_compression_disabled = deepcopy(masking_view_dict)
masking_view_dict_compression_disabled.update(
{'storagetype:disablecompression': True,
'storagegroup_name': 'OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG-CD'})
masking_view_dict_replication_enabled = deepcopy(masking_view_dict)
masking_view_dict_replication_enabled.update(
{'replication_enabled': True,
'storagegroup_name': 'OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG-RE'})
# vmax data
# sloprovisioning
compression_info = {"symmetrixId": ["000197800128"]}
inititiatorgroup = [{"initiator": [wwpn1],
"hostId": initiatorgroup_name_f,
"maskingview": [masking_view_name_f]},
{"initiator": [initiator],
"hostId": initiatorgroup_name_i,
"maskingview": [masking_view_name_i]}]
initiator_list = [{"host": initiatorgroup_name_f,
"initiatorId": wwpn1,
"maskingview": [masking_view_name_f]},
{"host": initiatorgroup_name_i,
"initiatorId": initiator,
"maskingview": [masking_view_name_i]},
{"initiatorId": [
"FA-1D:4:" + wwpn1,
"SE-4E:0:" + initiator]}]
maskingview = [{"maskingViewId": masking_view_name_f,
"portGroupId": port_group_name_f,
"storageGroupId": storagegroup_name_f,
"hostId": initiatorgroup_name_f,
"maskingViewConnection": [
{"host_lun_address": "0003"}]},
{"maskingViewId": masking_view_name_i,
"portGroupId": port_group_name_i,
"storageGroupId": storagegroup_name_i,
"hostId": initiatorgroup_name_i,
"maskingViewConnection": [
{"host_lun_address": "0003"}]},
{}]
portgroup = [{"portGroupId": port_group_name_f,
"symmetrixPortKey": [
{"directorId": "FA-1D",
"portId": "FA-1D:4"}],
"maskingview": [masking_view_name_f]},
{"portGroupId": port_group_name_i,
"symmetrixPortKey": [
{"directorId": "SE-4E",
"portId": "SE-4E:0"}],
"maskingview": [masking_view_name_i]}]
port_list = [
{"symmetrixPort": {"num_of_masking_views": 1,
"maskingview": [masking_view_name_f],
"identifier": wwnn1,
"symmetrixPortKey": {
"directorId": "FA-1D",
"portId": "4"},
"portgroup": [port_group_name_f]}},
{"symmetrixPort": {"identifier": initiator,
"symmetrixPortKey": {
"directorId": "SE-4E",
"portId": "0"},
"ip_addresses": [ip],
"num_of_masking_views": 1,
"maskingview": [masking_view_name_i],
"portgroup": [port_group_name_i]}}]
sg_details = [{"srp": srp,
"num_of_vols": 2,
"cap_gb": 2,
"storageGroupId": defaultstoragegroup_name,
"slo": slo,
"workload": workload},
{"srp": srp,
"num_of_vols": 2,
"cap_gb": 2,
"storageGroupId": storagegroup_name_f,
"slo": slo,
"workload": workload,
"maskingview": [masking_view_name_f],
"parent_storage_group": [parent_sg_f]},
{"srp": srp,
"num_of_vols": 2,
"cap_gb": 2,
"storageGroupId": storagegroup_name_i,
"slo": slo,
"workload": workload,
"maskingview": [masking_view_name_i],
"parent_storage_group": [parent_sg_i]},
{"num_of_vols": 2,
"cap_gb": 2,
"storageGroupId": parent_sg_f,
"num_of_child_sgs": 1,
"child_storage_group": [storagegroup_name_f],
"maskingview": [masking_view_name_f]},
{"num_of_vols": 2,
"cap_gb": 2,
"storageGroupId": parent_sg_i,
"num_of_child_sgs": 1,
"child_storage_group": [storagegroup_name_i],
"maskingview": [masking_view_name_i], }
]
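    # Storage group details as returned by the replication endpoints.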
sg_details_rep = [{"childNames": [],
"numDevicesNonGk": 2,
"isLinkTarget": False,
"rdf": False,
"capacityGB": 2.0,
"name": storagegroup_name_source,
"snapVXSnapshots": ['12345'],
"symmetrixId": array,
"numSnapVXSnapshots": 1}]
sg_list = {"storageGroupId": [storagegroup_name_f,
defaultstoragegroup_name]}
sg_list_rep = [storagegroup_name_with_id]
srp_details = {"srpSloDemandId": ["Bronze", "Diamond", "Gold",
"None", "Optimized", "Silver"],
"srpId": srp,
"total_allocated_cap_gb": 5244.7,
"total_usable_cap_gb": 20514.4,
"total_subscribed_cap_gb": 84970.1,
"reserved_cap_percent": 10}
volume_details = [{"cap_gb": 2,
"num_of_storage_groups": 1,
"volumeId": device_id,
"volume_identifier": "1",
"wwn": volume_wwn,
"snapvx_target": 'false',
"snapvx_source": 'false',
"storageGroupId": [defaultstoragegroup_name,
storagegroup_name_f]},
{"cap_gb": 1,
"num_of_storage_groups": 1,
"volumeId": device_id2,
"volume_identifier": "OS-2",
"wwn": '600012345',
"storageGroupId": [defaultstoragegroup_name,
storagegroup_name_f]}]
volume_list = [
{"resultList": {"result": [{"volumeId": device_id}]}},
{"resultList": {"result": [{"volumeId": device_id2}]}},
{"resultList": {"result": [{"volumeId": device_id},
{"volumeId": device_id2}]}}]
private_vol_details = {
"resultList": {
"result": [{
"timeFinderInfo": {
"snapVXSession": [
{"srcSnapshotGenInfo": [
{"snapshotHeader": {
"snapshotName": "temp-1",
"device": device_id},
"lnkSnapshotGenInfo": [
{"targetDevice": device_id2}]}]},
{"tgtSrcSnapshotGenInfo": {
"snapshotName": "temp-1",
"targetDevice": device_id2,
"sourceDevice": device_id}}],
"snapVXSrc": 'true',
"snapVXTgt": 'true'}}]}}
workloadtype = {"workloadId": ["OLTP", "OLTP_REP", "DSS", "DSS_REP"]}
slo_details = {"sloId": ["Bronze", "Diamond", "Gold",
"Optimized", "Platinum", "Silver"]}
# replication
volume_snap_vx = {"snapshotLnks": [],
"snapshotSrcs": [
{"generation": 0,
"linkedDevices": [
{"targetDevice": device_id2,
"percentageCopied": 100,
"state": "Copied",
"copy": True,
"defined": True,
"linked": True}],
"snapshotName": test_snapshot_snap_name,
"state": "Established"}]}
capabilities = {"symmetrixCapability": [{"rdfCapable": True,
"snapVxCapable": True,
"symmetrixId": "0001111111"},
{"symmetrixId": array,
"snapVxCapable": True,
"rdfCapable": True}]}
group_snap_vx = {"generation": 0,
"isLinked": False,
"numUniqueTracks": 0,
"isRestored": False,
"name": group_snapshot_name,
"numStorageGroupVolumes": 1,
"state": ["Established"],
"timeToLiveExpiryDate": "N/A",
"isExpired": False,
"numSharedTracks": 0,
"timestamp": "00:30:50 Fri, 02 Jun 2017 IST +0100",
"numSourceVolumes": 1
}
group_snap_vx_1 = {"generation": 0,
"isLinked": False,
"numUniqueTracks": 0,
"isRestored": False,
"name": group_snapshot_name,
"numStorageGroupVolumes": 1,
"state": ["Copied"],
"timeToLiveExpiryDate": "N/A",
"isExpired": False,
"numSharedTracks": 0,
"timestamp": "00:30:50 Fri, 02 Jun 2017 IST +0100",
"numSourceVolumes": 1,
"linkedStorageGroup":
{"name": target_group_name,
"percentageCopied": 100},
}
grp_snapvx_links = [{"name": target_group_name,
"percentageCopied": 100},
{"name": "another-target",
"percentageCopied": 90}]
rdf_group_list = {"rdfGroupID": [{"rdfgNumber": rdf_group_no,
"label": rdf_group_name}]}
rdf_group_details = {"modes": ["Synchronous"],
"remoteSymmetrix": remote_array,
"label": rdf_group_name,
"type": "Dynamic",
"numDevices": 1,
"remoteRdfgNumber": rdf_group_no,
"rdfgNumber": rdf_group_no}
rdf_group_vol_details = {"remoteRdfGroupNumber": rdf_group_no,
"localSymmetrixId": array,
"volumeConfig": "RDF1+TDEV",
"localRdfGroupNumber": rdf_group_no,
"localVolumeName": device_id,
"rdfpairState": "Synchronized",
"remoteVolumeName": device_id2,
"localVolumeState": "Ready",
"rdfMode": "Synchronous",
"remoteVolumeState": "Write Disabled",
"remoteSymmetrixId": remote_array}
# system
job_list = [{"status": "SUCCEEDED",
"jobId": "12345",
"result": "created",
"resourceLink": "storagegroup/%s" % storagegroup_name_f},
{"status": "RUNNING", "jobId": "55555"},
{"status": "FAILED", "jobId": "09999"}]
symmetrix = {"symmetrixId": array,
"model": "VMAX250F",
"ucode": "5977.1091.1092"}
headroom = {"headroom": [{"headroomCapacity": 20348.29}]}
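# The fake classes below stand in for external dependencies (the device-mapping
# lookup service, requests responses/sessions, driver configuration and XML
# config files) so that no test ever talks to a live REST endpoint.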
class FakeLookupService(object):
def get_device_mapping_from_network(self, initiator_wwns, target_wwns):
return VMAXCommonData.device_map
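# Minimal stand-in for a requests.Response object: exposes status_code and a
# json() method that raises ValueError when there is no body.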
class FakeResponse(object):
def __init__(self, status_code, return_object):
self.status_code = status_code
self.return_object = return_object
def json(self):
if self.return_object:
return self.return_object
else:
raise ValueError
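# Fake requests session: routes GET/POST/PUT/DELETE calls to the canned
# VMAXCommonData fixtures based on the URL, and simulates timeouts and
# unexpected failures via the special 'TIMEOUT' and 'EXCEPTION' methods.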
class FakeRequestsSession(object):
def __init__(self, *args, **kwargs):
self.data = VMAXCommonData()
def request(self, method, url, params=None, data=None):
return_object = ''
status_code = 200
if method == 'GET':
status_code, return_object = self._get_request(url, params)
elif method == 'POST' or method == 'PUT':
status_code, return_object = self._post_or_put(url, data)
elif method == 'DELETE':
status_code, return_object = self._delete(url)
elif method == 'TIMEOUT':
raise requests.Timeout
elif method == 'EXCEPTION':
raise Exception
return FakeResponse(status_code, return_object)
def _get_request(self, url, params):
status_code = 200
return_object = None
if self.data.failed_resource in url:
status_code = 500
return_object = self.data.job_list[2]
elif 'sloprovisioning' in url:
if 'volume' in url:
return_object = self._sloprovisioning_volume(url, params)
elif 'storagegroup' in url:
return_object = self._sloprovisioning_sg(url)
elif 'maskingview' in url:
return_object = self._sloprovisioning_mv(url)
elif 'portgroup' in url:
return_object = self._sloprovisioning_pg(url)
elif 'director' in url:
return_object = self._sloprovisioning_port(url)
elif 'host' in url:
return_object = self._sloprovisioning_ig(url)
elif 'initiator' in url:
return_object = self._sloprovisioning_initiator(url)
elif 'srp' in url:
return_object = self.data.srp_details
elif 'workloadtype' in url:
return_object = self.data.workloadtype
elif 'compressionCapable' in url:
return_object = self.data.compression_info
else:
return_object = self.data.slo_details
elif 'replication' in url:
return_object = self._replication(url)
elif 'system' in url:
return_object = self._system(url)
elif 'headroom' in url:
return_object = self.data.headroom
return status_code, return_object
def _sloprovisioning_volume(self, url, params):
return_object = self.data.volume_list[2]
if '/private' in url:
return_object = self.data.private_vol_details
elif params:
if '1' in params.values():
return_object = self.data.volume_list[0]
elif '2' in params.values():
return_object = self.data.volume_list[1]
else:
for vol in self.data.volume_details:
if vol['volumeId'] in url:
return_object = vol
break
return return_object
def _sloprovisioning_sg(self, url):
return_object = self.data.sg_list
for sg in self.data.sg_details:
if sg['storageGroupId'] in url:
return_object = sg
break
return return_object
def _sloprovisioning_mv(self, url):
if self.data.masking_view_name_i in url:
return_object = self.data.maskingview[1]
else:
return_object = self.data.maskingview[0]
return return_object
def _sloprovisioning_pg(self, url):
return_object = None
for pg in self.data.portgroup:
if pg['portGroupId'] in url:
return_object = pg
break
return return_object
def _sloprovisioning_port(self, url):
return_object = None
for port in self.data.port_list:
if port['symmetrixPort']['symmetrixPortKey']['directorId'] in url:
return_object = port
break
return return_object
def _sloprovisioning_ig(self, url):
return_object = None
for ig in self.data.inititiatorgroup:
if ig['hostId'] in url:
return_object = ig
break
return return_object
def _sloprovisioning_initiator(self, url):
return_object = self.data.initiator_list[2]
if self.data.wwpn1 in url:
return_object = self.data.initiator_list[0]
elif self.data.initiator in url:
return_object = self.data.initiator_list[1]
return return_object
def _replication(self, url):
return_object = None
if 'rdf_group' in url:
if self.data.device_id in url:
return_object = self.data.rdf_group_vol_details
elif self.data.rdf_group_no in url:
return_object = self.data.rdf_group_details
else:
return_object = self.data.rdf_group_list
elif 'storagegroup' in url:
return_object = self._replication_sg(url)
elif 'snapshot' in url:
return_object = self.data.volume_snap_vx
elif 'capabilities' in url:
return_object = self.data.capabilities
return return_object
def _replication_sg(self, url):
return_object = None
if 'generation' in url:
return_object = self.data.group_snap_vx
elif 'storagegroup' in url:
return_object = self.data.sg_details_rep[0]
return return_object
def _system(self, url):
return_object = None
if 'job' in url:
for job in self.data.job_list:
if job['jobId'] in url:
return_object = job
break
else:
return_object = self.data.symmetrix
return return_object
def _post_or_put(self, url, payload):
return_object = self.data.job_list[0]
status_code = 201
if self.data.failed_resource in url:
status_code = 500
return_object = self.data.job_list[2]
elif payload:
payload = ast.literal_eval(payload)
if self.data.failed_resource in payload.values():
status_code = 500
return_object = self.data.job_list[2]
if payload.get('executionOption'):
status_code = 202
return status_code, return_object
def _delete(self, url):
if self.data.failed_resource in url:
status_code = 500
return_object = self.data.job_list[2]
else:
status_code = 204
return_object = None
return status_code, return_object
def session(self):
return FakeRequestsSession()
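# Lightweight substitute for the cinder volume driver configuration object;
# it provides just the attributes and helpers the driver code reads
# (safe_get, append_config_values, etc.).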
class FakeConfiguration(object):
def __init__(self, emc_file=None, volume_backend_name=None,
interval=0, retries=0, replication_device=None):
self.cinder_dell_emc_config_file = emc_file
self.interval = interval
self.retries = retries
self.volume_backend_name = volume_backend_name
self.config_group = volume_backend_name
if replication_device:
self.replication_device = [replication_device]
def safe_get(self, key):
try:
return getattr(self, key)
except Exception:
return None
def append_config_values(self, values):
pass
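# Writes throw-away cinder_dell_emc_config_<group>.xml files into a temp
# directory so the drivers can be instantiated during the tests.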
class FakeXML(object):
def __init__(self):
""""""
self.tempdir = tempfile.mkdtemp()
self.data = VMAXCommonData()
def create_fake_config_file(self, config_group, portgroup,
ssl_verify=False):
doc = minidom.Document()
emc = doc.createElement("EMC")
doc.appendChild(emc)
doc = self.add_array_info(doc, emc, portgroup, ssl_verify)
filename = 'cinder_dell_emc_config_%s.xml' % config_group
config_file_path = self.tempdir + '/' + filename
        with open(config_file_path, 'w') as f:
            doc.writexml(f)
return config_file_path
def add_array_info(self, doc, emc, portgroup_name, ssl_verify):
array = doc.createElement("Array")
arraytext = doc.createTextNode(self.data.array)
emc.appendChild(array)
array.appendChild(arraytext)
ecomserverip = doc.createElement("RestServerIp")
ecomserveriptext = doc.createTextNode("1.1.1.1")
emc.appendChild(ecomserverip)
ecomserverip.appendChild(ecomserveriptext)
ecomserverport = doc.createElement("RestServerPort")
ecomserverporttext = doc.createTextNode("8443")
emc.appendChild(ecomserverport)
ecomserverport.appendChild(ecomserverporttext)
ecomusername = doc.createElement("RestUserName")
ecomusernametext = doc.createTextNode("smc")
emc.appendChild(ecomusername)
ecomusername.appendChild(ecomusernametext)
ecompassword = doc.createElement("RestPassword")
ecompasswordtext = doc.createTextNode("smc")
emc.appendChild(ecompassword)
ecompassword.appendChild(ecompasswordtext)
portgroup = doc.createElement("PortGroup")
portgrouptext = doc.createTextNode(portgroup_name)
portgroup.appendChild(portgrouptext)
portgroups = doc.createElement("PortGroups")
portgroups.appendChild(portgroup)
emc.appendChild(portgroups)
srp = doc.createElement("SRP")
srptext = doc.createTextNode("SRP_1")
emc.appendChild(srp)
srp.appendChild(srptext)
if ssl_verify:
restcert = doc.createElement("SSLCert")
restcerttext = doc.createTextNode("/path/cert.crt")
emc.appendChild(restcert)
restcert.appendChild(restcerttext)
restverify = doc.createElement("SSLVerify")
restverifytext = doc.createTextNode("/path/cert.pem")
emc.appendChild(restverify)
restverify.appendChild(restverifytext)
return doc
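# Tests for the utility helpers (utils.VMAXUtils) used by the driver.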
class VMAXUtilsTest(test.TestCase):
def setUp(self):
self.data = VMAXCommonData()
super(VMAXUtilsTest, self).setUp()
config_group = 'UtilsTests'
fake_xml = FakeXML().create_fake_config_file(
config_group, self.data.port_group_name_i, True)
configuration = FakeConfiguration(fake_xml, config_group)
rest.VMAXRest._establish_rest_session = mock.Mock(
return_value=FakeRequestsSession())
driver = iscsi.VMAXISCSIDriver(configuration=configuration)
self.driver = driver
self.common = self.driver.common
self.utils = self.common.utils
def test_get_volumetype_extra_specs(self):
with mock.patch.object(volume_types, 'get_volume_type_extra_specs',
return_value={'specs'}) as type_mock:
# path 1: volume_type_id not passed in
self.data.test_volume.volume_type_id = (
self.data.test_volume_type.id)
self.utils.get_volumetype_extra_specs(self.data.test_volume)
volume_types.get_volume_type_extra_specs.assert_called_once_with(
self.data.test_volume_type.id)
type_mock.reset_mock()
# path 2: volume_type_id passed in
self.utils.get_volumetype_extra_specs(self.data.test_volume, '123')
volume_types.get_volume_type_extra_specs.assert_called_once_with(
'123')
type_mock.reset_mock()
# path 3: no type_id
self.utils.get_volumetype_extra_specs(self.data.test_clone_volume)
(volume_types.get_volume_type_extra_specs.
assert_not_called())
def test_get_volumetype_extra_specs_exception(self):
extra_specs = self.utils.get_volumetype_extra_specs(
{'name': 'no_type_id'})
self.assertEqual({}, extra_specs)
def test_get_random_portgroup(self):
# 4 portgroups
data = ("<?xml version='1.0' encoding='UTF-8'?>\n<EMC>\n"
"<PortGroups>"
"<PortGroup>OS-PG1</PortGroup>\n"
"<PortGroup>OS-PG2</PortGroup>\n"
"<PortGroup>OS-PG3</PortGroup>\n"
"<PortGroup>OS-PG4</PortGroup>\n"
"</PortGroups>"
"</EMC>")
dom = minidom.parseString(data)
portgroup = self.utils._get_random_portgroup(dom)
self.assertIn('OS-PG', portgroup)
# Duplicate portgroups
data = ("<?xml version='1.0' encoding='UTF-8'?>\n<EMC>\n"
"<PortGroups>"
"<PortGroup>OS-PG1</PortGroup>\n"
"<PortGroup>OS-PG1</PortGroup>\n"
"<PortGroup>OS-PG1</PortGroup>\n"
"<PortGroup>OS-PG2</PortGroup>\n"
"</PortGroups>"
"</EMC>")
dom = minidom.parseString(data)
portgroup = self.utils._get_random_portgroup(dom)
self.assertIn('OS-PG', portgroup)
def test_get_random_portgroup_none(self):
# Missing PortGroup tag
data = ("<?xml version='1.0' encoding='UTF-8'?>\n<EMC>\n"
"</EMC>")
dom = minidom.parseString(data)
self.assertIsNone(self.utils._get_random_portgroup(dom))
# Missing portgroups
data = ("<?xml version='1.0' encoding='UTF-8'?>\n<EMC>\n"
"<PortGroups>"
"</PortGroups>"
"</EMC>")
dom = minidom.parseString(data)
self.assertIsNone(self.utils._get_random_portgroup(dom))
def test_get_host_short_name(self):
host_under_16_chars = 'host_13_chars'
host1 = self.utils.get_host_short_name(
host_under_16_chars)
self.assertEqual(host_under_16_chars, host1)
host_over_16_chars = (
'host_over_16_chars_host_over_16_chars_host_over_16_chars')
# Check that the same md5 value is retrieved from multiple calls
host2 = self.utils.get_host_short_name(
host_over_16_chars)
host3 = self.utils.get_host_short_name(
host_over_16_chars)
self.assertEqual(host2, host3)
host_with_period = 'hostname.with.many.parts'
ref_host_name = self.utils.generate_unique_trunc_host('hostname')
host4 = self.utils.get_host_short_name(host_with_period)
self.assertEqual(ref_host_name, host4)
def test_get_volume_element_name(self):
volume_id = 'ea95aa39-080b-4f11-9856-a03acf9112ad'
volume_element_name = self.utils.get_volume_element_name(volume_id)
expect_vol_element_name = ('OS-' + volume_id)
self.assertEqual(expect_vol_element_name, volume_element_name)
def test_parse_file_to_get_array_map(self):
kwargs = (
{'RestServerIp': '1.1.1.1',
'RestServerPort': '8443',
'RestUserName': 'smc',
'RestPassword': 'smc',
'SSLCert': '/path/cert.crt',
'SSLVerify': '/path/cert.pem',
'SerialNumber': self.data.array,
'srpName': 'SRP_1',
'PortGroup': self.data.port_group_name_i})
array_info = self.utils.parse_file_to_get_array_map(
self.common.configuration.cinder_dell_emc_config_file)
self.assertEqual(kwargs, array_info)
@mock.patch.object(utils.VMAXUtils,
'_get_connection_info')
@mock.patch.object(utils.VMAXUtils,
'_get_random_portgroup')
def test_parse_file_to_get_array_map_errors(self, mock_port, mock_conn):
tempdir = tempfile.mkdtemp()
doc = minidom.Document()
emc = doc.createElement("EMC")
doc.appendChild(emc)
filename = 'cinder_dell_emc_config_%s.xml' % 'fake_xml'
config_file_path = tempdir + '/' + filename
        with open(config_file_path, 'w') as f:
            doc.writexml(f)
array_info = self.utils.parse_file_to_get_array_map(
config_file_path)
self.assertIsNone(array_info['SerialNumber'])
def test_parse_file_to_get_array_map_conn_errors(self):
tempdir = tempfile.mkdtemp()
doc = minidom.Document()
emc = doc.createElement("EMC")
doc.appendChild(emc)
filename = 'cinder_dell_emc_config_%s.xml' % 'fake_xml'
config_file_path = tempdir + '/' + filename
        with open(config_file_path, 'w') as f:
            doc.writexml(f)
self.assertRaises(exception.VolumeBackendAPIException,
self.utils.parse_file_to_get_array_map,
config_file_path)
def test_truncate_string(self):
# string is less than max number
str_to_truncate = 'string'
response = self.utils.truncate_string(str_to_truncate, 10)
self.assertEqual(str_to_truncate, response)
def test_get_default_oversubscription_ratio(self):
default_ratio = 20.0
max_over_sub_ratio1 = 30.0
returned_max = self.utils.get_default_oversubscription_ratio(
max_over_sub_ratio1)
self.assertEqual(max_over_sub_ratio1, returned_max)
max_over_sub_ratio2 = 0.5
returned_max = self.utils.get_default_oversubscription_ratio(
max_over_sub_ratio2)
self.assertEqual(default_ratio, returned_max)
def test_get_default_storage_group_name_slo_workload(self):
srp_name = self.data.srp
slo = self.data.slo
workload = self.data.workload
sg_name = self.utils.get_default_storage_group_name(
srp_name, slo, workload)
self.assertEqual(self.data.defaultstoragegroup_name, sg_name)
def test_get_default_storage_group_name_no_slo(self):
srp_name = self.data.srp
slo = None
workload = None
sg_name = self.utils.get_default_storage_group_name(
srp_name, slo, workload)
self.assertEqual(self.data.default_sg_no_slo, sg_name)
def test_get_default_storage_group_name_compr_disabled(self):
srp_name = self.data.srp
slo = self.data.slo
workload = self.data.workload
sg_name = self.utils.get_default_storage_group_name(
srp_name, slo, workload, True)
self.assertEqual(self.data.default_sg_compr_disabled, sg_name)
def test_get_time_delta(self):
start_time = 1487781721.09
end_time = 1487781758.16
delta = end_time - start_time
ref_delta = six.text_type(datetime.timedelta(seconds=int(delta)))
time_delta = self.utils.get_time_delta(start_time, end_time)
self.assertEqual(ref_delta, time_delta)
def test_get_short_protocol_type(self):
# iscsi
short_i_protocol = self.utils.get_short_protocol_type('iscsi')
self.assertEqual('I', short_i_protocol)
# fc
short_f_protocol = self.utils.get_short_protocol_type('FC')
self.assertEqual('F', short_f_protocol)
# else
other_protocol = self.utils.get_short_protocol_type('OTHER')
self.assertEqual('OTHER', other_protocol)
def test_get_temp_snap_name(self):
clone_name = "12345"
source_device_id = self.data.device_id
ref_name = "temp-00001-12345"
snap_name = self.utils.get_temp_snap_name(
clone_name, source_device_id)
self.assertEqual(ref_name, snap_name)
def test_get_array_and_device_id(self):
volume = deepcopy(self.data.test_volume)
external_ref = {u'source-name': u'00002'}
array, device_id = self.utils.get_array_and_device_id(
volume, external_ref)
self.assertEqual(self.data.array, array)
self.assertEqual('00002', device_id)
def test_get_array_and_device_id_exception(self):
volume = deepcopy(self.data.test_volume)
external_ref = {u'source-name': None}
self.assertRaises(exception.VolumeBackendAPIException,
self.utils.get_array_and_device_id,
volume, external_ref)
def test_get_pg_short_name(self):
pg_under_12_chars = 'pg_11_chars'
pg1 = self.utils.get_pg_short_name(pg_under_12_chars)
self.assertEqual(pg_under_12_chars, pg1)
pg_over_12_chars = 'portgroup_over_12_characters'
# Check that the same md5 value is retrieved from multiple calls
pg2 = self.utils.get_pg_short_name(pg_over_12_chars)
pg3 = self.utils.get_pg_short_name(pg_over_12_chars)
self.assertEqual(pg2, pg3)
def test_is_compression_disabled_true(self):
extra_specs = self.data.extra_specs_disable_compression
do_disable_compression = self.utils.is_compression_disabled(
extra_specs)
self.assertTrue(do_disable_compression)
def test_is_compression_disabled_false(self):
# Path 1: no compression extra spec set
extra_specs = self.data.extra_specs
do_disable_compression = self.utils.is_compression_disabled(
extra_specs)
self.assertFalse(do_disable_compression)
# Path 2: compression extra spec set to false
extra_specs2 = deepcopy(extra_specs)
extra_specs2.update({utils.DISABLECOMPRESSION: 'false'})
do_disable_compression2 = self.utils.is_compression_disabled(
            extra_specs2)
self.assertFalse(do_disable_compression2)
def test_change_compression_type_true(self):
source_compr_disabled_true = 'true'
new_type_compr_disabled = {
'extra_specs': {utils.DISABLECOMPRESSION: 'no'}}
ans = self.utils.change_compression_type(
source_compr_disabled_true, new_type_compr_disabled)
self.assertTrue(ans)
def test_change_compression_type_false(self):
source_compr_disabled_true = True
new_type_compr_disabled = {
'extra_specs': {utils.DISABLECOMPRESSION: 'true'}}
ans = self.utils.change_compression_type(
source_compr_disabled_true, new_type_compr_disabled)
self.assertFalse(ans)
def test_is_replication_enabled(self):
is_re = self.utils.is_replication_enabled(
self.data.vol_type_extra_specs_rep_enabled)
self.assertTrue(is_re)
is_re2 = self.utils.is_replication_enabled(self.data.extra_specs)
self.assertFalse(is_re2)
def test_get_replication_config(self):
# Success, allow_extend false
rep_device_list1 = [{'target_device_id': self.data.remote_array,
'remote_pool': self.data.srp,
'remote_port_group': self.data.port_group_name_f,
'rdf_group_label': self.data.rdf_group_name}]
rep_config1 = self.utils.get_replication_config(rep_device_list1)
self.assertEqual(self.data.remote_array, rep_config1['array'])
# Success, allow_extend true
rep_device_list2 = [{'target_device_id': self.data.remote_array,
'remote_pool': self.data.srp,
'rdf_group_label': self.data.rdf_group_name,
'remote_port_group': self.data.port_group_name_f,
'allow_extend': 'true'}]
rep_config2 = self.utils.get_replication_config(rep_device_list2)
self.assertTrue(rep_config2['allow_extend'])
# No rep_device_list
rep_device_list3 = []
rep_config3 = self.utils.get_replication_config(rep_device_list3)
self.assertIsNone(rep_config3)
# Exception
rep_device_list4 = [{'target_device_id': self.data.remote_array,
'remote_pool': self.data.srp}]
self.assertRaises(exception.VolumeBackendAPIException,
self.utils.get_replication_config, rep_device_list4)
def test_is_volume_failed_over(self):
vol = deepcopy(self.data.test_volume)
vol.replication_status = fields.ReplicationStatus.FAILED_OVER
is_fo1 = self.utils.is_volume_failed_over(vol)
self.assertTrue(is_fo1)
is_fo2 = self.utils.is_volume_failed_over(self.data.test_volume)
self.assertFalse(is_fo2)
is_fo3 = self.utils.is_volume_failed_over(None)
self.assertFalse(is_fo3)
def test_add_legacy_pools(self):
pools = [{'pool_name': "Diamond+None+SRP_1+000197800111"},
{'pool_name': "Diamond+OLTP+SRP_1+000197800111"}]
new_pools = self.utils.add_legacy_pools(pools)
ref_pools = [{'pool_name': "Diamond+None+SRP_1+000197800111"},
{'pool_name': "Diamond+OLTP+SRP_1+000197800111"},
{'pool_name': "Diamond+SRP_1+000197800111"}]
self.assertEqual(ref_pools, new_pools)
def test_update_volume_group_name(self):
group = self.data.test_group_1
ref_group_name = self.data.test_vol_grp_name
vol_grp_name = self.utils.update_volume_group_name(group)
self.assertEqual(ref_group_name, vol_grp_name)
def test_update_volume_group_name_id_only(self):
group = self.data.test_group_without_name
ref_group_name = self.data.test_vol_grp_name_id_only
vol_grp_name = self.utils.update_volume_group_name(group)
self.assertEqual(ref_group_name, vol_grp_name)
def test_update_admin_metadata(self):
admin_metadata = {'targetVolumeName': '123456'}
ref_model_update = [{'id': '12345',
'admin_metadata': admin_metadata}]
volume_model_update = {'id': '12345'}
volumes_model_update = [volume_model_update]
key = 'targetVolumeName'
values = {}
values['12345'] = '123456'
self.utils.update_admin_metadata(
volumes_model_update, key, values)
self.assertEqual(ref_model_update, volumes_model_update)
def test_get_volume_group_utils(self):
group = self.data.test_group_1
array, extraspecs_dict = self.utils.get_volume_group_utils(
group, interval=1, retries=1)
ref_array = self.data.array
self.assertEqual(ref_array, array)
def test_update_extra_specs_list(self):
extra_specs = self.data.extra_specs
volume_type_id = 'abc'
extraspecs_dict = self.utils._update_extra_specs_list(
extra_specs, volume_type_id, interval=1, retries=1)
self.assertEqual(extra_specs, extraspecs_dict['extra_specs'])
def test_update_intervals_and_retries(self):
extra_specs = self.data.extra_specs
ref_interval = 1
extraspecs = self.utils._update_intervals_and_retries(
extra_specs, interval=1, retries=1)
self.assertEqual(ref_interval, extraspecs['interval'])
def test_get_intervals_retries_dict(self):
ref_value = {'interval': 1, 'retries': 1}
ret_dict = self.utils.get_intervals_retries_dict(
interval=1, retries=1)
self.assertEqual(ref_value, ret_dict)
def test_update_volume_model_updates(self):
volume_model_updates = [{'id': '1', 'status': 'available'}]
volumes = [self.data.test_volume]
ref_val = {'id': self.data.test_volume.id,
'status': 'error_deleting'}
ret_val = self.utils.update_volume_model_updates(
volume_model_updates, volumes, 'abc', status='error_deleting')
self.assertEqual(ref_val, ret_val[1])
def test_update_volume_model_updates_empty_update_list(self):
volume_model_updates = []
volumes = [self.data.test_volume]
ref_val = [{'id': self.data.test_volume.id,
'status': 'available'}]
ret_val = self.utils.update_volume_model_updates(
volume_model_updates, volumes, 'abc')
self.assertEqual(ref_val, ret_val)
def test_update_volume_model_updates_empty_vol_list(self):
volume_model_updates = []
volumes = []
ref_val = []
ret_val = self.utils.update_volume_model_updates(
volume_model_updates, volumes, 'abc')
self.assertEqual(ref_val, ret_val)
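# Tests for the REST client layer (rest.VMAXRest), exercised end-to-end
# through FakeRequestsSession rather than a live server.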
class VMAXRestTest(test.TestCase):
def setUp(self):
self.data = VMAXCommonData()
super(VMAXRestTest, self).setUp()
config_group = 'RestTests'
fake_xml = FakeXML().create_fake_config_file(
config_group, self.data.port_group_name_f)
configuration = FakeConfiguration(fake_xml, config_group)
rest.VMAXRest._establish_rest_session = mock.Mock(
return_value=FakeRequestsSession())
driver = fc.VMAXFCDriver(configuration=configuration)
self.driver = driver
self.common = self.driver.common
self.rest = self.common.rest
self.utils = self.common.utils
def test_rest_request_exception(self):
sc, msg = self.rest.request('/fake_url', 'TIMEOUT')
self.assertIsNone(sc)
self.assertIsNone(msg)
self.assertRaises(exception.VolumeBackendAPIException,
self.rest.request, '', 'EXCEPTION')
def test_wait_for_job_complete(self):
rc, job, status, task = self.rest.wait_for_job_complete(
{'status': 'created', 'jobId': '12345'}, self.data.extra_specs)
self.assertEqual(0, rc)
def test_wait_for_job_complete_failed(self):
with mock.patch.object(self.rest, '_is_job_finished',
side_effect=exception.BadHTTPResponseStatus):
self.assertRaises(exception.VolumeBackendAPIException,
self.rest.wait_for_job_complete,
self.data.job_list[0], self.data.extra_specs)
def test_is_job_finished_false(self):
job_id = "55555"
complete, response, rc, status, task = self.rest._is_job_finished(
job_id)
self.assertFalse(complete)
def test_is_job_finished_failed(self):
job_id = "55555"
complete, response, rc, status, task = self.rest._is_job_finished(
job_id)
self.assertFalse(complete)
with mock.patch.object(self.rest, 'request',
return_value=(200, {'status': 'FAILED'})):
complete, response, rc, status, task = (
self.rest._is_job_finished(job_id))
self.assertTrue(complete)
self.assertEqual(-1, rc)
def test_check_status_code_success(self):
status_code = 200
self.rest.check_status_code_success(
'test success', status_code, "")
def test_check_status_code_not_success(self):
status_code = 500
self.assertRaises(exception.VolumeBackendAPIException,
self.rest.check_status_code_success,
'test exception', status_code, "")
def test_wait_for_job_success(self):
operation = 'test'
status_code = 202
job = self.data.job_list[0]
extra_specs = self.data.extra_specs
self.rest.wait_for_job(
operation, status_code, job, extra_specs)
def test_wait_for_job_failed(self):
operation = 'test'
status_code = 202
job = self.data.job_list[2]
extra_specs = self.data.extra_specs
with mock.patch.object(self.rest, 'wait_for_job_complete',
return_value=(-1, '', '', '')):
self.assertRaises(exception.VolumeBackendAPIException,
self.rest.wait_for_job,
operation, status_code, job, extra_specs)
def test_get_resource_present(self):
array = self.data.array
category = 'sloprovisioning'
resource_type = 'storagegroup'
resource = self.rest.get_resource(array, category, resource_type)
self.assertEqual(self.data.sg_list, resource)
def test_get_resource_not_present(self):
array = self.data.array
category = 'sloprovisioning'
resource_type = self.data.failed_resource
resource = self.rest.get_resource(array, category, resource_type)
self.assertIsNone(resource)
def test_create_resource_success(self):
array = self.data.array
category = ''
resource_type = ''
payload = {'someKey': 'someValue'}
status_code, message = self.rest.create_resource(
array, category, resource_type, payload)
self.assertEqual(self.data.job_list[0], message)
def test_create_resource_failed(self):
array = self.data.array
category = ''
resource_type = ''
payload = {'someKey': self.data.failed_resource}
self.assertRaises(
exception.VolumeBackendAPIException,
self.rest.create_resource, array, category,
resource_type, payload)
def test_modify_resource(self):
array = self.data.array
category = ''
resource_type = ''
payload = {'someKey': 'someValue'}
status_code, message = self.rest.modify_resource(
array, category, resource_type, payload)
self.assertEqual(self.data.job_list[0], message)
def test_modify_resource_failed(self):
array = self.data.array
category = ''
resource_type = ''
payload = {'someKey': self.data.failed_resource}
self.assertRaises(
exception.VolumeBackendAPIException,
self.rest.modify_resource, array, category,
resource_type, payload)
def test_delete_resource(self):
operation = 'delete res resource'
status_code = 204
message = None
array = self.data.array
category = 'cat'
resource_type = 'res'
resource_name = 'name'
with mock.patch.object(self.rest, 'check_status_code_success'):
self.rest.delete_resource(
array, category, resource_type, resource_name)
self.rest.check_status_code_success.assert_called_with(
operation, status_code, message)
def test_delete_resource_failed(self):
array = self.data.array
category = self.data.failed_resource
resource_type = self.data.failed_resource
resource_name = self.data.failed_resource
self.assertRaises(
exception.VolumeBackendAPIException,
self.rest.modify_resource, array, category,
resource_type, resource_name)
def test_get_array_serial(self):
ref_details = self.data.symmetrix
array_details = self.rest.get_array_serial(self.data.array)
self.assertEqual(ref_details, array_details)
def test_get_array_serial_failed(self):
array_details = self.rest.get_array_serial(self.data.failed_resource)
self.assertIsNone(array_details)
def test_get_srp_by_name(self):
ref_details = self.data.srp_details
srp_details = self.rest.get_srp_by_name(
self.data.array, self.data.srp)
self.assertEqual(ref_details, srp_details)
def test_get_slo_list(self):
ref_settings = self.data.slo_details['sloId']
slo_settings = self.rest.get_slo_list(self.data.array)
self.assertEqual(ref_settings, slo_settings)
def test_get_workload_settings(self):
ref_settings = self.data.workloadtype['workloadId']
wl_settings = self.rest.get_workload_settings(
self.data.array)
self.assertEqual(ref_settings, wl_settings)
def test_get_workload_settings_failed(self):
wl_settings = self.rest.get_workload_settings(
self.data.failed_resource)
self.assertFalse(wl_settings)
def test_get_headroom_capacity(self):
ref_headroom = self.data.headroom['headroom'][0]['headroomCapacity']
headroom_cap = self.rest.get_headroom_capacity(
self.data.array, self.data.srp,
self.data.slo, self.data.workload)
self.assertEqual(ref_headroom, headroom_cap)
def test_get_headroom_capacity_failed(self):
headroom_cap = self.rest.get_headroom_capacity(
self.data.failed_resource, self.data.srp,
self.data.slo, self.data.workload)
self.assertIsNone(headroom_cap)
def test_is_compression_capable_true(self):
compr_capable = self.rest.is_compression_capable('000197800128')
self.assertTrue(compr_capable)
def test_is_compression_capable_false(self):
compr_capable = self.rest.is_compression_capable(self.data.array)
self.assertFalse(compr_capable)
with mock.patch.object(self.rest, 'request', return_value=(200, {})):
compr_capable = self.rest.is_compression_capable(self.data.array)
self.assertFalse(compr_capable)
def test_get_storage_group(self):
ref_details = self.data.sg_details[0]
sg_details = self.rest.get_storage_group(
self.data.array, self.data.defaultstoragegroup_name)
self.assertEqual(ref_details, sg_details)
def test_get_storage_group_list(self):
ref_details = self.data.sg_list['storageGroupId']
sg_list = self.rest.get_storage_group_list(
self.data.array, {})
self.assertEqual(ref_details, sg_list)
def test_get_storage_group_list_none(self):
with mock.patch.object(self.rest, 'get_resource', return_value=None):
sg_list = self.rest.get_storage_group_list(
self.data.array, {})
self.assertFalse(sg_list)
def test_create_storage_group(self):
with mock.patch.object(self.rest, 'create_resource'):
payload = {'someKey': 'someValue'}
self.rest._create_storagegroup(self.data.array, payload)
self.rest.create_resource.assert_called_once_with(
self.data.array, 'sloprovisioning', 'storagegroup', payload)
def test_create_storage_group_success(self):
sg_name = self.rest.create_storage_group(
self.data.array, self.data.storagegroup_name_f, self.data.srp,
self.data.slo, self.data.workload, self.data.extra_specs)
self.assertEqual(self.data.storagegroup_name_f, sg_name)
def test_create_storage_group_failed(self):
self.assertRaises(
exception.VolumeBackendAPIException,
self.rest.create_storage_group, self.data.array,
self.data.failed_resource, self.data.srp, self.data.slo,
self.data.workload, self.data.extra_specs)
def test_create_storage_group_no_slo(self):
sg_name = self.rest.create_storage_group(
self.data.array, self.data.default_sg_no_slo, self.data.srp,
None, None, self.data.extra_specs)
self.assertEqual(self.data.default_sg_no_slo, sg_name)
def test_create_storage_group_compression_disabled(self):
with mock.patch.object(self.rest, '_create_storagegroup',
return_value=(200, self.data.job_list[0])):
self.rest.create_storage_group(
self.data.array, self.data.default_sg_compr_disabled,
self.data.srp, self.data.slo, self.data.workload,
self.data.extra_specs, True)
payload = {"srpId": self.data.srp,
"storageGroupId": self.data.default_sg_compr_disabled,
"emulation": "FBA",
"sloBasedStorageGroupParam": [
{"num_of_vols": 0,
"sloId": self.data.slo,
"workloadSelection": self.data.workload,
"volumeAttribute": {
"volume_size": "0",
"capacityUnit": "GB"},
"noCompression": "true"}]}
self.rest._create_storagegroup.assert_called_once_with(
self.data.array, payload)
def test_modify_storage_group(self):
array = self.data.array
storagegroup = self.data.defaultstoragegroup_name
payload = {'someKey': 'someValue'}
version = self.data.u4v_version
with mock.patch.object(self.rest, 'modify_resource'):
self.rest.modify_storage_group(array, storagegroup, payload)
self.rest.modify_resource.assert_called_once_with(
self.data.array, 'sloprovisioning', 'storagegroup',
payload, version, resource_name=storagegroup)
def test_create_volume_from_sg_success(self):
volume_name = self.data.volume_details[0]['volume_identifier']
ref_dict = self.data.provider_location
volume_dict = self.rest.create_volume_from_sg(
self.data.array, volume_name, self.data.defaultstoragegroup_name,
self.data.test_volume.size, self.data.extra_specs)
self.assertEqual(ref_dict, volume_dict)
def test_create_volume_from_sg_failed(self):
volume_name = self.data.volume_details[0]['volume_identifier']
self.assertRaises(
exception.VolumeBackendAPIException,
self.rest.create_volume_from_sg, self.data.array,
volume_name, self.data.failed_resource,
self.data.test_volume.size, self.data.extra_specs)
def test_create_volume_from_sg_cannot_retrieve_device_id(self):
with mock.patch.object(self.rest, 'find_volume_device_id',
return_value=None):
volume_name = self.data.volume_details[0]['volume_identifier']
self.assertRaises(
exception.VolumeBackendAPIException,
self.rest.create_volume_from_sg, self.data.array,
volume_name, self.data.failed_resource,
self.data.test_volume.size, self.data.extra_specs)
def test_add_vol_to_sg_success(self):
operation = 'Add volume to sg'
status_code = 202
message = self.data.job_list[0]
with mock.patch.object(self.rest, 'wait_for_job'):
device_id = self.data.device_id
self.rest.add_vol_to_sg(
self.data.array, self.data.storagegroup_name_f, device_id,
self.data.extra_specs)
self.rest.wait_for_job.assert_called_with(
operation, status_code, message, self.data.extra_specs)
def test_add_vol_to_sg_failed(self):
device_id = [self.data.device_id]
self.assertRaises(
exception.VolumeBackendAPIException,
self.rest.add_vol_to_sg, self.data.array,
self.data.failed_resource, device_id,
self.data.extra_specs)
def test_remove_vol_from_sg_success(self):
operation = 'Remove vol from sg'
status_code = 202
message = self.data.job_list[0]
with mock.patch.object(self.rest, 'wait_for_job'):
device_id = self.data.device_id
self.rest.remove_vol_from_sg(
self.data.array, self.data.storagegroup_name_f, device_id,
self.data.extra_specs)
self.rest.wait_for_job.assert_called_with(
operation, status_code, message, self.data.extra_specs)
@mock.patch.object(time, 'sleep')
def test_remove_vol_from_sg_failed(self, mock_sleep):
device_id = [self.data.volume_details[0]['volumeId']]
self.assertRaises(
exception.VolumeBackendAPIException,
self.rest.remove_vol_from_sg, self.data.array,
self.data.failed_resource, device_id,
self.data.extra_specs)
def test_get_vmax_default_storage_group(self):
ref_storage_group = self.data.sg_details[0]
ref_sg_name = self.data.defaultstoragegroup_name
storagegroup, storagegroup_name = (
self.rest.get_vmax_default_storage_group(
self.data.array, self.data.srp,
self.data.slo, self.data.workload))
self.assertEqual(ref_sg_name, storagegroup_name)
self.assertEqual(ref_storage_group, storagegroup)
def test_delete_storage_group(self):
operation = 'delete storagegroup resource'
status_code = 204
message = None
with mock.patch.object(self.rest, 'check_status_code_success'):
self.rest.delete_storage_group(
self.data.array, self.data.storagegroup_name_f)
self.rest.check_status_code_success.assert_called_with(
operation, status_code, message)
def test_is_child_sg_in_parent_sg(self):
is_child1 = self.rest.is_child_sg_in_parent_sg(
self.data.array, self.data.storagegroup_name_f,
self.data.parent_sg_f)
is_child2 = self.rest.is_child_sg_in_parent_sg(
self.data.array, self.data.defaultstoragegroup_name,
self.data.parent_sg_f)
self.assertTrue(is_child1)
self.assertFalse(is_child2)
def test_add_child_sg_to_parent_sg(self):
payload = {"editStorageGroupActionParam": {
"expandStorageGroupParam": {
"addExistingStorageGroupParam": {
"storageGroupId": [self.data.storagegroup_name_f]}}}}
with mock.patch.object(self.rest, 'modify_storage_group',
return_value=(202, self.data.job_list[0])):
self.rest.add_child_sg_to_parent_sg(
self.data.array, self.data.storagegroup_name_f,
self.data.parent_sg_f, self.data.extra_specs)
self.rest.modify_storage_group.assert_called_once_with(
self.data.array, self.data.parent_sg_f, payload)
def test_remove_child_sg_from_parent_sg(self):
payload = {"editStorageGroupActionParam": {
"removeStorageGroupParam": {
"storageGroupId": [self.data.storagegroup_name_f],
"force": 'true'}}}
with mock.patch.object(self.rest, 'modify_storage_group',
return_value=(202, self.data.job_list[0])):
self.rest.remove_child_sg_from_parent_sg(
self.data.array, self.data.storagegroup_name_f,
self.data.parent_sg_f, self.data.extra_specs)
self.rest.modify_storage_group.assert_called_once_with(
self.data.array, self.data.parent_sg_f, payload)
def test_get_volume_list(self):
ref_volumes = [self.data.device_id, self.data.device_id2]
volumes = self.rest.get_volume_list(self.data.array, {})
self.assertEqual(ref_volumes, volumes)
def test_get_volume(self):
ref_volumes = self.data.volume_details[0]
device_id = self.data.device_id
volumes = self.rest.get_volume(self.data.array, device_id)
self.assertEqual(ref_volumes, volumes)
def test_get_private_volume(self):
device_id = self.data.device_id
ref_volume = self.data.private_vol_details['resultList']['result'][0]
volume = self.rest._get_private_volume(self.data.array, device_id)
self.assertEqual(ref_volume, volume)
def test_get_private_volume_exception(self):
device_id = self.data.device_id
with mock.patch.object(self.rest, 'get_resource',
return_value={}):
self.assertRaises(exception.VolumeBackendAPIException,
self.rest._get_private_volume,
self.data.array, device_id)
def test_modify_volume_success(self):
array = self.data.array
device_id = self.data.device_id
payload = {'someKey': 'someValue'}
with mock.patch.object(self.rest, 'modify_resource'):
self.rest._modify_volume(array, device_id, payload)
self.rest.modify_resource.assert_called_once_with(
self.data.array, 'sloprovisioning', 'volume',
payload, resource_name=device_id)
def test_modify_volume_failed(self):
payload = {'someKey': self.data.failed_resource}
device_id = self.data.device_id
self.assertRaises(
exception.VolumeBackendAPIException,
self.rest._modify_volume, self.data.array,
device_id, payload)
def test_extend_volume(self):
device_id = self.data.device_id
new_size = '3'
extend_vol_payload = {"executionOption": "ASYNCHRONOUS",
"editVolumeActionParam": {
"expandVolumeParam": {
"volumeAttribute": {
"volume_size": new_size,
"capacityUnit": "GB"}}}}
with mock.patch.object(self.rest, '_modify_volume',
return_value=(202, self.data.job_list[0])):
self.rest.extend_volume(self.data.array, device_id, new_size,
self.data.extra_specs)
self.rest._modify_volume.assert_called_once_with(
self.data.array, device_id, extend_vol_payload)
def test_delete_volume(self):
device_id = self.data.device_id
with mock.patch.object(self.rest, 'delete_resource'):
with mock.patch.object(
self.rest, '_modify_volume',
side_effect=[None, exception.VolumeBackendAPIException]):
for x in range(0, 2):
self.rest.delete_volume(self.data.array, device_id)
mod_call_count = self.rest._modify_volume.call_count
self.assertEqual(2, mod_call_count)
self.rest.delete_resource.assert_called_once_with(
self.data.array, 'sloprovisioning', 'volume', device_id)
def test_rename_volume(self):
device_id = self.data.device_id
payload = {"editVolumeActionParam": {
"modifyVolumeIdentifierParam": {
"volumeIdentifier": {
"identifier_name": 'new_name',
"volumeIdentifierChoice": "identifier_name"}}}}
with mock.patch.object(self.rest, '_modify_volume'):
self.rest.rename_volume(self.data.array, device_id, 'new_name')
self.rest._modify_volume.assert_called_once_with(
self.data.array, device_id, payload)
def test_find_mv_connections_for_vol(self):
device_id = self.data.device_id
ref_lun_id = int((self.data.maskingview[0]['maskingViewConnection']
[0]['host_lun_address']), 16)
host_lun_id = self.rest.find_mv_connections_for_vol(
self.data.array, self.data.masking_view_name_f, device_id)
self.assertEqual(ref_lun_id, host_lun_id)
def test_find_mv_connections_for_vol_failed(self):
# no masking view info retrieved
device_id = self.data.volume_details[0]['volumeId']
host_lun_id = self.rest.find_mv_connections_for_vol(
self.data.array, self.data.failed_resource, device_id)
self.assertIsNone(host_lun_id)
# no connection info received
with mock.patch.object(self.rest, 'get_resource',
return_value={'no_conn': 'no_info'}):
host_lun_id2 = self.rest.find_mv_connections_for_vol(
self.data.array, self.data.masking_view_name_f, device_id)
self.assertIsNone(host_lun_id2)
def test_get_storage_groups_from_volume(self):
array = self.data.array
device_id = self.data.device_id
ref_list = self.data.volume_details[0]['storageGroupId']
sg_list = self.rest.get_storage_groups_from_volume(array, device_id)
self.assertEqual(ref_list, sg_list)
def test_get_num_vols_in_sg(self):
num_vol = self.rest.get_num_vols_in_sg(
self.data.array, self.data.defaultstoragegroup_name)
self.assertEqual(2, num_vol)
def test_get_num_vols_in_sg_no_num(self):
with mock.patch.object(self.rest, 'get_storage_group',
return_value={}):
num_vol = self.rest.get_num_vols_in_sg(
self.data.array, self.data.defaultstoragegroup_name)
self.assertEqual(0, num_vol)
def test_is_volume_in_storagegroup(self):
# True
array = self.data.array
device_id = self.data.device_id
storagegroup = self.data.defaultstoragegroup_name
is_vol1 = self.rest.is_volume_in_storagegroup(
array, device_id, storagegroup)
# False
with mock.patch.object(self.rest, 'get_storage_groups_from_volume',
return_value=[]):
is_vol2 = self.rest.is_volume_in_storagegroup(
array, device_id, storagegroup)
self.assertTrue(is_vol1)
self.assertFalse(is_vol2)
def test_find_volume_device_number(self):
array = self.data.array
volume_name = self.data.volume_details[0]['volume_identifier']
ref_device = self.data.device_id
device_number = self.rest.find_volume_device_id(array, volume_name)
self.assertEqual(ref_device, device_number)
def test_find_volume_device_number_failed(self):
array = self.data.array
with mock.patch.object(self.rest, 'get_volume_list',
return_value=[]):
device_number = self.rest.find_volume_device_id(
array, 'name')
self.assertIsNone(device_number)
def test_get_volume_success(self):
array = self.data.array
device_id = self.data.device_id
ref_volume = self.data.volume_details[0]
volume = self.rest.get_volume(array, device_id)
self.assertEqual(ref_volume, volume)
def test_get_volume_failed(self):
array = self.data.array
device_id = self.data.failed_resource
self.assertRaises(exception.VolumeBackendAPIException,
self.rest.get_volume,
array, device_id)
def test_find_volume_identifier(self):
array = self.data.array
device_id = self.data.device_id
ref_name = self.data.volume_details[0]['volume_identifier']
vol_name = self.rest.find_volume_identifier(array, device_id)
self.assertEqual(ref_name, vol_name)
def test_get_volume_size(self):
array = self.data.array
device_id = self.data.device_id
ref_size = self.data.test_volume.size
size = self.rest.get_size_of_device_on_array(array, device_id)
self.assertEqual(ref_size, size)
def test_get_volume_size_exception(self):
array = self.data.array
device_id = self.data.device_id
with mock.patch.object(self.rest, 'get_volume',
return_value=None):
size = self.rest.get_size_of_device_on_array(
array, device_id)
self.assertIsNone(size)
def test_get_portgroup(self):
array = self.data.array
pg_name = self.data.port_group_name_f
ref_pg = self.data.portgroup[0]
portgroup = self.rest.get_portgroup(array, pg_name)
self.assertEqual(ref_pg, portgroup)
def test_get_port_ids(self):
array = self.data.array
pg_name = self.data.port_group_name_f
ref_ports = ["FA-1D:4"]
port_ids = self.rest.get_port_ids(array, pg_name)
self.assertEqual(ref_ports, port_ids)
def test_get_port_ids_no_portgroup(self):
array = self.data.array
pg_name = self.data.port_group_name_f
with mock.patch.object(self.rest, 'get_portgroup',
return_value=None):
port_ids = self.rest.get_port_ids(array, pg_name)
self.assertFalse(port_ids)
def test_get_port(self):
array = self.data.array
port_id = "FA-1D:4"
ref_port = self.data.port_list[0]
port = self.rest.get_port(array, port_id)
self.assertEqual(ref_port, port)
def test_get_iscsi_ip_address_and_iqn(self):
array = self.data.array
port_id = "SE-4E:0"
ref_ip = [self.data.ip]
ref_iqn = self.data.initiator
ip_addresses, iqn = self.rest.get_iscsi_ip_address_and_iqn(
array, port_id)
self.assertEqual(ref_ip, ip_addresses)
self.assertEqual(ref_iqn, iqn)
def test_get_iscsi_ip_address_and_iqn_no_port(self):
array = self.data.array
port_id = "SE-4E:0"
with mock.patch.object(self.rest, 'get_port', return_value=None):
ip_addresses, iqn = self.rest.get_iscsi_ip_address_and_iqn(
array, port_id)
self.assertIsNone(ip_addresses)
self.assertIsNone(iqn)
def test_get_target_wwns(self):
array = self.data.array
pg_name = self.data.port_group_name_f
ref_wwns = [self.data.wwnn1]
target_wwns = self.rest.get_target_wwns(array, pg_name)
self.assertEqual(ref_wwns, target_wwns)
def test_get_target_wwns_failed(self):
array = self.data.array
pg_name = self.data.port_group_name_f
with mock.patch.object(self.rest, 'get_port',
return_value=None):
target_wwns = self.rest.get_target_wwns(array, pg_name)
self.assertFalse(target_wwns)
def test_get_initiator_group(self):
array = self.data.array
ig_name = self.data.initiatorgroup_name_f
ref_ig = self.data.inititiatorgroup[0]
response_ig = self.rest.get_initiator_group(array, ig_name)
self.assertEqual(ref_ig, response_ig)
def test_get_initiator(self):
array = self.data.array
initiator_name = self.data.initiator
ref_initiator = self.data.initiator_list[1]
response_initiator = self.rest.get_initiator(array, initiator_name)
self.assertEqual(ref_initiator, response_initiator)
def test_get_initiator_list(self):
array = self.data.array
with mock.patch.object(self.rest, 'get_resource',
return_value={'initiatorId': '1234'}):
init_list = self.rest.get_initiator_list(array)
self.assertIsNotNone(init_list)
def test_get_initiator_list_none(self):
array = self.data.array
with mock.patch.object(self.rest, 'get_resource', return_value={}):
init_list = self.rest.get_initiator_list(array)
self.assertFalse(init_list)
def test_get_in_use_initiator_list_from_array(self):
ref_list = self.data.initiator_list[2]['initiatorId']
init_list = self.rest.get_in_use_initiator_list_from_array(
self.data.array)
self.assertEqual(ref_list, init_list)
def test_get_in_use_initiator_list_from_array_failed(self):
array = self.data.array
with mock.patch.object(self.rest, 'get_initiator_list',
return_value=[]):
init_list = self.rest.get_in_use_initiator_list_from_array(array)
self.assertFalse(init_list)
def test_get_initiator_group_from_initiator(self):
initiator = self.data.wwpn1
ref_group = self.data.initiatorgroup_name_f
init_group = self.rest.get_initiator_group_from_initiator(
self.data.array, initiator)
self.assertEqual(ref_group, init_group)
def test_get_initiator_group_from_initiator_failed(self):
initiator = self.data.wwpn1
with mock.patch.object(self.rest, 'get_initiator',
return_value=None):
init_group = self.rest.get_initiator_group_from_initiator(
self.data.array, initiator)
self.assertIsNone(init_group)
with mock.patch.object(self.rest, 'get_initiator',
return_value={'name': 'no_host'}):
init_group = self.rest.get_initiator_group_from_initiator(
self.data.array, initiator)
self.assertIsNone(init_group)
def test_create_initiator_group(self):
init_group_name = self.data.initiatorgroup_name_f
init_list = [self.data.wwpn1]
extra_specs = self.data.extra_specs
with mock.patch.object(self.rest, 'create_resource',
return_value=(202, self.data.job_list[0])):
payload = ({"executionOption": "ASYNCHRONOUS",
"hostId": init_group_name, "initiatorId": init_list})
self.rest.create_initiator_group(
self.data.array, init_group_name, init_list, extra_specs)
self.rest.create_resource.assert_called_once_with(
self.data.array, 'sloprovisioning', 'host', payload)
def test_delete_initiator_group(self):
with mock.patch.object(self.rest, 'delete_resource'):
self.rest.delete_initiator_group(
self.data.array, self.data.initiatorgroup_name_f)
self.rest.delete_resource.assert_called_once_with(
self.data.array, 'sloprovisioning', 'host',
self.data.initiatorgroup_name_f)
def test_get_masking_view(self):
array = self.data.array
masking_view_name = self.data.masking_view_name_f
ref_mask_view = self.data.maskingview[0]
masking_view = self.rest.get_masking_view(array, masking_view_name)
self.assertEqual(ref_mask_view, masking_view)
def test_get_masking_views_from_storage_group(self):
array = self.data.array
storagegroup_name = self.data.storagegroup_name_f
ref_mask_view = [self.data.masking_view_name_f]
masking_view = self.rest.get_masking_views_from_storage_group(
array, storagegroup_name)
self.assertEqual(ref_mask_view, masking_view)
def test_get_masking_views_by_initiator_group(self):
array = self.data.array
initiatorgroup_name = self.data.initiatorgroup_name_f
ref_mask_view = [self.data.masking_view_name_f]
masking_view = self.rest.get_masking_views_by_initiator_group(
array, initiatorgroup_name)
self.assertEqual(ref_mask_view, masking_view)
def test_get_masking_views_by_initiator_group_failed(self):
array = self.data.array
initiatorgroup_name = self.data.initiatorgroup_name_f
with mock.patch.object(self.rest, 'get_initiator_group',
return_value=None):
masking_view = self.rest.get_masking_views_by_initiator_group(
array, initiatorgroup_name)
self.assertFalse(masking_view)
with mock.patch.object(self.rest, 'get_initiator_group',
return_value={'name': 'no_mv'}):
masking_view = self.rest.get_masking_views_by_initiator_group(
array, initiatorgroup_name)
self.assertFalse(masking_view)
def test_get_element_from_masking_view(self):
array = self.data.array
maskingview_name = self.data.masking_view_name_f
# storage group
ref_sg = self.data.storagegroup_name_f
storagegroup = self.rest.get_element_from_masking_view(
array, maskingview_name, storagegroup=True)
self.assertEqual(ref_sg, storagegroup)
# initiator group
ref_ig = self.data.initiatorgroup_name_f
initiatorgroup = self.rest.get_element_from_masking_view(
array, maskingview_name, host=True)
self.assertEqual(ref_ig, initiatorgroup)
# portgroup
ref_pg = self.data.port_group_name_f
portgroup = self.rest.get_element_from_masking_view(
array, maskingview_name, portgroup=True)
self.assertEqual(ref_pg, portgroup)
def test_get_element_from_masking_view_failed(self):
array = self.data.array
maskingview_name = self.data.masking_view_name_f
# no element chosen
element = self.rest.get_element_from_masking_view(
array, maskingview_name)
self.assertIsNone(element)
# cannot retrieve maskingview
with mock.patch.object(self.rest, 'get_masking_view',
return_value=None):
self.assertRaises(exception.VolumeBackendAPIException,
self.rest.get_element_from_masking_view,
array, maskingview_name)
def test_get_common_masking_views(self):
array = self.data.array
initiatorgroup = self.data.initiatorgroup_name_f
portgroup = self.data.port_group_name_f
ref_maskingview = self.data.masking_view_name_f
maskingview_list = self.rest.get_common_masking_views(
array, portgroup, initiatorgroup)
self.assertEqual(ref_maskingview, maskingview_list)
def test_get_common_masking_views_none(self):
array = self.data.array
initiatorgroup = self.data.initiatorgroup_name_f
portgroup = self.data.port_group_name_f
with mock.patch.object(self.rest, 'get_masking_view_list',
return_value=[]):
maskingview_list = self.rest.get_common_masking_views(
array, portgroup, initiatorgroup)
self.assertFalse(maskingview_list)
def test_create_masking_view(self):
maskingview_name = self.data.masking_view_name_f
storagegroup_name = self.data.storagegroup_name_f
port_group_name = self.data.port_group_name_f
init_group_name = self.data.initiatorgroup_name_f
extra_specs = self.data.extra_specs
with mock.patch.object(self.rest, 'create_resource',
return_value=(202, self.data.job_list[0])):
payload = ({"executionOption": "ASYNCHRONOUS",
"portGroupSelection": {
"useExistingPortGroupParam": {
"portGroupId": port_group_name}},
"maskingViewId": maskingview_name,
"hostOrHostGroupSelection": {
"useExistingHostParam": {
"hostId": init_group_name}},
"storageGroupSelection": {
"useExistingStorageGroupParam": {
"storageGroupId": storagegroup_name}}})
self.rest.create_masking_view(
self.data.array, maskingview_name, storagegroup_name,
port_group_name, init_group_name, extra_specs)
self.rest.create_resource.assert_called_once_with(
self.data.array, 'sloprovisioning', 'maskingview', payload)
def test_delete_masking_view(self):
with mock.patch.object(self.rest, 'delete_resource'):
self.rest.delete_masking_view(
self.data.array, self.data.masking_view_name_f)
self.rest.delete_resource.assert_called_once_with(
self.data.array, 'sloprovisioning', 'maskingview',
self.data.masking_view_name_f)
def test_get_replication_capabilities(self):
ref_response = self.data.capabilities['symmetrixCapability'][1]
capabilities = self.rest.get_replication_capabilities(self.data.array)
self.assertEqual(ref_response, capabilities)
def test_is_clone_licenced(self):
licence = self.rest.is_snapvx_licensed(self.data.array)
self.assertTrue(licence)
false_response = {'rdfCapable': True,
'snapVxCapable': False,
'symmetrixId': '000197800123'}
with mock.patch.object(self.rest, 'get_replication_capabilities',
return_value=false_response):
licence2 = self.rest.is_snapvx_licensed(self.data.array)
self.assertFalse(licence2)
def test_is_clone_licenced_error(self):
with mock.patch.object(self.rest, 'get_replication_capabilities',
return_value=None):
licence3 = self.rest.is_snapvx_licensed(self.data.array)
self.assertFalse(licence3)
def test_create_volume_snap(self):
snap_name = (self.data.volume_snap_vx
['snapshotSrcs'][0]['snapshotName'])
device_id = self.data.device_id
extra_specs = self.data.extra_specs
payload = {"deviceNameListSource": [{"name": device_id}],
"bothSides": 'false', "star": 'false',
"force": 'false'}
resource_type = 'snapshot/%(snap)s' % {'snap': snap_name}
with mock.patch.object(self.rest, 'create_resource',
return_value=(202, self.data.job_list[0])):
self.rest.create_volume_snap(
self.data.array, snap_name, device_id, extra_specs)
self.rest.create_resource.assert_called_once_with(
self.data.array, 'replication', resource_type,
payload, private='/private')
def test_modify_volume_snap(self):
array = self.data.array
source_id = self.data.device_id
target_id = (self.data.volume_snap_vx
['snapshotSrcs'][0]['linkedDevices'][0]['targetDevice'])
snap_name = (self.data.volume_snap_vx
['snapshotSrcs'][0]['snapshotName'])
extra_specs = self.data.extra_specs
payload = {"deviceNameListSource": [{"name": source_id}],
"deviceNameListTarget": [
{"name": target_id}],
"copy": 'true', "action": "",
"star": 'false', "force": 'false',
"exact": 'false', "remote": 'false',
"symforce": 'false', "nocopy": 'false'}
with mock.patch.object(
self.rest, 'modify_resource', return_value=(
202, self.data.job_list[0])) as mock_modify:
# link
payload["action"] = "Link"
self.rest.modify_volume_snap(
array, source_id, target_id, snap_name, extra_specs, link=True)
self.rest.modify_resource.assert_called_once_with(
array, 'replication', 'snapshot', payload,
resource_name=snap_name, private='/private')
# unlink
mock_modify.reset_mock()
payload["action"] = "Unlink"
self.rest.modify_volume_snap(
array, source_id, target_id, snap_name,
extra_specs, unlink=True)
self.rest.modify_resource.assert_called_once_with(
array, 'replication', 'snapshot', payload,
resource_name=snap_name, private='/private')
# none selected
mock_modify.reset_mock()
self.rest.modify_volume_snap(
array, source_id, target_id, snap_name,
extra_specs)
self.rest.modify_resource.assert_not_called()
def test_delete_volume_snap(self):
array = self.data.array
snap_name = (self.data.volume_snap_vx
['snapshotSrcs'][0]['snapshotName'])
source_device_id = self.data.device_id
payload = {"deviceNameListSource": [{"name": source_device_id}]}
with mock.patch.object(self.rest, 'delete_resource'):
self.rest.delete_volume_snap(array, snap_name, source_device_id)
self.rest.delete_resource.assert_called_once_with(
array, 'replication', 'snapshot', snap_name,
payload=payload, private='/private')
def test_get_volume_snap_info(self):
array = self.data.array
source_device_id = self.data.device_id
ref_snap_info = self.data.volume_snap_vx
snap_info = self.rest.get_volume_snap_info(array, source_device_id)
self.assertEqual(ref_snap_info, snap_info)
def test_get_volume_snap(self):
array = self.data.array
snap_name = (self.data.volume_snap_vx
['snapshotSrcs'][0]['snapshotName'])
device_id = self.data.device_id
ref_snap = self.data.volume_snap_vx['snapshotSrcs'][0]
snap = self.rest.get_volume_snap(array, device_id, snap_name)
self.assertEqual(ref_snap, snap)
def test_get_volume_snap_none(self):
array = self.data.array
snap_name = (self.data.volume_snap_vx
['snapshotSrcs'][0]['snapshotName'])
device_id = self.data.device_id
with mock.patch.object(self.rest, 'get_volume_snap_info',
return_value=None):
snap = self.rest.get_volume_snap(array, device_id, snap_name)
self.assertIsNone(snap)
with mock.patch.object(self.rest, 'get_volume_snap_info',
return_value={'snapshotSrcs': []}):
snap = self.rest.get_volume_snap(array, device_id, snap_name)
self.assertIsNone(snap)
def test_get_sync_session(self):
array = self.data.array
source_id = self.data.device_id
target_id = (self.data.volume_snap_vx
['snapshotSrcs'][0]['linkedDevices'][0]['targetDevice'])
snap_name = (self.data.volume_snap_vx
['snapshotSrcs'][0]['snapshotName'])
ref_sync = (self.data.volume_snap_vx
['snapshotSrcs'][0]['linkedDevices'][0])
sync = self.rest.get_sync_session(
array, source_id, snap_name, target_id)
self.assertEqual(ref_sync, sync)
def test_find_snap_vx_sessions(self):
array = self.data.array
source_id = self.data.device_id
ref_sessions = [{'snap_name': 'temp-1',
'source_vol': self.data.device_id,
'target_vol_list': [self.data.device_id2]},
{'snap_name': 'temp-1',
'source_vol': self.data.device_id,
'target_vol_list': [self.data.device_id2]}]
sessions = self.rest.find_snap_vx_sessions(array, source_id)
self.assertEqual(ref_sessions, sessions)
def test_find_snap_vx_sessions_tgt_only(self):
array = self.data.array
source_id = self.data.device_id
ref_sessions = [{'snap_name': 'temp-1',
'source_vol': self.data.device_id,
'target_vol_list': [self.data.device_id2]}]
sessions = self.rest.find_snap_vx_sessions(
array, source_id, tgt_only=True)
self.assertEqual(ref_sessions, sessions)
def test_update_storagegroup_qos(self):
sg_qos = {"srp": self.data.srp, "num_of_vols": 2, "cap_gb": 2,
"storageGroupId": "OS-QOS-SG",
"slo": self.data.slo, "workload": self.data.workload,
"hostIOLimit": {"host_io_limit_io_sec": "4000",
"dynamicDistribution": "Always",
"host_io_limit_mb_sec": "4000"}}
self.data.sg_details.append(sg_qos)
array = self.data.array
extra_specs = self.data.extra_specs
extra_specs['qos'] = {
'total_iops_sec': '4000', 'DistributionType': 'Always'}
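        # 4000 io/sec with an "Always" distribution already matches the
        # existing hostIOLimit, so no update should be made.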
return_value = self.rest.update_storagegroup_qos(
array, "OS-QOS-SG", extra_specs)
self.assertEqual(False, return_value)
extra_specs['qos'] = {
'DistributionType': 'onFailure', 'total_bytes_sec': '419430400'}
return_value = self.rest.update_storagegroup_qos(
array, "OS-QOS-SG", extra_specs)
self.assertTrue(return_value)
def test_update_storagegroup_qos_exception(self):
array = self.data.array
storage_group = self.data.defaultstoragegroup_name
extra_specs = self.data.extra_specs
extra_specs['qos'] = {
'total_iops_sec': '4000', 'DistributionType': 'Wrong',
'total_bytes_sec': '4194304000'}
with mock.patch.object(self.rest, 'check_status_code_success',
side_effect=[None, None, None, Exception]):
self.assertRaises(exception.VolumeBackendAPIException,
self.rest.update_storagegroup_qos, array,
storage_group, extra_specs)
extra_specs['qos']['DistributionType'] = 'Always'
return_value = self.rest.update_storagegroup_qos(
array, "OS-QOS-SG", extra_specs)
self.assertFalse(return_value)
def test_validate_qos_input_exception(self):
qos_extra_spec = {
'total_iops_sec': 90, 'DistributionType': 'Wrong',
'total_bytes_sec': 100}
input_key = 'total_iops_sec'
sg_value = 4000
self.assertRaises(exception.VolumeBackendAPIException,
self.rest.validate_qos_input, input_key, sg_value,
qos_extra_spec, {})
input_key = 'total_bytes_sec'
sg_value = 4000
self.assertRaises(exception.VolumeBackendAPIException,
self.rest.validate_qos_input, input_key, sg_value,
qos_extra_spec, {})
def test_validate_qos_distribution_type(self):
qos_extra_spec = {
'total_iops_sec': 4000, 'DistributionType': 'Always',
'total_bytes_sec': 4194304000}
input_prop_dict = {'total_iops_sec': 4000}
sg_value = 'Always'
ret_prop_dict = self.rest.validate_qos_distribution_type(
sg_value, qos_extra_spec, input_prop_dict)
self.assertEqual(input_prop_dict, ret_prop_dict)
def test_get_rdf_group(self):
with mock.patch.object(self.rest, 'get_resource') as mock_get:
self.rest.get_rdf_group(self.data.array, self.data.rdf_group_no)
mock_get.assert_called_once_with(
self.data.array, 'replication', 'rdf_group',
self.data.rdf_group_no)
def test_get_rdf_group_list(self):
rdf_list = self.rest.get_rdf_group_list(self.data.array)
self.assertEqual(self.data.rdf_group_list, rdf_list)
def test_get_rdf_group_volume(self):
with mock.patch.object(self.rest, 'get_resource') as mock_get:
self.rest.get_rdf_group_volume(
self.data.array, self.data.rdf_group_no, self.data.device_id)
mock_get.assert_called_once_with(
self.data.array, 'replication', 'rdf_group', "70/volume/00001")
def test_are_vols_rdf_paired(self):
are_vols1, local_state, pair_state = self.rest.are_vols_rdf_paired(
self.data.array, self.data.remote_array, self.data.device_id,
self.data.device_id2, self.data.rdf_group_no)
self.assertTrue(are_vols1)
are_vols2, local_state, pair_state = self.rest.are_vols_rdf_paired(
self.data.array, "00012345", self.data.device_id,
self.data.device_id2, self.data.rdf_group_no)
self.assertFalse(are_vols2)
with mock.patch.object(self.rest, "get_rdf_group_volume",
return_value=None):
are_vols3, local, pair = self.rest.are_vols_rdf_paired(
self.data.array, self.data.remote_array, self.data.device_id,
self.data.device_id2, self.data.rdf_group_no)
self.assertFalse(are_vols3)
def test_get_rdf_group_number(self):
rdfg_num = self.rest.get_rdf_group_number(
self.data.array, self.data.rdf_group_name)
self.assertEqual(self.data.rdf_group_no, rdfg_num)
with mock.patch.object(self.rest, 'get_rdf_group_list',
return_value=None):
rdfg_num2 = self.rest.get_rdf_group_number(
self.data.array, self.data.rdf_group_name)
self.assertIsNone(rdfg_num2)
with mock.patch.object(self.rest, 'get_rdf_group',
return_value=None):
rdfg_num3 = self.rest.get_rdf_group_number(
self.data.array, self.data.rdf_group_name)
self.assertIsNone(rdfg_num3)
def test_create_rdf_device_pair(self):
ref_dict = {'array': self.data.remote_array,
'device_id': self.data.device_id2}
rdf_dict = self.rest.create_rdf_device_pair(
self.data.array, self.data.device_id, self.data.rdf_group_no,
self.data.device_id2, self.data.remote_array, "OS-2",
self.data.extra_specs)
self.assertEqual(ref_dict, rdf_dict)
def test_modify_rdf_device_pair(self):
resource_name = "70/volume/00001"
common_opts = {"force": 'false',
"symForce": 'false',
"star": 'false',
"hop2": 'false',
"bypass": 'false'}
split_opts = deepcopy(common_opts)
split_opts.update({"immediate": 'false'})
split_payload = {"action": "Split",
'executionOption': 'ASYNCHRONOUS',
"split": split_opts}
failover_opts = deepcopy(common_opts)
failover_opts.update({"establish": 'true',
"restore": 'false',
"remote": 'false',
"immediate": 'false'})
failover_payload = {"action": "Failover",
'executionOption': 'ASYNCHRONOUS',
"failover": failover_opts}
with mock.patch.object(
self.rest, "modify_resource",
return_value=(200, self.data.job_list[0])) as mock_mod:
self.rest.modify_rdf_device_pair(
self.data.array, self.data.device_id, self.data.rdf_group_no,
self.data.extra_specs, split=True)
mock_mod.assert_called_once_with(
self.data.array, 'replication', 'rdf_group',
split_payload, resource_name=resource_name,
private='/private')
mock_mod.reset_mock()
self.rest.modify_rdf_device_pair(
self.data.array, self.data.device_id, self.data.rdf_group_no,
self.data.extra_specs, split=False)
mock_mod.assert_called_once_with(
self.data.array, 'replication', 'rdf_group',
failover_payload, resource_name=resource_name,
private='/private')
def test_get_storage_group_rep(self):
array = self.data.array
source_group_name = self.data.storagegroup_name_source
ref_details = self.data.sg_details_rep[0]
volume_group = self.rest.get_storage_group_rep(array,
source_group_name)
self.assertEqual(volume_group, ref_details)
def test_get_volumes_in_storage_group(self):
array = self.data.array
storagegroup_name = self.data.storagegroup_name_source
ref_volumes = [self.data.device_id, self.data.device_id2]
volume_list = self.rest.get_volumes_in_storage_group(
array, storagegroup_name)
self.assertEqual(ref_volumes, volume_list)
def test_create_storagegroup_snap(self):
array = self.data.array
extra_specs = self.data.extra_specs
source_group = self.data.storagegroup_name_source
snap_name = self.data.group_snapshot_name
with mock.patch.object(
self.rest, "create_storagegroup_snap") as mock_create:
self.rest.create_storagegroup_snap(
array, source_group, snap_name, extra_specs)
mock_create.assert_called_once_with(array,
source_group,
snap_name,
extra_specs)
class VMAXProvisionTest(test.TestCase):
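    """Unit tests for provision.VMAXProvision."""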
def setUp(self):
self.data = VMAXCommonData()
super(VMAXProvisionTest, self).setUp()
config_group = 'ProvisionTests'
self.fake_xml = FakeXML().create_fake_config_file(
config_group, self.data.port_group_name_i)
configuration = FakeConfiguration(self.fake_xml, config_group)
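        # Stub out the Unisphere REST session and zero the unlink retry
        # interval so the tests make no live calls and never sleep.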
rest.VMAXRest._establish_rest_session = mock.Mock(
return_value=FakeRequestsSession())
provision.UNLINK_INTERVAL = 0
driver = iscsi.VMAXISCSIDriver(configuration=configuration)
self.driver = driver
self.common = self.driver.common
self.provision = self.common.provision
self.utils = self.common.utils
self.rest = self.common.rest
def test_create_storage_group(self):
array = self.data.array
storagegroup_name = self.data.storagegroup_name_f
srp = self.data.srp
slo = self.data.slo
workload = self.data.workload
extra_specs = self.data.extra_specs
storagegroup = self.provision.create_storage_group(
array, storagegroup_name, srp, slo, workload, extra_specs)
self.assertEqual(storagegroup_name, storagegroup)
def test_create_volume_from_sg(self):
array = self.data.array
storagegroup_name = self.data.storagegroup_name_f
volumeId = self.data.test_volume.id
volume_name = self.utils.get_volume_element_name(volumeId)
volume_size = self.data.test_volume.size
extra_specs = self.data.extra_specs
ref_dict = self.data.provider_location
volume_dict = self.provision.create_volume_from_sg(
array, volume_name, storagegroup_name, volume_size, extra_specs)
self.assertEqual(ref_dict, volume_dict)
def test_delete_volume_from_srp(self):
array = self.data.array
device_id = self.data.device_id
volume_name = self.data.volume_details[0]['volume_identifier']
with mock.patch.object(self.provision.rest, 'delete_volume'):
self.provision.delete_volume_from_srp(
array, device_id, volume_name)
self.provision.rest.delete_volume.assert_called_once_with(
array, device_id)
def test_create_volume_snap_vx(self):
array = self.data.array
source_device_id = self.data.device_id
snap_name = self.data.snap_location['snap_name']
extra_specs = self.data.extra_specs
with mock.patch.object(self.provision.rest, 'create_volume_snap'):
self.provision.create_volume_snapvx(
array, source_device_id, snap_name, extra_specs)
self.provision.rest.create_volume_snap.assert_called_once_with(
array, snap_name, source_device_id, extra_specs)
def test_create_volume_replica_create_snap_true(self):
array = self.data.array
source_device_id = self.data.device_id
target_device_id = self.data.device_id2
snap_name = self.data.snap_location['snap_name']
extra_specs = self.data.extra_specs
with mock.patch.object(self.provision, 'create_volume_snapvx'):
with mock.patch.object(self.provision.rest, 'modify_volume_snap'):
self.provision.create_volume_replica(
array, source_device_id, target_device_id,
snap_name, extra_specs, create_snap=True)
self.provision.rest.modify_volume_snap.assert_called_once_with(
array, source_device_id, target_device_id, snap_name,
extra_specs, link=True)
self.provision.create_volume_snapvx.assert_called_once_with(
array, source_device_id, snap_name, extra_specs)
def test_create_volume_replica_create_snap_false(self):
array = self.data.array
source_device_id = self.data.device_id
target_device_id = self.data.device_id2
snap_name = self.data.snap_location['snap_name']
extra_specs = self.data.extra_specs
with mock.patch.object(self.provision, 'create_volume_snapvx'):
with mock.patch.object(self.provision.rest, 'modify_volume_snap'):
self.provision.create_volume_replica(
array, source_device_id, target_device_id,
snap_name, extra_specs, create_snap=False)
self.provision.rest.modify_volume_snap.assert_called_once_with(
array, source_device_id, target_device_id, snap_name,
extra_specs, link=True)
self.provision.create_volume_snapvx.assert_not_called()
def test_break_replication_relationship(self):
array = self.data.array
source_device_id = self.data.device_id
target_device_id = self.data.device_id2
snap_name = self.data.snap_location['snap_name']
extra_specs = self.data.extra_specs
with mock.patch.object(self.provision.rest, 'modify_volume_snap'):
self.provision.break_replication_relationship(
array, target_device_id, source_device_id, snap_name,
extra_specs)
(self.provision.rest.modify_volume_snap.
assert_called_once_with(
array, source_device_id, target_device_id,
snap_name, extra_specs, unlink=True))
def test_unlink_volume(self):
with mock.patch.object(self.rest, 'modify_volume_snap') as mock_mod:
self.provision._unlink_volume(
self.data.array, self.data.device_id, self.data.device_id2,
self.data.snap_location['snap_name'], self.data.extra_specs)
mock_mod.assert_called_once_with(
self.data.array, self.data.device_id, self.data.device_id2,
self.data.snap_location['snap_name'], self.data.extra_specs,
unlink=True)
def test_unlink_volume_exception(self):
with mock.patch.object(
self.rest, 'modify_volume_snap', side_effect=[
exception.VolumeBackendAPIException(data=''), '']
) as mock_mod:
self.provision._unlink_volume(
self.data.array, self.data.device_id, self.data.device_id2,
self.data.snap_location['snap_name'], self.data.extra_specs)
self.assertEqual(2, mock_mod.call_count)
def test_delete_volume_snap(self):
array = self.data.array
source_device_id = self.data.device_id
snap_name = self.data.snap_location['snap_name']
with mock.patch.object(self.provision.rest, 'delete_volume_snap'):
self.provision.delete_volume_snap(
array, snap_name, source_device_id)
self.provision.rest.delete_volume_snap.assert_called_once_with(
array, snap_name, source_device_id)
def test_extend_volume(self):
array = self.data.array
device_id = self.data.device_id
new_size = '3'
extra_specs = self.data.extra_specs
with mock.patch.object(self.provision.rest, 'extend_volume'):
self.provision.extend_volume(array, device_id, new_size,
extra_specs)
self.provision.rest.extend_volume.assert_called_once_with(
array, device_id, new_size, extra_specs)
def test_get_srp_pool_stats_no_wlp(self):
array = self.data.array
array_info = self.common.pool_info['arrays_info'][0]
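        # Without WLP headroom data, free capacity falls back to
        # total usable capacity minus allocated capacity.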
ref_stats = (self.data.srp_details['total_usable_cap_gb'],
float(self.data.srp_details['total_usable_cap_gb']
- self.data.srp_details['total_allocated_cap_gb']),
self.data.srp_details['total_subscribed_cap_gb'],
self.data.srp_details['reserved_cap_percent'], False)
with mock.patch.object(self.provision,
'_get_remaining_slo_capacity_wlp',
return_value=-1):
stats = self.provision.get_srp_pool_stats(array, array_info)
self.assertEqual(ref_stats, stats)
def test_get_srp_pool_stats_wlp_enabled(self):
array = self.data.array
array_info = self.common.pool_info['arrays_info'][0]
srp = self.data.srp
headroom_capacity = self.provision.rest.get_headroom_capacity(
array, srp, array_info['SLO'], array_info['Workload'])
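        # With WLP enabled, free capacity is computed from the SLO/workload
        # headroom figure minus the allocated capacity.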
ref_stats = (self.data.srp_details['total_usable_cap_gb'],
float(headroom_capacity
- self.data.srp_details['total_allocated_cap_gb']),
self.data.srp_details['total_subscribed_cap_gb'],
self.data.srp_details['reserved_cap_percent'], True)
stats = self.provision.get_srp_pool_stats(array, array_info)
self.assertEqual(ref_stats, stats)
def test_get_srp_pool_stats_errors(self):
# cannot retrieve srp
array = self.data.array
array_info = {'srpName': self.data.failed_resource}
ref_stats = (0, 0, 0, 0, False)
stats = self.provision.get_srp_pool_stats(array, array_info)
self.assertEqual(ref_stats, stats)
# cannot report on all stats
with mock.patch.object(self.provision.rest, 'get_srp_by_name',
return_value={'total_usable_cap_gb': 33}):
with mock.patch.object(self.provision,
'_get_remaining_slo_capacity_wlp',
return_value=(-1)):
ref_stats = (33, 0, 0, 0, False)
stats = self.provision.get_srp_pool_stats(array, array_info)
self.assertEqual(ref_stats, stats)
def test_get_remaining_slo_capacity_wlp(self):
array = self.data.array
array_info = self.common.pool_info['arrays_info'][0]
srp = self.data.srp
ref_capacity = self.provision.rest.get_headroom_capacity(
array, srp, array_info['SLO'], array_info['Workload'])
remaining_capacity = (
self.provision._get_remaining_slo_capacity_wlp(
array, srp, array_info))
self.assertEqual(ref_capacity, remaining_capacity)
def test_get_remaining_slo_capacity_no_slo_or_wlp(self):
array = self.data.array
array_info = self.common.pool_info['arrays_info'][0]
srp = self.data.srp
ref_capacity = -1
with mock.patch.object(self.provision.rest, 'get_headroom_capacity',
return_value=None):
remaining_capacity = (
self.provision._get_remaining_slo_capacity_wlp(
array, srp, {'SLO': None}))
self.assertEqual(ref_capacity, remaining_capacity)
self.provision.rest.get_headroom_capacity.assert_not_called()
remaining_capacity = (
self.provision._get_remaining_slo_capacity_wlp(
array, srp, array_info))
self.assertEqual(ref_capacity, remaining_capacity)
def test_verify_slo_workload_true(self):
# with slo and workload
array = self.data.array
slo = self.data.slo
workload = self.data.workload
srp = self.data.srp
valid_slo, valid_workload = self.provision.verify_slo_workload(
array, slo, workload, srp)
self.assertTrue(valid_slo)
self.assertTrue(valid_workload)
# slo and workload = none
slo2 = None
workload2 = None
valid_slo2, valid_workload2 = self.provision.verify_slo_workload(
array, slo2, workload2, srp)
self.assertTrue(valid_slo2)
self.assertTrue(valid_workload2)
slo2 = None
workload2 = 'None'
valid_slo2, valid_workload2 = self.provision.verify_slo_workload(
array, slo2, workload2, srp)
self.assertTrue(valid_slo2)
self.assertTrue(valid_workload2)
def test_verify_slo_workload_false(self):
# Both wrong
array = self.data.array
slo = 'Diamante'
workload = 'DSSS'
srp = self.data.srp
valid_slo, valid_workload = self.provision.verify_slo_workload(
array, slo, workload, srp)
self.assertFalse(valid_slo)
self.assertFalse(valid_workload)
# Workload set, no slo set
valid_slo, valid_workload = self.provision.verify_slo_workload(
array, None, self.data.workload, srp)
self.assertTrue(valid_slo)
self.assertFalse(valid_workload)
def test_get_slo_workload_settings_from_storage_group(self):
ref_settings = "Diamond+DSS"
sg_slo_settings = (
self.provision.get_slo_workload_settings_from_storage_group(
self.data.array, self.data.defaultstoragegroup_name))
self.assertEqual(ref_settings, sg_slo_settings)
# No workload
with mock.patch.object(self.provision.rest, 'get_storage_group',
return_value={'slo': 'Silver'}):
ref_settings2 = "Silver+NONE"
sg_slo_settings2 = (
self.provision.get_slo_workload_settings_from_storage_group(
self.data.array, 'no_workload_sg'))
self.assertEqual(ref_settings2, sg_slo_settings2)
def test_break_rdf_relationship(self):
array = self.data.array
device_id = self.data.device_id
target_device = self.data.device_id2
rdf_group_name = self.data.rdf_group_name
rep_extra_specs = self.data.rep_extra_specs
with mock.patch.object(
self.provision.rest, 'modify_rdf_device_pair') as mod_rdf:
with mock.patch.object(
self.provision.rest, 'delete_rdf_pair') as del_rdf:
self.provision.break_rdf_relationship(
array, device_id, target_device,
rdf_group_name, rep_extra_specs, "Synchronized")
mod_rdf.assert_called_once_with(
array, device_id, rdf_group_name, rep_extra_specs,
split=True)
del_rdf.assert_called_once_with(
array, device_id, rdf_group_name)
def test_failover_volume(self):
array = self.data.array
device_id = self.data.device_id
rdf_group_name = self.data.rdf_group_name
extra_specs = self.data.extra_specs
with mock.patch.object(
self.provision.rest, 'modify_rdf_device_pair') as mod_rdf:
self.provision.failover_volume(
array, device_id, rdf_group_name,
extra_specs, '', True)
mod_rdf.assert_called_once_with(
array, device_id, rdf_group_name, extra_specs,
split=False)
mod_rdf.reset_mock()
self.provision.failover_volume(
array, device_id, rdf_group_name,
extra_specs, '', False)
mod_rdf.assert_called_once_with(
array, device_id, rdf_group_name, extra_specs,
split=False)
def test_create_volume_group_success(self):
array = self.data.array
group_name = self.data.storagegroup_name_source
extra_specs = self.data.extra_specs
ref_value = self.data.storagegroup_name_source
storagegroup = self.provision.create_volume_group(array,
group_name,
extra_specs)
self.assertEqual(ref_value, storagegroup)
def test_create_group_replica(self):
array = self.data.array
source_group = self.data.storagegroup_name_source
snap_name = self.data.group_snapshot_name
extra_specs = self.data.extra_specs
with mock.patch.object(
self.provision,
'create_group_replica') as mock_create_replica:
self.provision.create_group_replica(
array, source_group, snap_name, extra_specs)
mock_create_replica.assert_called_once_with(
array, source_group, snap_name, extra_specs)
def test_delete_group_replica(self):
array = self.data.array
snap_name = self.data.group_snapshot_name
source_group_name = self.data.storagegroup_name_source
with mock.patch.object(
self.provision,
'delete_group_replica') as mock_delete_replica:
self.provision.delete_group_replica(array,
snap_name,
source_group_name)
mock_delete_replica.assert_called_once_with(
array, snap_name, source_group_name)
def test_link_and_break_replica(self):
array = self.data.array
source_group_name = self.data.storagegroup_name_source
target_group_name = self.data.target_group_name
snap_name = self.data.group_snapshot_name
extra_specs = self.data.extra_specs
deleteSnapshot = False
with mock.patch.object(
self.provision,
'link_and_break_replica') as mock_link_and_break_replica:
self.provision.link_and_break_replica(
array, source_group_name,
target_group_name, snap_name,
extra_specs, deleteSnapshot)
mock_link_and_break_replica.assert_called_once_with(
array, source_group_name,
target_group_name, snap_name,
extra_specs, deleteSnapshot)
def test_unlink_group(self):
with mock.patch.object(self.rest,
'modify_storagegroup_snap') as mock_mod:
self.provision._unlink_group(
self.data.array, self.data.storagegroup_name_source,
self.data.target_group_name,
self.data.group_snapshot_name, self.data.extra_specs)
mock_mod.assert_called_once_with(
self.data.array, self.data.storagegroup_name_source,
self.data.target_group_name,
self.data.group_snapshot_name, self.data.extra_specs,
unlink=True)
class VMAXCommonTest(test.TestCase):
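    """Unit tests for common.VMAXCommon, the shared driver logic."""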
def setUp(self):
self.data = VMAXCommonData()
super(VMAXCommonTest, self).setUp()
config_group = 'CommonTests'
self.fake_xml = FakeXML().create_fake_config_file(
config_group, self.data.port_group_name_f)
configuration = FakeConfiguration(self.fake_xml, config_group,
1, 1)
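        # Stub out the Unisphere REST session so no live array
        # connection is attempted.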
rest.VMAXRest._establish_rest_session = mock.Mock(
return_value=FakeRequestsSession())
driver = fc.VMAXFCDriver(configuration=configuration)
self.driver = driver
self.common = self.driver.common
self.masking = self.common.masking
self.provision = self.common.provision
self.rest = self.common.rest
self.utils = self.common.utils
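        # Volume-type extra specs lookups are stubbed so the tests do not
        # need a real volume type registered.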
self.utils.get_volumetype_extra_specs = (
mock.Mock(return_value=self.data.vol_type_extra_specs))
@mock.patch.object(rest.VMAXRest,
'set_rest_credentials')
@mock.patch.object(common.VMAXCommon,
'_get_slo_workload_combinations',
return_value=[])
@mock.patch.object(utils.VMAXUtils,
'parse_file_to_get_array_map',
return_value=[])
def test_gather_info_no_opts(self, mock_parse, mock_combo, mock_rest):
configuration = FakeConfiguration(None, 'config_group', None, None)
fc.VMAXFCDriver(configuration=configuration)
def test_get_slo_workload_combinations_success(self):
array_info = self.utils.parse_file_to_get_array_map(
self.common.pool_info['config_file'])
finalarrayinfolist = self.common._get_slo_workload_combinations(
array_info)
self.assertTrue(len(finalarrayinfolist) > 1)
def test_get_slo_workload_combinations_failed(self):
array_info = {}
self.assertRaises(exception.VolumeBackendAPIException,
self.common._get_slo_workload_combinations,
array_info)
def test_create_volume(self):
ref_model_update = (
{'provider_location': six.text_type(self.data.provider_location)})
model_update = self.common.create_volume(self.data.test_volume)
self.assertEqual(ref_model_update, model_update)
def test_create_volume_from_snapshot(self):
ref_model_update = (
{'provider_location': six.text_type(
self.data.provider_location)})
model_update = self.common.create_volume_from_snapshot(
self.data.test_clone_volume, self.data.test_snapshot)
self.assertEqual(ref_model_update, model_update)
# Test from legacy snapshot
model_update = self.common.create_volume_from_snapshot(
self.data.test_clone_volume, self.data.test_legacy_snapshot)
self.assertEqual(ref_model_update, model_update)
def test_cloned_volume(self):
ref_model_update = (
{'provider_location': six.text_type(
self.data.provider_location)})
model_update = self.common.create_cloned_volume(
self.data.test_clone_volume, self.data.test_volume)
self.assertEqual(ref_model_update, model_update)
def test_delete_volume(self):
with mock.patch.object(self.common, '_delete_volume'):
self.common.delete_volume(self.data.test_volume)
self.common._delete_volume.assert_called_once_with(
self.data.test_volume)
def test_create_snapshot(self):
ref_model_update = (
{'provider_location': six.text_type(
self.data.snap_location)})
model_update = self.common.create_snapshot(
self.data.test_snapshot, self.data.test_volume)
self.assertEqual(ref_model_update, model_update)
def test_delete_snapshot(self):
snap_name = self.data.snap_location['snap_name']
sourcedevice_id = self.data.snap_location['source_id']
with mock.patch.object(self.provision, 'delete_volume_snap'):
self.common.delete_snapshot(self.data.test_snapshot,
self.data.test_volume)
self.provision.delete_volume_snap.assert_called_once_with(
self.data.array, snap_name, sourcedevice_id)
def test_delete_snapshot_not_found(self):
with mock.patch.object(self.common, '_parse_snap_info',
return_value=(None, 'Something')):
with mock.patch.object(self.provision, 'delete_volume_snap'):
self.common.delete_snapshot(self.data.test_snapshot,
self.data.test_volume)
self.provision.delete_volume_snap.assert_not_called()
def test_delete_legacy_snap(self):
with mock.patch.object(self.common, '_delete_volume') as mock_del:
self.common.delete_snapshot(self.data.test_legacy_snapshot,
self.data.test_legacy_vol)
mock_del.assert_called_once_with(self.data.test_legacy_snapshot)
def test_remove_members(self):
array = self.data.array
device_id = self.data.device_id
volume = self.data.test_volume
volume_name = self.data.test_volume.name
extra_specs = self.data.extra_specs
with mock.patch.object(self.masking,
'remove_and_reset_members') as mock_rm:
self.common._remove_members(array, volume, device_id,
extra_specs, self.data.connector)
mock_rm.assert_called_once_with(
array, device_id, volume_name,
extra_specs, True, self.data.connector)
def test_unmap_lun(self):
array = self.data.array
device_id = self.data.device_id
volume = self.data.test_volume
extra_specs = deepcopy(self.data.extra_specs_intervals_set)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
connector = self.data.connector
with mock.patch.object(self.common, '_remove_members'):
self.common._unmap_lun(volume, connector)
self.common._remove_members.assert_called_once_with(
array, volume, device_id, extra_specs, connector)
def test_unmap_lun_not_mapped(self):
volume = self.data.test_volume
connector = self.data.connector
with mock.patch.object(self.common, 'find_host_lun_id',
return_value=({}, False, [])):
with mock.patch.object(self.common, '_remove_members'):
self.common._unmap_lun(volume, connector)
self.common._remove_members.assert_not_called()
def test_unmap_lun_connector_is_none(self):
array = self.data.array
device_id = self.data.device_id
volume = self.data.test_volume
extra_specs = deepcopy(self.data.extra_specs_intervals_set)
extra_specs['storagetype:portgroupname'] = (
self.data.port_group_name_f)
with mock.patch.object(self.common, '_remove_members'):
self.common._unmap_lun(volume, None)
self.common._remove_members.assert_called_once_with(
array, volume, device_id, extra_specs, None)
def test_initialize_connection_already_mapped(self):
volume = self.data.test_volume
connector = self.data.connector
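        # The masking view reports the host LUN address as a hex string,
        # so the driver is expected to return it converted to an integer.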
host_lun = (self.data.maskingview[0]['maskingViewConnection'][0]
['host_lun_address'])
ref_dict = {'hostlunid': int(host_lun, 16),
'maskingview': self.data.masking_view_name_f,
'array': self.data.array,
'device_id': self.data.device_id}
device_info_dict = self.common.initialize_connection(volume, connector)
self.assertEqual(ref_dict, device_info_dict)
def test_initialize_connection_not_mapped(self):
volume = self.data.test_volume
connector = self.data.connector
extra_specs = deepcopy(self.data.extra_specs_intervals_set)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
masking_view_dict = self.common._populate_masking_dict(
volume, connector, extra_specs)
with mock.patch.object(self.common, 'find_host_lun_id',
return_value=({}, False, [])):
with mock.patch.object(
self.common, '_attach_volume', return_value=(
{}, self.data.port_group_name_f)):
device_info_dict = self.common.initialize_connection(volume,
connector)
self.assertEqual({}, device_info_dict)
self.common._attach_volume.assert_called_once_with(
volume, connector, extra_specs, masking_view_dict, False)
def test_attach_volume_success(self):
volume = self.data.test_volume
connector = self.data.connector
extra_specs = deepcopy(self.data.extra_specs)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
masking_view_dict = self.common._populate_masking_dict(
volume, connector, extra_specs)
host_lun = (self.data.maskingview[0]['maskingViewConnection'][0]
['host_lun_address'])
ref_dict = {'hostlunid': int(host_lun, 16),
'maskingview': self.data.masking_view_name_f,
'array': self.data.array,
'device_id': self.data.device_id}
with mock.patch.object(self.masking, 'setup_masking_view',
return_value={
utils.PORTGROUPNAME:
self.data.port_group_name_f}):
device_info_dict, pg = self.common._attach_volume(
volume, connector, extra_specs, masking_view_dict)
self.assertEqual(ref_dict, device_info_dict)
def test_attach_volume_failed(self):
volume = self.data.test_volume
connector = self.data.connector
extra_specs = deepcopy(self.data.extra_specs)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
masking_view_dict = self.common._populate_masking_dict(
volume, connector, extra_specs)
with mock.patch.object(self.masking, 'setup_masking_view',
return_value={}):
with mock.patch.object(self.common, 'find_host_lun_id',
return_value=({}, False, [])):
with mock.patch.object(
self.masking,
'check_if_rollback_action_for_masking_required'):
self.assertRaises(exception.VolumeBackendAPIException,
self.common._attach_volume, volume,
connector, extra_specs,
masking_view_dict)
device_id = self.data.device_id
(self.masking.
check_if_rollback_action_for_masking_required.
assert_called_once_with(self.data.array, device_id, {}))
def test_terminate_connection(self):
volume = self.data.test_volume
connector = self.data.connector
with mock.patch.object(self.common, '_unmap_lun'):
self.common.terminate_connection(volume, connector)
self.common._unmap_lun.assert_called_once_with(
volume, connector)
@mock.patch.object(common.VMAXCommon, '_sync_check')
@mock.patch.object(provision.VMAXProvision, 'extend_volume')
def test_extend_volume_success(self, mock_extend, mock_sync):
volume = self.data.test_volume
array = self.data.array
device_id = self.data.device_id
new_size = self.data.test_volume.size
ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set)
ref_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
with mock.patch.object(self.rest, 'is_vol_in_rep_session',
return_value=(False, False, None)):
self.common.extend_volume(volume, new_size)
mock_extend.assert_called_once_with(
array, device_id, new_size, ref_extra_specs)
def test_extend_volume_failed_snap_src(self):
volume = self.data.test_volume
new_size = self.data.test_volume.size
with mock.patch.object(self.rest, 'is_vol_in_rep_session',
return_value=(False, True, None)):
self.assertRaises(exception.VolumeBackendAPIException,
self.common.extend_volume, volume, new_size)
def test_extend_volume_failed_no_device_id(self):
volume = self.data.test_volume
new_size = self.data.test_volume.size
with mock.patch.object(self.common, '_find_device_on_array',
return_value=None):
self.assertRaises(exception.VolumeBackendAPIException,
self.common.extend_volume, volume, new_size)
def test_extend_volume_failed_wrong_size(self):
volume = self.data.test_volume
new_size = 1
self.assertRaises(exception.VolumeBackendAPIException,
self.common.extend_volume, volume, new_size)
def test_update_volume_stats(self):
data = self.common.update_volume_stats()
self.assertEqual('CommonTests', data['volume_backend_name'])
def test_update_volume_stats_no_wlp(self):
with mock.patch.object(self.common, '_update_srp_stats',
return_value=('123s#SRP_1#None#None',
100, 90, 90, 10, False)):
data = self.common.update_volume_stats()
self.assertEqual('CommonTests', data['volume_backend_name'])
def test_set_config_file_and_get_extra_specs(self):
volume = self.data.test_volume
extra_specs, config_file, qos_specs = (
self.common._set_config_file_and_get_extra_specs(volume))
self.assertEqual(self.data.vol_type_extra_specs, extra_specs)
self.assertEqual(self.fake_xml, config_file)
def test_set_config_file_and_get_extra_specs_no_specs(self):
volume = self.data.test_volume
ref_config = '/etc/cinder/cinder_dell_emc_config.xml'
with mock.patch.object(self.utils, 'get_volumetype_extra_specs',
return_value=None):
extra_specs, config_file, qos_specs = (
self.common._set_config_file_and_get_extra_specs(volume))
self.assertIsNone(extra_specs)
self.assertEqual(ref_config, config_file)
def test_find_device_on_array_success(self):
volume = self.data.test_volume
extra_specs = self.data.extra_specs
ref_device_id = self.data.device_id
founddevice_id = self.common._find_device_on_array(volume, extra_specs)
self.assertEqual(ref_device_id, founddevice_id)
def test_find_device_on_array_different_device_id(self):
volume = self.data.test_volume
extra_specs = self.data.extra_specs
with mock.patch.object(
self.rest, 'find_volume_device_id',
return_value='01234'):
founddevice_id = self.common._find_device_on_array(
volume, extra_specs)
self.assertIsNone(founddevice_id)
def test_find_device_on_array_provider_location_not_string(self):
volume = fake_volume.fake_volume_obj(
context='cxt', provider_location=None)
extra_specs = self.data.extra_specs
founddevice_id = self.common._find_device_on_array(
volume, extra_specs)
self.assertIsNone(founddevice_id)
def test_find_legacy_device_on_array(self):
volume = self.data.test_legacy_vol
extra_specs = self.data.extra_specs
ref_device_id = self.data.device_id
founddevice_id = self.common._find_device_on_array(volume, extra_specs)
self.assertEqual(ref_device_id, founddevice_id)
def test_find_host_lun_id_attached(self):
volume = self.data.test_volume
extra_specs = self.data.extra_specs
host = 'HostX'
host_lun = (self.data.maskingview[0]['maskingViewConnection'][0]
['host_lun_address'])
ref_masked = {'hostlunid': int(host_lun, 16),
'maskingview': self.data.masking_view_name_f,
'array': self.data.array,
'device_id': self.data.device_id}
maskedvols, __, __ = self.common.find_host_lun_id(
volume, host, extra_specs)
self.assertEqual(ref_masked, maskedvols)
def test_find_host_lun_id_not_attached(self):
volume = self.data.test_volume
extra_specs = self.data.extra_specs
host = 'HostX'
with mock.patch.object(self.rest, 'find_mv_connections_for_vol',
return_value=None):
maskedvols, __, __ = self.common.find_host_lun_id(
volume, host, extra_specs)
self.assertEqual({}, maskedvols)
def test_get_masking_views_from_volume(self):
array = self.data.array
device_id = self.data.device_id
host = 'HostX'
ref_mv_list = [self.data.masking_view_name_f]
maskingview_list = self.common.get_masking_views_from_volume(
array, device_id, host)
self.assertEqual(ref_mv_list, maskingview_list)
def test_get_masking_views_from_volume_wrong_host(self):
array = self.data.array
device_id = self.data.device_id
host = 'DifferentHost'
maskingview_list = self.common.get_masking_views_from_volume(
array, device_id, host)
self.assertFalse(maskingview_list)
def test_find_host_lun_id_no_host_check(self):
volume = self.data.test_volume
extra_specs = self.data.extra_specs
host_lun = (self.data.maskingview[0]['maskingViewConnection'][0]
['host_lun_address'])
ref_masked = {'hostlunid': int(host_lun, 16),
'maskingview': self.data.masking_view_name_f,
'array': self.data.array,
'device_id': self.data.device_id}
maskedvols, __, __ = self.common.find_host_lun_id(
volume, None, extra_specs)
self.assertEqual(ref_masked, maskedvols)
def test_register_config_file_from_config_group_exists(self):
config_group_name = 'CommonTests'
config_file = self.common._register_config_file_from_config_group(
config_group_name)
self.assertEqual(self.fake_xml, config_file)
def test_register_config_file_from_config_group_does_not_exist(self):
config_group_name = 'IncorrectName'
self.assertRaises(exception.VolumeBackendAPIException,
self.common._register_config_file_from_config_group,
config_group_name)
def test_initial_setup_success(self):
volume = self.data.test_volume
ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set)
ref_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
extra_specs = self.common._initial_setup(volume)
self.assertEqual(ref_extra_specs, extra_specs)
def test_initial_setup_failed(self):
volume = self.data.test_volume
with mock.patch.object(self.utils, 'parse_file_to_get_array_map',
return_value=None):
self.assertRaises(exception.VolumeBackendAPIException,
self.common._initial_setup, volume)
def test_populate_masking_dict(self):
volume = self.data.test_volume
connector = self.data.connector
extra_specs = deepcopy(self.data.extra_specs)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
ref_mv_dict = self.data.masking_view_dict
masking_view_dict = self.common._populate_masking_dict(
volume, connector, extra_specs)
self.assertEqual(ref_mv_dict, masking_view_dict)
def test_populate_masking_dict_no_slo(self):
volume = self.data.test_volume
connector = self.data.connector
extra_specs = {
'slo': None,
'workload': None,
'srp': self.data.srp,
'array': self.data.array,
utils.PORTGROUPNAME: self.data.port_group_name_f}
ref_mv_dict = self.data.masking_view_dict_no_slo
masking_view_dict = self.common._populate_masking_dict(
volume, connector, extra_specs)
self.assertEqual(ref_mv_dict, masking_view_dict)
def test_populate_masking_dict_compr_disabled(self):
volume = self.data.test_volume
connector = self.data.connector
extra_specs = deepcopy(self.data.extra_specs)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
extra_specs[utils.DISABLECOMPRESSION] = "true"
ref_mv_dict = self.data.masking_view_dict_compression_disabled
masking_view_dict = self.common._populate_masking_dict(
volume, connector, extra_specs)
self.assertEqual(ref_mv_dict, masking_view_dict)
def test_create_cloned_volume(self):
volume = self.data.test_clone_volume
source_volume = self.data.test_volume
extra_specs = self.data.extra_specs
ref_dict = self.data.provider_location
clone_dict = self.common._create_cloned_volume(
volume, source_volume, extra_specs)
self.assertEqual(ref_dict, clone_dict)
def test_create_cloned_volume_is_snapshot(self):
volume = self.data.test_snapshot
source_volume = self.data.test_volume
extra_specs = self.data.extra_specs
ref_dict = self.data.snap_location
clone_dict = self.common._create_cloned_volume(
volume, source_volume, extra_specs, True, False)
self.assertEqual(ref_dict, clone_dict)
def test_create_cloned_volume_from_snapshot(self):
volume = self.data.test_clone_volume
source_volume = self.data.test_snapshot
extra_specs = self.data.extra_specs
ref_dict = self.data.provider_location
clone_dict = self.common._create_cloned_volume(
volume, source_volume, extra_specs, False, True)
self.assertEqual(ref_dict, clone_dict)
def test_create_cloned_volume_not_licenced(self):
volume = self.data.test_clone_volume
source_volume = self.data.test_volume
extra_specs = self.data.extra_specs
with mock.patch.object(self.rest, 'is_snapvx_licensed',
return_value=False):
self.assertRaises(exception.VolumeBackendAPIException,
self.common._create_cloned_volume,
volume, source_volume, extra_specs)
def test_parse_snap_info_found(self):
ref_device_id = self.data.device_id
ref_snap_name = self.data.snap_location['snap_name']
sourcedevice_id, foundsnap_name = self.common._parse_snap_info(
self.data.array, self.data.test_snapshot)
self.assertEqual(ref_device_id, sourcedevice_id)
self.assertEqual(ref_snap_name, foundsnap_name)
def test_parse_snap_info_not_found(self):
        with mock.patch.object(self.rest, 'get_volume_snap',
                               return_value=None):
            __, foundsnap_name = self.common._parse_snap_info(
                self.data.array, self.data.test_snapshot)
            self.assertIsNone(foundsnap_name)
def test_parse_snap_info_exception(self):
with mock.patch.object(
self.rest, 'get_volume_snap',
side_effect=exception.VolumeBackendAPIException):
__, foundsnap_name = self.common._parse_snap_info(
self.data.array, self.data.test_snapshot)
self.assertIsNone(foundsnap_name)
def test_parse_snap_info_provider_location_not_string(self):
snapshot = fake_snapshot.fake_snapshot_obj(
            context='ctxt', provider_location={'not': 'string'})
sourcedevice_id, foundsnap_name = self.common._parse_snap_info(
self.data.array, snapshot)
self.assertIsNone(foundsnap_name)
def test_create_snapshot_success(self):
array = self.data.array
snapshot = self.data.test_snapshot
source_device_id = self.data.device_id
extra_specs = self.data.extra_specs
ref_dict = {'snap_name': self.data.test_snapshot_snap_name,
'source_id': self.data.device_id}
snap_dict = self.common._create_snapshot(
array, snapshot, source_device_id, extra_specs)
self.assertEqual(ref_dict, snap_dict)
def test_create_snapshot_exception(self):
array = self.data.array
snapshot = self.data.test_snapshot
source_device_id = self.data.device_id
extra_specs = self.data.extra_specs
with mock.patch.object(
self.provision, 'create_volume_snapvx',
side_effect=exception.VolumeBackendAPIException):
self.assertRaises(exception.VolumeBackendAPIException,
self.common._create_snapshot,
array, snapshot, source_device_id, extra_specs)
@mock.patch.object(masking.VMAXMasking, 'remove_vol_from_storage_group')
def test_delete_volume_from_srp(self, mock_rm):
array = self.data.array
device_id = self.data.device_id
volume_name = self.data.test_volume.name
ref_extra_specs = self.data.extra_specs_intervals_set
ref_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
volume = self.data.test_volume
with mock.patch.object(self.common, '_sync_check'):
with mock.patch.object(self.common, '_delete_from_srp'):
self.common._delete_volume(volume)
self.common._delete_from_srp.assert_called_once_with(
array, device_id, volume_name, ref_extra_specs)
def test_delete_volume_not_found(self):
volume = self.data.test_volume
with mock.patch.object(self.common, '_find_device_on_array',
return_value=None):
with mock.patch.object(self.common, '_delete_from_srp'):
self.common._delete_volume(volume)
self.common._delete_from_srp.assert_not_called()
def test_create_volume_success(self):
volume_name = '1'
volume_size = self.data.test_volume.size
extra_specs = self.data.extra_specs
ref_dict = self.data.provider_location
volume_dict = self.common._create_volume(
volume_name, volume_size, extra_specs)
self.assertEqual(ref_dict, volume_dict)
def test_create_volume_failed(self):
volume_name = self.data.test_volume.name
volume_size = self.data.test_volume.size
extra_specs = self.data.extra_specs
with mock.patch.object(self.masking,
'get_or_create_default_storage_group',
return_value=self.data.failed_resource):
with mock.patch.object(self.rest, 'delete_storage_group'):
# path 1: not last vol in sg
with mock.patch.object(self.rest, 'get_num_vols_in_sg',
return_value=2):
self.assertRaises(exception.VolumeBackendAPIException,
self.common._create_volume,
volume_name, volume_size, extra_specs)
self.rest.delete_storage_group.assert_not_called()
# path 2: last vol in sg, delete sg
with mock.patch.object(self.rest, 'get_num_vols_in_sg',
return_value=0):
self.assertRaises(exception.VolumeBackendAPIException,
self.common._create_volume,
volume_name, volume_size, extra_specs)
(self.rest.delete_storage_group.
assert_called_once_with(self.data.array,
self.data.failed_resource))
def test_create_volume_incorrect_slo(self):
volume_name = self.data.test_volume.name
volume_size = self.data.test_volume.size
extra_specs = {'slo': 'Diamondz',
'workload': 'DSSSS',
'srp': self.data.srp,
'array': self.data.array}
self.assertRaises(
exception.VolumeBackendAPIException,
self.common._create_volume,
volume_name, volume_size, extra_specs)
def test_set_vmax_extra_specs(self):
srp_record = self.utils.parse_file_to_get_array_map(
self.fake_xml)
extra_specs = self.common._set_vmax_extra_specs(
self.data.vol_type_extra_specs, srp_record)
ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set)
ref_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
self.assertEqual(ref_extra_specs, extra_specs)
def test_set_vmax_extra_specs_no_srp_name(self):
srp_record = self.utils.parse_file_to_get_array_map(
self.fake_xml)
extra_specs = self.common._set_vmax_extra_specs({}, srp_record)
self.assertEqual('Optimized', extra_specs['slo'])
def test_set_vmax_extra_specs_compr_disabled(self):
with mock.patch.object(self.rest, 'is_compression_capable',
return_value=True):
srp_record = self.utils.parse_file_to_get_array_map(
self.fake_xml)
extra_specs = self.common._set_vmax_extra_specs(
self.data.vol_type_extra_specs_compr_disabled, srp_record)
ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set)
ref_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
ref_extra_specs[utils.DISABLECOMPRESSION] = "true"
self.assertEqual(ref_extra_specs, extra_specs)
def test_set_vmax_extra_specs_compr_disabled_not_compr_capable(self):
srp_record = self.utils.parse_file_to_get_array_map(
self.fake_xml)
extra_specs = self.common._set_vmax_extra_specs(
self.data.vol_type_extra_specs_compr_disabled, srp_record)
ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set)
ref_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
self.assertEqual(ref_extra_specs, extra_specs)
def test_set_vmax_extra_specs_portgroup_as_spec(self):
srp_record = self.utils.parse_file_to_get_array_map(
self.fake_xml)
extra_specs = self.common._set_vmax_extra_specs(
{utils.PORTGROUPNAME: 'extra_spec_pg'}, srp_record)
self.assertEqual('extra_spec_pg', extra_specs[utils.PORTGROUPNAME])
def test_set_vmax_extra_specs_no_portgroup_set(self):
fake_xml = FakeXML().create_fake_config_file(
'test_no_pg_set', '')
srp_record = self.utils.parse_file_to_get_array_map(fake_xml)
self.assertRaises(exception.VolumeBackendAPIException,
self.common._set_vmax_extra_specs,
{}, srp_record)
def test_delete_volume_from_srp_success(self):
array = self.data.array
device_id = self.data.device_id
volume_name = self.data.test_volume.name
extra_specs = self.data.extra_specs
with mock.patch.object(
self.provision, 'delete_volume_from_srp') as mock_del:
self.common._delete_from_srp(array, device_id, volume_name,
extra_specs)
mock_del.assert_called_once_with(array, device_id, volume_name)
def test_delete_volume_from_srp_failed(self):
array = self.data.array
device_id = self.data.failed_resource
volume_name = self.data.test_volume.name
extra_specs = self.data.extra_specs
with mock.patch.object(self.masking,
'add_volume_to_default_storage_group'):
self.assertRaises(exception.VolumeBackendAPIException,
self.common._delete_from_srp, array,
device_id, volume_name, extra_specs)
(self.masking.add_volume_to_default_storage_group.
assert_called_once_with(
array, device_id, volume_name, extra_specs))
@mock.patch.object(utils.VMAXUtils, 'is_replication_enabled',
side_effect=[False, True])
def test_remove_vol_and_cleanup_replication(self, mock_rep_enabled):
array = self.data.array
device_id = self.data.device_id
volume = self.data.test_volume
volume_name = self.data.test_volume.name
extra_specs = self.data.extra_specs
with mock.patch.object(
self.masking, 'remove_and_reset_members') as mock_rm:
with mock.patch.object(
self.common, 'cleanup_lun_replication') as mock_clean:
self.common._remove_vol_and_cleanup_replication(
array, device_id, volume_name, extra_specs)
mock_rm.assert_called_once_with(
array, device_id, volume_name, extra_specs, False)
mock_clean.assert_not_called()
self.common._remove_vol_and_cleanup_replication(
array, device_id, volume_name, extra_specs, volume)
mock_clean.assert_called_once_with(
volume, volume_name, device_id, extra_specs)
@mock.patch.object(common.VMAXCommon, '_get_replication_extra_specs',
return_value=VMAXCommonData.rep_extra_specs)
def test_get_target_wwns_from_masking_view(self, mock_rep_specs):
target_wwns = self.common.get_target_wwns_from_masking_view(
self.data.test_volume, self.data.connector)
ref_wwns = [self.data.wwnn1]
self.assertEqual(ref_wwns, target_wwns)
# Volume is failed over
with mock.patch.object(self.utils, 'is_volume_failed_over',
return_value=True):
self.common.get_target_wwns_from_masking_view(
self.data.test_volume, self.data.connector)
mock_rep_specs.assert_called_once()
def test_get_target_wwns_from_masking_view_no_mv(self):
with mock.patch.object(self.common, 'get_masking_views_from_volume',
return_value=None):
target_wwns = self.common.get_target_wwns_from_masking_view(
self.data.test_volume, self.data.connector)
self.assertFalse(target_wwns)
def test_get_port_group_from_masking_view(self):
array = self.data.array
maskingview_name = self.data.masking_view_name_f
with mock.patch.object(self.rest,
'get_element_from_masking_view'):
self.common.get_port_group_from_masking_view(
array, maskingview_name)
self.rest.get_element_from_masking_view.assert_called_once_with(
array, maskingview_name, portgroup=True)
def test_get_initiator_group_from_masking_view(self):
array = self.data.array
maskingview_name = self.data.masking_view_name_f
with mock.patch.object(self.rest,
'get_element_from_masking_view'):
self.common.get_initiator_group_from_masking_view(
array, maskingview_name)
self.rest.get_element_from_masking_view.assert_called_once_with(
array, maskingview_name, host=True)
def test_get_common_masking_views(self):
array = self.data.array
portgroup_name = self.data.port_group_name_f
initiator_group_name = self.data.initiatorgroup_name_f
with mock.patch.object(self.rest, 'get_common_masking_views'):
self.common.get_common_masking_views(
array, portgroup_name, initiator_group_name)
self.rest.get_common_masking_views.assert_called_once_with(
array, portgroup_name, initiator_group_name)
def test_get_ip_and_iqn(self):
ref_ip_iqn = [{'iqn': self.data.initiator,
'ip': self.data.ip}]
port = self.data.portgroup[1]['symmetrixPortKey'][0]['portId']
ip_iqn_list = self.common._get_ip_and_iqn(self.data.array, port)
self.assertEqual(ref_ip_iqn, ip_iqn_list)
def test_find_ip_and_iqns(self):
ref_ip_iqn = [{'iqn': self.data.initiator,
'ip': self.data.ip}]
ip_iqn_list = self.common._find_ip_and_iqns(
self.data.array, self.data.port_group_name_i)
self.assertEqual(ref_ip_iqn, ip_iqn_list)
def test_create_replica_snap_name(self):
array = self.data.array
clone_volume = self.data.test_clone_volume
source_device_id = self.data.device_id
snap_name = self.data.snap_location['snap_name']
ref_dict = self.data.provider_location
clone_dict = self.common._create_replica(
array, clone_volume, source_device_id,
self.data.extra_specs, snap_name)
self.assertEqual(ref_dict, clone_dict)
def test_create_replica_no_snap_name(self):
array = self.data.array
clone_volume = self.data.test_clone_volume
source_device_id = self.data.device_id
snap_name = "temp-" + source_device_id + clone_volume.id
ref_dict = self.data.provider_location
with mock.patch.object(self.utils, 'get_temp_snap_name',
return_value=snap_name):
clone_dict = self.common._create_replica(
array, clone_volume, source_device_id,
self.data.extra_specs)
self.assertEqual(ref_dict, clone_dict)
self.utils.get_temp_snap_name.assert_called_once_with(
('OS-' + clone_volume.id), source_device_id)
def test_create_replica_failed_cleanup_target(self):
array = self.data.array
clone_volume = self.data.test_clone_volume
device_id = self.data.device_id
snap_name = self.data.failed_resource
clone_name = 'OS-' + clone_volume.id
extra_specs = self.data.extra_specs
with mock.patch.object(self.common, '_cleanup_target'):
self.assertRaises(
exception.VolumeBackendAPIException,
self.common._create_replica, array, clone_volume,
device_id, self.data.extra_specs, snap_name)
self.common._cleanup_target.assert_called_once_with(
array, device_id, device_id, clone_name,
snap_name, extra_specs)
def test_create_replica_failed_no_target(self):
array = self.data.array
clone_volume = self.data.test_clone_volume
source_device_id = self.data.device_id
snap_name = self.data.failed_resource
with mock.patch.object(self.common, '_create_volume',
return_value={'device_id': None}):
with mock.patch.object(self.common, '_cleanup_target'):
self.assertRaises(
exception.VolumeBackendAPIException,
self.common._create_replica, array, clone_volume,
source_device_id, self.data.extra_specs, snap_name)
self.common._cleanup_target.assert_not_called()
@mock.patch.object(
masking.VMAXMasking,
'remove_and_reset_members')
def test_cleanup_target_sync_present(self, mock_remove):
array = self.data.array
clone_volume = self.data.test_clone_volume
source_device_id = self.data.device_id
target_device_id = self.data.device_id2
snap_name = self.data.failed_resource
clone_name = clone_volume.name
extra_specs = self.data.extra_specs
with mock.patch.object(self.rest, 'get_sync_session',
return_value='session'):
with mock.patch.object(self.provision,
'break_replication_relationship'):
self.common._cleanup_target(
array, target_device_id, source_device_id,
clone_name, snap_name, extra_specs)
(self.provision.break_replication_relationship.
assert_called_with(
array, target_device_id, source_device_id,
snap_name, extra_specs))
def test_cleanup_target_no_sync(self):
array = self.data.array
clone_volume = self.data.test_clone_volume
source_device_id = self.data.device_id
target_device_id = self.data.device_id2
snap_name = self.data.failed_resource
clone_name = clone_volume.name
extra_specs = self.data.extra_specs
with mock.patch.object(self.rest, 'get_sync_session',
return_value=None):
with mock.patch.object(self.common,
'_delete_from_srp'):
self.common._cleanup_target(
array, target_device_id, source_device_id,
clone_name, snap_name, extra_specs)
self.common._delete_from_srp.assert_called_once_with(
array, target_device_id, clone_name,
extra_specs)
@mock.patch.object(
provision.VMAXProvision,
'delete_volume_snap')
@mock.patch.object(
provision.VMAXProvision,
'break_replication_relationship')
def test_sync_check_temp_snap(self, mock_break, mock_delete):
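        # A 'temp-' snapshot should have its link broken and then be
        # deleted; legacy 'EMC_SMI_' temp snapshots are also deleted.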
array = self.data.array
device_id = self.data.device_id
target = self.data.volume_details[1]['volumeId']
volume_name = self.data.test_volume.name
extra_specs = self.data.extra_specs
snap_name = 'temp-1'
with mock.patch.object(self.rest, 'get_volume_snap',
return_value=snap_name):
self.common._sync_check(array, device_id, volume_name,
extra_specs)
mock_break.assert_called_with(
array, target, device_id, snap_name, extra_specs)
mock_delete.assert_called_with(array, snap_name, device_id)
# Delete legacy temp snap
mock_delete.reset_mock()
snap_name2 = 'EMC_SMI_12345'
sessions = [{'source_vol': device_id,
'snap_name': snap_name2,
'target_vol_list': []}]
with mock.patch.object(self.rest, 'find_snap_vx_sessions',
return_value=sessions):
with mock.patch.object(self.rest, 'get_volume_snap',
return_value=snap_name2):
self.common._sync_check(array, device_id, volume_name,
extra_specs)
mock_delete.assert_called_once_with(
array, snap_name2, device_id)
@mock.patch.object(
provision.VMAXProvision,
'delete_volume_snap')
@mock.patch.object(
provision.VMAXProvision,
'break_replication_relationship')
def test_sync_check_not_temp_snap(self, mock_break, mock_delete):
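        # Snapshots that are not temporary should only have the replication
        # relationship broken, never deleted.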
array = self.data.array
device_id = self.data.device_id
target = self.data.volume_details[1]['volumeId']
volume_name = self.data.test_volume.name
extra_specs = self.data.extra_specs
snap_name = 'OS-1'
sessions = [{'source_vol': device_id,
'snap_name': snap_name,
'target_vol_list': [target]}]
with mock.patch.object(self.rest, 'find_snap_vx_sessions',
return_value=sessions):
self.common._sync_check(array, device_id, volume_name,
extra_specs)
mock_break.assert_called_with(
array, target, device_id, snap_name, extra_specs)
mock_delete.assert_not_called()
@mock.patch.object(
provision.VMAXProvision,
'break_replication_relationship')
def test_sync_check_no_sessions(self, mock_break):
array = self.data.array
device_id = self.data.device_id
volume_name = self.data.test_volume.name
extra_specs = self.data.extra_specs
with mock.patch.object(self.rest, 'find_snap_vx_sessions',
return_value=None):
self.common._sync_check(array, device_id, volume_name,
extra_specs)
mock_break.assert_not_called()
def test_manage_existing_success(self):
external_ref = {u'source-name': u'00002'}
provider_location = {'device_id': u'00002', 'array': u'000197800123'}
ref_update = {'provider_location': six.text_type(provider_location)}
with mock.patch.object(
self.common, '_check_lun_valid_for_cinder_management'):
model_update = self.common.manage_existing(
self.data.test_volume, external_ref)
self.assertEqual(ref_update, model_update)
@mock.patch.object(
rest.VMAXRest, 'get_masking_views_from_storage_group',
return_value=None)
@mock.patch.object(
rest.VMAXRest, 'is_vol_in_rep_session',
return_value=(False, False, None))
def test_check_lun_valid_for_cinder_management(self, mock_rep, mock_mv):
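        # A volume that is in no masking view and no replication session is
        # valid for import, so no exception should be raised.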
external_ref = {u'source-name': u'00001'}
self.common._check_lun_valid_for_cinder_management(
self.data.array, '00001',
self.data.test_volume.id, external_ref)
@mock.patch.object(
rest.VMAXRest, 'get_volume',
side_effect=[
None,
VMAXCommonData.volume_details[0],
VMAXCommonData.volume_details[0],
VMAXCommonData.volume_details[1]])
@mock.patch.object(
rest.VMAXRest, 'get_masking_views_from_storage_group',
side_effect=[VMAXCommonData.sg_details[1]['maskingview'],
None])
@mock.patch.object(rest.VMAXRest, 'get_storage_groups_from_volume',
return_value=[VMAXCommonData.defaultstoragegroup_name])
@mock.patch.object(rest.VMAXRest, 'is_vol_in_rep_session',
side_effect=[(True, False, []), (False, False, None)])
def test_check_lun_valid_for_cinder_management_exception(
self, mock_rep, mock_sg, mock_mvs, mock_get_vol):
external_ref = {u'source-name': u'00001'}
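        # The mocked side effects drive three ManageExistingInvalidReference
        # failures in the loop, followed by an already-managed failure.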
for x in range(0, 3):
self.assertRaises(
exception.ManageExistingInvalidReference,
self.common._check_lun_valid_for_cinder_management,
self.data.array, '00001',
self.data.test_volume.id, external_ref)
self.assertRaises(exception.ManageExistingAlreadyManaged,
self.common._check_lun_valid_for_cinder_management,
self.data.array, '00001',
self.data.test_volume.id, external_ref)
def test_manage_existing_get_size(self):
external_ref = {u'source-name': u'00001'}
size = self.common.manage_existing_get_size(
self.data.test_volume, external_ref)
self.assertEqual(2, size)
def test_manage_existing_get_size_exception(self):
external_ref = {u'source-name': u'00001'}
with mock.patch.object(self.rest, 'get_size_of_device_on_array',
return_value=3.5):
self.assertRaises(exception.ManageExistingInvalidReference,
self.common.manage_existing_get_size,
self.data.test_volume, external_ref)
@mock.patch.object(common.VMAXCommon,
'_remove_vol_and_cleanup_replication')
def test_unmanage_success(self, mock_rm):
volume = self.data.test_volume
with mock.patch.object(self.rest, 'rename_volume'):
self.common.unmanage(volume)
self.rest.rename_volume.assert_called_once_with(
self.data.array, self.data.device_id,
self.data.test_volume.id)
def test_unmanage_device_not_found(self):
volume = self.data.test_volume
with mock.patch.object(self.common, '_find_device_on_array',
return_value=None):
with mock.patch.object(self.rest, 'rename_volume'):
self.common.unmanage(volume)
self.rest.rename_volume.assert_not_called()
@mock.patch.object(common.VMAXCommon,
'_slo_workload_migration')
def test_retype(self, mock_migrate):
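        # Retype should delegate to _slo_workload_migration only when the
        # volume exists on the array and is not attached.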
device_id = self.data.device_id
volume_name = self.data.test_volume.name
extra_specs = self.data.extra_specs_intervals_set
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
volume = self.data.test_volume
new_type = {'extra_specs': {}}
host = {'host': self.data.new_host}
self.common.retype(volume, new_type, host)
mock_migrate.assert_called_once_with(
device_id, volume, host, volume_name, new_type, extra_specs)
mock_migrate.reset_mock()
with mock.patch.object(
self.common, '_find_device_on_array', return_value=None):
self.common.retype(volume, new_type, host)
mock_migrate.assert_not_called()
mock_migrate.reset_mock()
volume2 = self.data.test_attached_volume
self.common.retype(volume2, new_type, host)
mock_migrate.assert_not_called()
def test_slo_workload_migration_valid(self):
device_id = self.data.device_id
volume_name = self.data.test_volume.name
extra_specs = self.data.extra_specs
new_type = {'extra_specs': {}}
volume = self.data.test_volume
host = {'host': self.data.new_host}
with mock.patch.object(self.common, '_migrate_volume'):
self.common._slo_workload_migration(
device_id, volume, host, volume_name, new_type, extra_specs)
self.common._migrate_volume.assert_called_once_with(
extra_specs[utils.ARRAY], device_id,
extra_specs[utils.SRP], 'Silver',
'OLTP', volume_name, new_type, extra_specs)
def test_slo_workload_migration_not_valid(self):
device_id = self.data.device_id
volume_name = self.data.test_volume.name
extra_specs = self.data.extra_specs
volume = self.data.test_volume
new_type = {'extra_specs': {}}
host = {'host': self.data.new_host}
with mock.patch.object(self.common,
'_is_valid_for_storage_assisted_migration',
return_value=(False, 'Silver', 'OLTP')):
migrate_status = self.common._slo_workload_migration(
device_id, volume, host, volume_name, new_type, extra_specs)
self.assertFalse(migrate_status)
def test_slo_workload_migration_same_hosts(self):
device_id = self.data.device_id
volume_name = self.data.test_volume.name
extra_specs = self.data.extra_specs
volume = self.data.test_volume
host = {'host': self.data.fake_host}
new_type = {'extra_specs': {}}
migrate_status = self.common._slo_workload_migration(
device_id, volume, host, volume_name, new_type, extra_specs)
self.assertFalse(migrate_status)
def test_slo_workload_migration_same_host_change_compression(self):
device_id = self.data.device_id
volume_name = self.data.test_volume.name
extra_specs = self.data.extra_specs
volume = self.data.test_volume
host = {'host': self.data.fake_host}
new_type = {'extra_specs': {utils.DISABLECOMPRESSION: "true"}}
with mock.patch.object(
self.common, '_is_valid_for_storage_assisted_migration',
return_value=(True, self.data.slo, self.data.workload)):
with mock.patch.object(self.common, '_migrate_volume'):
migrate_status = self.common._slo_workload_migration(
device_id, volume, host, volume_name, new_type,
extra_specs)
self.assertTrue(migrate_status)
self.common._migrate_volume.assert_called_once_with(
extra_specs[utils.ARRAY], device_id,
extra_specs[utils.SRP], self.data.slo,
self.data.workload, volume_name, new_type, extra_specs)
@mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members')
def test_migrate_volume_success(self, mock_remove):
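        # Migration succeeds, and storage group members are only reset when
        # the volume is currently in a storage group.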
with mock.patch.object(self.rest, 'is_volume_in_storagegroup',
return_value=True):
device_id = self.data.device_id
volume_name = self.data.test_volume.name
extra_specs = self.data.extra_specs
new_type = {'extra_specs': {}}
migrate_status = self.common._migrate_volume(
self.data.array, device_id, self.data.srp, self.data.slo,
self.data.workload, volume_name, new_type, extra_specs)
self.assertTrue(migrate_status)
target_extra_specs = {
'array': self.data.array, 'interval': 3,
'retries': 120, 'slo': self.data.slo,
'srp': self.data.srp, 'workload': self.data.workload}
mock_remove.assert_called_once_with(
self.data.array, device_id, volume_name,
target_extra_specs, reset=True)
mock_remove.reset_mock()
with mock.patch.object(
self.rest, 'get_storage_groups_from_volume',
return_value=[]):
migrate_status = self.common._migrate_volume(
self.data.array, device_id, self.data.srp, self.data.slo,
self.data.workload, volume_name, new_type, extra_specs)
self.assertTrue(migrate_status)
mock_remove.assert_not_called()
@mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members')
def test_migrate_volume_failed_get_new_sg_failed(self, mock_remove):
device_id = self.data.device_id
volume_name = self.data.test_volume.name
extra_specs = self.data.extra_specs
new_type = {'extra_specs': {}}
with mock.patch.object(
self.masking, 'get_or_create_default_storage_group',
side_effect=exception.VolumeBackendAPIException):
migrate_status = self.common._migrate_volume(
self.data.array, device_id, self.data.srp, self.data.slo,
self.data.workload, volume_name, new_type, extra_specs)
self.assertFalse(migrate_status)
def test_migrate_volume_failed_vol_not_added(self):
device_id = self.data.device_id
volume_name = self.data.test_volume.name
extra_specs = self.data.extra_specs
new_type = {'extra_specs': {}}
with mock.patch.object(
self.rest, 'is_volume_in_storagegroup',
return_value=False):
migrate_status = self.common._migrate_volume(
self.data.array, device_id, self.data.srp, self.data.slo,
self.data.workload, volume_name, new_type, extra_specs)
self.assertFalse(migrate_status)
def test_is_valid_for_storage_assisted_migration_true(self):
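        # A valid new host string should yield (True, <slo>, <workload>),
        # even when the volume has no current storage groups.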
device_id = self.data.device_id
host = {'host': self.data.new_host}
volume_name = self.data.test_volume.name
ref_return = (True, 'Silver', 'OLTP')
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host, self.data.array,
self.data.srp, volume_name, False)
self.assertEqual(ref_return, return_val)
# No current sgs found
with mock.patch.object(self.rest, 'get_storage_groups_from_volume',
return_value=None):
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host, self.data.array, self.data.srp,
volume_name, False)
self.assertEqual(ref_return, return_val)
def test_is_valid_for_storage_assisted_migration_false(self):
device_id = self.data.device_id
volume_name = self.data.test_volume.name
ref_return = (False, None, None)
# IndexError
host = {'host': 'HostX@Backend#Silver+SRP_1+000197800123'}
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host, self.data.array,
self.data.srp, volume_name, False)
self.assertEqual(ref_return, return_val)
# Wrong array
host2 = {'host': 'HostX@Backend#Silver+OLTP+SRP_1+00012345678'}
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host2, self.data.array,
self.data.srp, volume_name, False)
self.assertEqual(ref_return, return_val)
# Wrong srp
host3 = {'host': 'HostX@Backend#Silver+OLTP+SRP_2+000197800123'}
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host3, self.data.array,
self.data.srp, volume_name, False)
self.assertEqual(ref_return, return_val)
# Already in correct sg
host4 = {'host': self.data.fake_host}
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host4, self.data.array,
self.data.srp, volume_name, False)
self.assertEqual(ref_return, return_val)
def test_find_volume_group_name_from_id(self):
array = self.data.array
group_id = 'GrpId'
group_name = None
ref_group_name = self.data.storagegroup_name_with_id
with mock.patch.object(
self.rest, 'get_storage_group_list',
return_value=self.data.sg_list_rep):
group_name = self.common._find_volume_group_name_from_id(
array, group_id)
self.assertEqual(ref_group_name, group_name)
def test_find_volume_group_name_from_id_not_found(self):
array = self.data.array
group_id = 'GrpId'
group_name = None
group_name = self.common._find_volume_group_name_from_id(
array, group_id)
self.assertIsNone(group_name)
def test_find_volume_group(self):
group = self.data.test_group_1
array = self.data.array
volume_group = self.common._find_volume_group(array, group)
ref_group = self.data.sg_details_rep[0]
self.assertEqual(ref_group, volume_group)
def test_get_volume_device_ids(self):
array = self.data.array
volumes = [self.data.test_volume]
ref_device_ids = [self.data.device_id]
device_ids = self.common._get_volume_device_ids(volumes, array)
self.assertEqual(ref_device_ids, device_ids)
def test_get_members_of_volume_group(self):
array = self.data.array
group_name = self.data.storagegroup_name_source
ref_volumes = [self.data.device_id, self.data.device_id2]
member_device_ids = self.common._get_members_of_volume_group(
array, group_name)
self.assertEqual(ref_volumes, member_device_ids)
def test_get_members_of_volume_group_empty(self):
array = self.data.array
group_name = self.data.storagegroup_name_source
with mock.patch.object(
self.rest, 'get_volumes_in_storage_group',
return_value=None):
member_device_ids = self.common._get_members_of_volume_group(
array, group_name
)
self.assertIsNone(member_device_ids)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
def test_create_group_replica(self, mock_check):
source_group = self.data.test_group_1
snap_name = self.data.group_snapshot_name
with mock.patch.object(
self.common,
'_create_group_replica') as mock_create_replica:
self.common._create_group_replica(
source_group, snap_name)
mock_create_replica.assert_called_once_with(
source_group, snap_name)
def test_create_group_replica_exception(self):
source_group = self.data.test_group_failed
snap_name = self.data.group_snapshot_name
with mock.patch.object(
volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True):
self.assertRaises(exception.VolumeBackendAPIException,
self.common._create_group_replica,
source_group,
snap_name)
def test_create_group_snapshot(self):
context = None
group_snapshot = self.data.test_group_snapshot_1
snapshots = []
ref_model_update = {'status': fields.GroupStatus.AVAILABLE}
with mock.patch.object(
volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True):
model_update, snapshots_model_update = (
self.common.create_group_snapshot(
context, group_snapshot, snapshots))
self.assertEqual(ref_model_update, model_update)
def test_create_group_snapshot_exception(self):
context = None
group_snapshot = self.data.test_group_snapshot_failed
snapshots = []
with mock.patch.object(
volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True):
self.assertRaises(exception.VolumeBackendAPIException,
self.common.create_group_snapshot,
context,
group_snapshot,
snapshots)
def test_create_group(self):
ref_model_update = {'status': fields.GroupStatus.AVAILABLE}
context = None
group = self.data.test_group_1
with mock.patch.object(
volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True):
model_update = self.common.create_group(context, group)
self.assertEqual(ref_model_update, model_update)
def test_create_group_exception(self):
context = None
group = self.data.test_group_snapshot_failed
with mock.patch.object(
volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True):
self.assertRaises(exception.VolumeBackendAPIException,
self.common.create_group,
context,
group)
def test_delete_group_snapshot(self):
group_snapshot = self.data.test_group_snapshot_1
snapshots = []
context = None
ref_model_update = {'status': fields.GroupSnapshotStatus.DELETED}
with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True):
model_update, snapshots_model_update = (
self.common.delete_group_snapshot(context,
group_snapshot, snapshots))
self.assertEqual(ref_model_update, model_update)
def test_delete_group_snapshot_success(self):
group_snapshot = self.data.test_group_snapshot_1
snapshots = []
ref_model_update = {'status': fields.GroupSnapshotStatus.DELETED}
with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True):
model_update, snapshots_model_update = (
self.common._delete_group_snapshot(group_snapshot,
snapshots))
self.assertEqual(ref_model_update, model_update)
def test_delete_group_snapshot_failed(self):
group_snapshot = self.data.test_group_snapshot_failed
snapshots = []
ref_model_update = (
{'status': fields.GroupSnapshotStatus.ERROR_DELETING})
with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True):
model_update, snapshots_model_update = (
self.common._delete_group_snapshot(group_snapshot,
snapshots))
self.assertEqual(ref_model_update, model_update)
def test_update_group(self):
group = self.data.test_group_1
add_vols = [self.data.test_volume]
remove_vols = []
ref_model_update = {'status': fields.GroupStatus.AVAILABLE}
with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True):
model_update, __, __ = self.common.update_group(group,
add_vols,
remove_vols)
self.assertEqual(ref_model_update, model_update)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
def test_update_group_not_found(self, mock_check):
group = self.data.test_group_1
add_vols = []
remove_vols = []
with mock.patch.object(
self.common, '_find_volume_group',
return_value=None):
self.assertRaises(exception.GroupNotFound,
self.common.update_group,
group,
add_vols,
remove_vols)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
def test_update_group_exception(self, mock_check):
group = self.data.test_group_1
add_vols = []
remove_vols = []
with mock.patch.object(
self.common, '_find_volume_group',
side_effect=exception.VolumeBackendAPIException):
self.assertRaises(exception.VolumeBackendAPIException,
self.common.update_group,
group, add_vols, remove_vols)
def test_delete_group(self):
group = self.data.test_group_1
volumes = [self.data.test_volume]
context = None
ref_model_update = {'status': fields.GroupStatus.DELETED}
with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True),\
mock.patch.object(self.rest, 'get_volumes_in_storage_group',
return_value=[]):
model_update, __ = self.common.delete_group(
context, group, volumes)
self.assertEqual(ref_model_update, model_update)
def test_delete_group_success(self):
group = self.data.test_group_1
volumes = []
ref_model_update = {'status': fields.GroupStatus.DELETED}
with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True),\
mock.patch.object(self.rest, 'get_volumes_in_storage_group',
return_value=[]):
model_update, __ = self.common._delete_group(group, volumes)
self.assertEqual(ref_model_update, model_update)
def test_delete_group_already_deleted(self):
group = self.data.test_group_failed
ref_model_update = {'status': fields.GroupStatus.DELETED}
volumes = []
with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True):
model_update, __ = self.common._delete_group(group, volumes)
self.assertEqual(ref_model_update, model_update)
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
def test_delete_group_failed(self, mock_check):
group = self.data.test_group_1
volumes = []
ref_model_update = {'status': fields.GroupStatus.ERROR_DELETING}
with mock.patch.object(
self.rest, 'delete_storage_group',
side_effect=exception.VolumeBackendAPIException):
model_update, __ = self.common._delete_group(
group, volumes)
self.assertEqual(ref_model_update, model_update)
def test_create_group_from_src_success(self):
context = None
group = self.data.test_group_1
group_snapshot = self.data.test_group_snapshot_1
snapshots = []
volumes = [self.data.test_volume]
source_group = None
source_vols = []
ref_model_update = {'status': fields.GroupStatus.AVAILABLE}
with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True):
model_update, volumes_model_update = (
self.common.create_group_from_src(
context, group, volumes,
group_snapshot, snapshots,
source_group, source_vols))
self.assertEqual(ref_model_update, model_update)


class VMAXFCTest(test.TestCase):
def setUp(self):
self.data = VMAXCommonData()
super(VMAXFCTest, self).setUp()
config_group = 'FCTests'
self.fake_xml = FakeXML().create_fake_config_file(
config_group, self.data.port_group_name_f)
self.configuration = FakeConfiguration(self.fake_xml, config_group)
rest.VMAXRest._establish_rest_session = mock.Mock(
return_value=FakeRequestsSession())
driver = fc.VMAXFCDriver(configuration=self.configuration)
self.driver = driver
self.common = self.driver.common
self.masking = self.common.masking
self.utils = self.common.utils
self.utils.get_volumetype_extra_specs = (
mock.Mock(return_value=self.data.vol_type_extra_specs))
def test_create_volume(self):
with mock.patch.object(self.common, 'create_volume'):
self.driver.create_volume(self.data.test_volume)
self.common.create_volume.assert_called_once_with(
self.data.test_volume)
def test_create_volume_from_snapshot(self):
volume = self.data.test_clone_volume
snapshot = self.data.test_snapshot
with mock.patch.object(self.common, 'create_volume_from_snapshot'):
self.driver.create_volume_from_snapshot(volume, snapshot)
self.common.create_volume_from_snapshot.assert_called_once_with(
volume, snapshot)
def test_create_cloned_volume(self):
volume = self.data.test_clone_volume
src_volume = self.data.test_volume
with mock.patch.object(self.common, 'create_cloned_volume'):
self.driver.create_cloned_volume(volume, src_volume)
self.common.create_cloned_volume.assert_called_once_with(
volume, src_volume)
def test_delete_volume(self):
with mock.patch.object(self.common, 'delete_volume'):
self.driver.delete_volume(self.data.test_volume)
self.common.delete_volume.assert_called_once_with(
self.data.test_volume)
def test_create_snapshot(self):
with mock.patch.object(self.common, 'create_snapshot'):
self.driver.create_snapshot(self.data.test_snapshot)
self.common.create_snapshot.assert_called_once_with(
self.data.test_snapshot, self.data.test_snapshot.volume)
def test_delete_snapshot(self):
with mock.patch.object(self.common, 'delete_snapshot'):
self.driver.delete_snapshot(self.data.test_snapshot)
self.common.delete_snapshot.assert_called_once_with(
self.data.test_snapshot, self.data.test_snapshot.volume)
def test_initialize_connection(self):
with mock.patch.object(self.common, 'initialize_connection',
return_value=self.data.fc_device_info):
with mock.patch.object(self.driver, 'populate_data'):
self.driver.initialize_connection(self.data.test_volume,
self.data.connector)
self.common.initialize_connection.assert_called_once_with(
self.data.test_volume, self.data.connector)
self.driver.populate_data.assert_called_once_with(
self.data.fc_device_info, self.data.test_volume,
self.data.connector)
def test_populate_data(self):
with mock.patch.object(self.driver, '_build_initiator_target_map',
return_value=([], {})):
ref_data = {
'driver_volume_type': 'fibre_channel',
'data': {'target_lun': self.data.fc_device_info['hostlunid'],
'target_discovered': True,
'target_wwn': [],
'initiator_target_map': {}}}
data = self.driver.populate_data(self.data.fc_device_info,
self.data.test_volume,
self.data.connector)
self.assertEqual(ref_data, data)
self.driver._build_initiator_target_map.assert_called_once_with(
self.data.test_volume, self.data.connector)
def test_terminate_connection(self):
with mock.patch.object(self.common, 'terminate_connection'):
self.driver.terminate_connection(self.data.test_volume,
self.data.connector)
self.common.terminate_connection.assert_called_once_with(
self.data.test_volume, self.data.connector)
def test_terminate_connection_no_zoning_mappings(self):
with mock.patch.object(self.driver, '_get_zoning_mappings',
return_value=None):
with mock.patch.object(self.common, 'terminate_connection'):
self.driver.terminate_connection(self.data.test_volume,
self.data.connector)
self.common.terminate_connection.assert_not_called()
def test_get_zoning_mappings(self):
ref_mappings = self.data.zoning_mappings
zoning_mappings = self.driver._get_zoning_mappings(
self.data.test_volume, self.data.connector)
self.assertEqual(ref_mappings, zoning_mappings)
# Legacy vol
zoning_mappings2 = self.driver._get_zoning_mappings(
self.data.test_legacy_vol, self.data.connector)
self.assertEqual(ref_mappings, zoning_mappings2)
def test_get_zoning_mappings_no_mv(self):
with mock.patch.object(self.common, 'get_masking_views_from_volume',
return_value=None):
zoning_mappings = self.driver._get_zoning_mappings(
self.data.test_volume, self.data.connector)
self.assertFalse(zoning_mappings)
def test_cleanup_zones_other_vols_mapped(self):
ref_data = {'driver_volume_type': 'fibre_channel',
'data': {}}
data = self.driver._cleanup_zones(self.data.zoning_mappings)
self.assertEqual(ref_data, data)
def test_cleanup_zones_no_vols_mapped(self):
zoning_mappings = self.data.zoning_mappings
ref_data = {'driver_volume_type': 'fibre_channel',
'data': {'target_wwn': zoning_mappings['target_wwns'],
'initiator_target_map':
zoning_mappings['init_targ_map']}}
with mock.patch.object(self.common, 'get_common_masking_views',
return_value=[]):
data = self.driver._cleanup_zones(self.data.zoning_mappings)
self.assertEqual(ref_data, data)
def test_build_initiator_target_map(self):
ref_target_map = {'123456789012345': ['543210987654321'],
'123456789054321': ['123450987654321']}
with mock.patch.object(fczm_utils, 'create_lookup_service',
return_value=FakeLookupService()):
driver = fc.VMAXFCDriver(configuration=self.configuration)
with mock.patch.object(driver.common,
'get_target_wwns_from_masking_view',
return_value=self.data.target_wwns):
targets, target_map = driver._build_initiator_target_map(
self.data.test_volume, self.data.connector)
self.assertEqual(ref_target_map, target_map)
def test_extend_volume(self):
with mock.patch.object(self.common, 'extend_volume'):
self.driver.extend_volume(self.data.test_volume, '3')
self.common.extend_volume.assert_called_once_with(
self.data.test_volume, '3')
def test_get_volume_stats(self):
with mock.patch.object(self.driver, 'update_volume_stats'):
# no refresh
self.driver.get_volume_stats()
self.driver.update_volume_stats.assert_not_called()
# with refresh
self.driver.get_volume_stats(True)
self.driver.update_volume_stats.assert_called_once_with()
def test_update_volume_stats(self):
with mock.patch.object(self.common, 'update_volume_stats',
return_value={}):
self.driver.update_volume_stats()
self.common.update_volume_stats.assert_called_once_with()
def test_check_for_setup_error(self):
self.driver.check_for_setup_error()
def test_ensure_export(self):
self.driver.ensure_export('context', 'volume')
def test_create_export(self):
self.driver.create_export('context', 'volume', 'connector')
def test_remove_export(self):
self.driver.remove_export('context', 'volume')
def test_check_for_export(self):
self.driver.check_for_export('context', 'volume_id')
def test_manage_existing(self):
with mock.patch.object(self.common, 'manage_existing',
return_value={}):
external_ref = {u'source-name': u'00002'}
self.driver.manage_existing(self.data.test_volume, external_ref)
self.common.manage_existing.assert_called_once_with(
self.data.test_volume, external_ref)
def test_manage_existing_get_size(self):
with mock.patch.object(self.common, 'manage_existing_get_size',
return_value='1'):
external_ref = {u'source-name': u'00002'}
self.driver.manage_existing_get_size(
self.data.test_volume, external_ref)
self.common.manage_existing_get_size.assert_called_once_with(
self.data.test_volume, external_ref)
def test_unmanage_volume(self):
with mock.patch.object(self.common, 'unmanage',
return_value={}):
self.driver.unmanage(self.data.test_volume)
self.common.unmanage.assert_called_once_with(
self.data.test_volume)
def test_retype(self):
host = {'host': self.data.new_host}
new_type = {'extra_specs': {}}
with mock.patch.object(self.common, 'retype',
return_value=True):
self.driver.retype({}, self.data.test_volume, new_type, '', host)
self.common.retype.assert_called_once_with(
self.data.test_volume, new_type, host)
def test_failover_host(self):
with mock.patch.object(
self.common, 'failover_host',
return_value=(self.data.remote_array, [], [])) as mock_fo:
self.driver.failover_host(self.data.ctx, [self.data.test_volume])
mock_fo.assert_called_once_with([self.data.test_volume], None,
None)


class VMAXISCSITest(test.TestCase):
def setUp(self):
self.data = VMAXCommonData()
super(VMAXISCSITest, self).setUp()
config_group = 'ISCSITests'
self.fake_xml = FakeXML().create_fake_config_file(
config_group, self.data.port_group_name_i)
configuration = FakeConfiguration(self.fake_xml, config_group)
rest.VMAXRest._establish_rest_session = mock.Mock(
return_value=FakeRequestsSession())
driver = iscsi.VMAXISCSIDriver(configuration=configuration)
self.driver = driver
self.common = self.driver.common
self.masking = self.common.masking
self.utils = self.common.utils
self.utils.get_volumetype_extra_specs = (
mock.Mock(return_value=self.data.vol_type_extra_specs))
def test_create_volume(self):
with mock.patch.object(self.common, 'create_volume'):
self.driver.create_volume(self.data.test_volume)
self.common.create_volume.assert_called_once_with(
self.data.test_volume)
def test_create_volume_from_snapshot(self):
volume = self.data.test_clone_volume
snapshot = self.data.test_snapshot
with mock.patch.object(self.common, 'create_volume_from_snapshot'):
self.driver.create_volume_from_snapshot(volume, snapshot)
self.common.create_volume_from_snapshot.assert_called_once_with(
volume, snapshot)
def test_create_cloned_volume(self):
volume = self.data.test_clone_volume
src_volume = self.data.test_volume
with mock.patch.object(self.common, 'create_cloned_volume'):
self.driver.create_cloned_volume(volume, src_volume)
self.common.create_cloned_volume.assert_called_once_with(
volume, src_volume)
def test_delete_volume(self):
with mock.patch.object(self.common, 'delete_volume'):
self.driver.delete_volume(self.data.test_volume)
self.common.delete_volume.assert_called_once_with(
self.data.test_volume)
def test_create_snapshot(self):
with mock.patch.object(self.common, 'create_snapshot'):
self.driver.create_snapshot(self.data.test_snapshot)
self.common.create_snapshot.assert_called_once_with(
self.data.test_snapshot, self.data.test_snapshot.volume)
def test_delete_snapshot(self):
with mock.patch.object(self.common, 'delete_snapshot'):
self.driver.delete_snapshot(self.data.test_snapshot)
self.common.delete_snapshot.assert_called_once_with(
self.data.test_snapshot, self.data.test_snapshot.volume)
def test_initialize_connection(self):
ref_dict = {'maskingview': self.data.masking_view_name_f,
'array': self.data.array,
'hostlunid': 3,
'device_id': self.data.device_id,
'ip_and_iqn': [{'ip': self.data.ip,
'iqn': self.data.initiator}],
'is_multipath': False}
with mock.patch.object(self.driver, 'get_iscsi_dict'):
with mock.patch.object(
self.common, 'get_port_group_from_masking_view',
return_value=self.data.port_group_name_i):
self.driver.initialize_connection(self.data.test_volume,
self.data.connector)
self.driver.get_iscsi_dict.assert_called_once_with(
ref_dict, self.data.test_volume)
def test_get_iscsi_dict_success(self):
ip_and_iqn = self.common._find_ip_and_iqns(
self.data.array, self.data.port_group_name_i)
host_lun_id = self.data.iscsi_device_info['hostlunid']
volume = self.data.test_volume
device_info = self.data.iscsi_device_info
ref_data = {'driver_volume_type': 'iscsi', 'data': {}}
with mock.patch.object(
self.driver, 'vmax_get_iscsi_properties', return_value={}):
data = self.driver.get_iscsi_dict(device_info, volume)
self.assertEqual(ref_data, data)
self.driver.vmax_get_iscsi_properties.assert_called_once_with(
volume, ip_and_iqn, True, host_lun_id)
def test_get_iscsi_dict_exception(self):
device_info = {'ip_and_iqn': ''}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.get_iscsi_dict,
device_info, self.data.test_volume)
def test_vmax_get_iscsi_properties_one_target_no_auth(self):
vol = deepcopy(self.data.test_volume)
ip_and_iqn = self.common._find_ip_and_iqns(
self.data.array, self.data.port_group_name_i)
host_lun_id = self.data.iscsi_device_info['hostlunid']
ref_properties = {
'target_discovered': True,
'target_iqn': ip_and_iqn[0]['iqn'].split(",")[0],
'target_portal': ip_and_iqn[0]['ip'] + ":3260",
'target_lun': host_lun_id,
'volume_id': self.data.test_volume.id}
iscsi_properties = self.driver.vmax_get_iscsi_properties(
vol, ip_and_iqn, True, host_lun_id)
self.assertEqual(type(ref_properties), type(iscsi_properties))
self.assertEqual(ref_properties, iscsi_properties)
def test_vmax_get_iscsi_properties_multiple_targets(self):
ip_and_iqn = [{'ip': self.data.ip, 'iqn': self.data.initiator},
{'ip': self.data.ip, 'iqn': self.data.iqn}]
host_lun_id = self.data.iscsi_device_info['hostlunid']
ref_properties = {
'target_portals': (
[t['ip'] + ":3260" for t in ip_and_iqn]),
'target_iqns': (
[t['iqn'].split(",")[0] for t in ip_and_iqn]),
'target_luns': [host_lun_id] * len(ip_and_iqn),
'target_discovered': True,
'target_iqn': ip_and_iqn[0]['iqn'].split(",")[0],
'target_portal': ip_and_iqn[0]['ip'] + ":3260",
'target_lun': host_lun_id,
'volume_id': self.data.test_volume.id}
iscsi_properties = self.driver.vmax_get_iscsi_properties(
self.data.test_volume, ip_and_iqn, True, host_lun_id)
self.assertEqual(ref_properties, iscsi_properties)
def test_vmax_get_iscsi_properties_auth(self):
vol = deepcopy(self.data.test_volume)
vol.provider_auth = "auth_method auth_username auth_secret"
ip_and_iqn = [{'ip': self.data.ip, 'iqn': self.data.initiator},
{'ip': self.data.ip, 'iqn': self.data.iqn}]
host_lun_id = self.data.iscsi_device_info['hostlunid']
ref_properties = {
'target_portals': (
[t['ip'] + ":3260" for t in ip_and_iqn]),
'target_iqns': (
[t['iqn'].split(",")[0] for t in ip_and_iqn]),
'target_luns': [host_lun_id] * len(ip_and_iqn),
'target_discovered': True,
'target_iqn': ip_and_iqn[0]['iqn'].split(",")[0],
'target_portal': ip_and_iqn[0]['ip'] + ":3260",
'target_lun': host_lun_id,
'volume_id': self.data.test_volume.id,
'auth_method': 'auth_method',
'auth_username': 'auth_username',
'auth_password': 'auth_secret'}
iscsi_properties = self.driver.vmax_get_iscsi_properties(
vol, ip_and_iqn, True, host_lun_id)
self.assertEqual(ref_properties, iscsi_properties)
def test_terminate_connection(self):
with mock.patch.object(self.common, 'terminate_connection'):
self.driver.terminate_connection(self.data.test_volume,
self.data.connector)
self.common.terminate_connection.assert_called_once_with(
self.data.test_volume, self.data.connector)
def test_extend_volume(self):
with mock.patch.object(self.common, 'extend_volume'):
self.driver.extend_volume(self.data.test_volume, '3')
self.common.extend_volume.assert_called_once_with(
self.data.test_volume, '3')
def test_get_volume_stats(self):
with mock.patch.object(self.driver, 'update_volume_stats'):
# no refresh
self.driver.get_volume_stats()
self.driver.update_volume_stats.assert_not_called()
# with refresh
self.driver.get_volume_stats(True)
self.driver.update_volume_stats.assert_called_once_with()
def test_update_volume_stats(self):
with mock.patch.object(self.common, 'update_volume_stats',
return_value={}):
self.driver.update_volume_stats()
self.common.update_volume_stats.assert_called_once_with()
def test_check_for_setup_error(self):
self.driver.check_for_setup_error()
def test_ensure_export(self):
self.driver.ensure_export('context', 'volume')
def test_create_export(self):
self.driver.create_export('context', 'volume', 'connector')
def test_remove_export(self):
self.driver.remove_export('context', 'volume')
def test_check_for_export(self):
self.driver.check_for_export('context', 'volume_id')
def test_manage_existing(self):
with mock.patch.object(self.common, 'manage_existing',
return_value={}):
external_ref = {u'source-name': u'00002'}
self.driver.manage_existing(self.data.test_volume, external_ref)
self.common.manage_existing.assert_called_once_with(
self.data.test_volume, external_ref)
def test_manage_existing_get_size(self):
with mock.patch.object(self.common, 'manage_existing_get_size',
return_value='1'):
external_ref = {u'source-name': u'00002'}
self.driver.manage_existing_get_size(
self.data.test_volume, external_ref)
self.common.manage_existing_get_size.assert_called_once_with(
self.data.test_volume, external_ref)
def test_unmanage_volume(self):
with mock.patch.object(self.common, 'unmanage',
return_value={}):
self.driver.unmanage(self.data.test_volume)
self.common.unmanage.assert_called_once_with(
self.data.test_volume)
def test_retype(self):
host = {'host': self.data.new_host}
new_type = {'extra_specs': {}}
with mock.patch.object(self.common, 'retype',
return_value=True):
self.driver.retype({}, self.data.test_volume, new_type, '', host)
self.common.retype.assert_called_once_with(
self.data.test_volume, new_type, host)
def test_failover_host(self):
with mock.patch.object(self.common, 'failover_host',
return_value={}) as mock_fo:
self.driver.failover_host({}, [self.data.test_volume])
mock_fo.assert_called_once_with([self.data.test_volume], None,
None)


class VMAXMaskingTest(test.TestCase):
def setUp(self):
self.data = VMAXCommonData()
super(VMAXMaskingTest, self).setUp()
configuration = mock.Mock()
configuration.safe_get.return_value = 'MaskingTests'
configuration.config_group = 'MaskingTests'
self._gather_info = common.VMAXCommon._gather_info
common.VMAXCommon._gather_info = mock.Mock()
driver = common.VMAXCommon(
'iSCSI', common.VMAXCommon.VERSION, configuration=configuration)
driver_fc = common.VMAXCommon(
'FC', common.VMAXCommon.VERSION, configuration=configuration)
self.driver = driver
self.driver_fc = driver_fc
self.mask = self.driver.masking
self.extra_specs = self.data.extra_specs
self.extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_i
self.maskingviewdict = self.driver._populate_masking_dict(
self.data.test_volume, self.data.connector, self.extra_specs)
self.maskingviewdict['extra_specs'] = self.extra_specs
self.device_id = self.data.device_id
self.volume_name = self.data.volume_details[0]['volume_identifier']
def tearDown(self):
super(VMAXMaskingTest, self).tearDown()
common.VMAXCommon._gather_info = self._gather_info
@mock.patch.object(
masking.VMAXMasking,
'get_or_create_masking_view_and_map_lun')
def test_setup_masking_view(self, mock_get_or_create_mv):
self.driver.masking.setup_masking_view(
self.data.array, self.maskingviewdict, self.extra_specs)
mock_get_or_create_mv.assert_called_once()
@mock.patch.object(
masking.VMAXMasking,
'_check_adding_volume_to_storage_group')
@mock.patch.object(
masking.VMAXMasking,
'_move_vol_from_default_sg',
return_value=None)
@mock.patch.object(
masking.VMAXMasking,
'_get_or_create_masking_view',
side_effect=[None, "Error in masking view retrieval",
exception.VolumeBackendAPIException])
@mock.patch.object(
rest.VMAXRest,
'get_element_from_masking_view',
side_effect=[VMAXCommonData.port_group_name_i, Exception])
def test_get_or_create_masking_view_and_map_lun(
self, mock_masking_view_element, mock_masking, mock_move,
mock_add_volume):
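        # The first call succeeds and returns the rollback dict; the later
        # calls (masking view error message, then slo unset) should raise
        # VolumeBackendAPIException.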
rollback_dict = (
self.driver.masking.get_or_create_masking_view_and_map_lun(
self.data.array, self.maskingviewdict['maskingview_name'],
self.maskingviewdict, self.extra_specs))
self.assertEqual(self.maskingviewdict, rollback_dict)
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.masking.get_or_create_masking_view_and_map_lun,
self.data.array, self.maskingviewdict['maskingview_name'],
self.maskingviewdict, self.extra_specs)
self.maskingviewdict['slo'] = None
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.masking.get_or_create_masking_view_and_map_lun,
self.data.array, self.maskingviewdict['maskingview_name'],
self.maskingviewdict, self.extra_specs)
@mock.patch.object(
masking.VMAXMasking,
'_check_adding_volume_to_storage_group',
return_value=None)
@mock.patch.object(
rest.VMAXRest,
'move_volume_between_storage_groups',
side_effect=[None, exception.VolumeBackendAPIException(data='')])
@mock.patch.object(
rest.VMAXRest,
'is_volume_in_storagegroup',
side_effect=[True, False, True])
def test_move_vol_from_default_sg(
self, mock_volume_in_sg, mock_move_volume, mock_add):
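        # The loop covers the cases where the volume is and is not in the
        # default storage group; the final call fails because the move
        # between storage groups raises an exception.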
msg = None
for x in range(0, 2):
msg = self.driver.masking._move_vol_from_default_sg(
self.data.array, self.device_id, self.volume_name,
self.data.defaultstoragegroup_name,
self.data.storagegroup_name_i, self.extra_specs)
mock_move_volume.assert_called_once()
mock_add.assert_called_once()
self.assertIsNone(msg)
msg = self.driver.masking._move_vol_from_default_sg(
self.data.array, self.device_id, self.volume_name,
self.data.defaultstoragegroup_name,
self.data.storagegroup_name_i, self.extra_specs)
self.assertIsNotNone(msg)
@mock.patch.object(
rest.VMAXRest,
'get_masking_view',
side_effect=[VMAXCommonData.maskingview,
VMAXCommonData.maskingview, None])
@mock.patch.object(
masking.VMAXMasking,
'_validate_existing_masking_view',
side_effect=[(VMAXCommonData.maskingview[1]['storageGroupId'],
None), (None, "Error Message")])
@mock.patch.object(
masking.VMAXMasking,
'_create_new_masking_view',
return_value=None)
def test_get_or_create_masking_view(
self, mock_create_mv, mock_validate_mv,
mock_get_mv):
for x in range(0, 3):
self.driver.masking._get_or_create_masking_view(
self.data.array, self.maskingviewdict,
self.data.defaultstoragegroup_name, self.extra_specs)
mock_create_mv.assert_called_once()
@mock.patch.object(
masking.VMAXMasking,
'_get_or_create_storage_group',
side_effect=["Storage group not found", None,
"Storage group not found", None, None, None,
None, None, None, None, None])
@mock.patch.object(
masking.VMAXMasking,
'_check_port_group',
side_effect=[(None, "Port group error"), (None, None), (None, None),
(None, None)])
@mock.patch.object(
masking.VMAXMasking,
'_get_or_create_initiator_group',
side_effect=[(None, "Initiator group error"), (None, None),
(None, None)])
@mock.patch.object(
masking.VMAXMasking,
'_move_vol_from_default_sg',
side_effect=["Storage group error", None])
@mock.patch.object(
masking.VMAXMasking,
'create_masking_view',
return_value=None)
def test_create_new_masking_view(
self, mock_create_mv, mock_move, mock_create_IG,
mock_check_PG, mock_create_SG):
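        # Each iteration exercises a different failure (storage group, port
        # group, initiator group or volume move); the masking view should
        # only ever be created once.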
for x in range(0, 6):
self.driver.masking._create_new_masking_view(
self.data.array, self.maskingviewdict,
self.maskingviewdict['maskingview_name'],
self.data.defaultstoragegroup_name, self.extra_specs)
mock_create_mv.assert_called_once()
@mock.patch.object(
masking.VMAXMasking,
'_check_existing_storage_group',
side_effect=[(VMAXCommonData.storagegroup_name_i, None),
(VMAXCommonData.storagegroup_name_i, None),
(None, "Error Checking existing storage group")])
@mock.patch.object(
rest.VMAXRest,
'get_element_from_masking_view',
return_value=VMAXCommonData.port_group_name_i)
@mock.patch.object(
masking.VMAXMasking,
'_check_port_group',
side_effect=[(None, None), (None, "Error checking pg")])
@mock.patch.object(
masking.VMAXMasking,
'_check_existing_initiator_group',
return_value=(VMAXCommonData.initiatorgroup_name_i, None))
def test_validate_existing_masking_view(
self, mock_check_ig, mock_check_pg, mock_get_mv_element,
mock_check_sg):
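        # The three iterations cover a fully valid view, a port group error
        # and a storage group error; the initiator group should only be
        # checked on the successful pass.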
for x in range(0, 3):
self.driver.masking._validate_existing_masking_view(
self.data.array, self.maskingviewdict,
self.maskingviewdict['maskingview_name'],
self.data.defaultstoragegroup_name, self.extra_specs)
self.assertEqual(3, mock_check_sg.call_count)
mock_get_mv_element.assert_called_with(
self.data.array, self.maskingviewdict['maskingview_name'],
portgroup=True)
mock_check_ig.assert_called_once()
@mock.patch.object(
rest.VMAXRest,
'get_storage_group',
side_effect=[VMAXCommonData.storagegroup_name_i, None, None])
@mock.patch.object(
provision.VMAXProvision,
'create_storage_group',
side_effect=[VMAXCommonData.storagegroup_name_i, None])
def test_get_or_create_storage_group(self, mock_sg, mock_get_sg):
for x in range(0, 2):
self.driver.masking._get_or_create_storage_group(
self.data.array, self.maskingviewdict,
self.data.storagegroup_name_i, self.extra_specs)
self.driver.masking._get_or_create_storage_group(
self.data.array, self.maskingviewdict,
self.data.storagegroup_name_i, self.extra_specs, True)
self.assertEqual(3, mock_get_sg.call_count)
self.assertEqual(2, mock_sg.call_count)
@mock.patch.object(
masking.VMAXMasking,
'_move_vol_from_default_sg',
return_value=None)
@mock.patch.object(
masking.VMAXMasking,
'_get_or_create_storage_group',
return_value=None)
@mock.patch.object(
rest.VMAXRest,
'get_element_from_masking_view',
return_value=VMAXCommonData.parent_sg_i)
@mock.patch.object(
rest.VMAXRest,
'is_child_sg_in_parent_sg',
side_effect=[True, False])
@mock.patch.object(
masking.VMAXMasking,
'_check_add_child_sg_to_parent_sg',
return_value=None)
def test_check_existing_storage_group_success(
self, mock_add_sg, mock_is_child, mock_get_mv_element,
mock_create_sg, mock_move):
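        # When the child storage group already exists nothing is created;
        # when it is missing, a new one is created with the expected name.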
masking_view_dict = deepcopy(self.data.masking_view_dict)
masking_view_dict['extra_specs'] = self.data.extra_specs
with mock.patch.object(self.driver.rest, 'get_storage_group',
side_effect=[
VMAXCommonData.parent_sg_i,
VMAXCommonData.storagegroup_name_i]):
_, msg = (
self.driver.masking._check_existing_storage_group(
self.data.array, self.maskingviewdict['maskingview_name'],
self.data.defaultstoragegroup_name, masking_view_dict))
self.assertIsNone(msg)
mock_create_sg.assert_not_called()
with mock.patch.object(self.driver.rest, 'get_storage_group',
side_effect=[
VMAXCommonData.parent_sg_i, None]):
_, msg = (
self.driver.masking._check_existing_storage_group(
self.data.array, self.maskingviewdict['maskingview_name'],
self.data.defaultstoragegroup_name, masking_view_dict))
self.assertIsNone(msg)
mock_create_sg.assert_called_once_with(
self.data.array, masking_view_dict,
VMAXCommonData.storagegroup_name_f,
self.data.extra_specs)
@mock.patch.object(
masking.VMAXMasking,
'_move_vol_from_default_sg',
side_effect=[None, "Error Message"])
@mock.patch.object(
rest.VMAXRest,
'is_child_sg_in_parent_sg',
side_effect=[True, False, False])
@mock.patch.object(
rest.VMAXRest,
'get_element_from_masking_view',
return_value=VMAXCommonData.parent_sg_i)
@mock.patch.object(
rest.VMAXRest,
'get_storage_group',
side_effect=[None, VMAXCommonData.parent_sg_i, None,
VMAXCommonData.parent_sg_i, None,
VMAXCommonData.parent_sg_i, None])
def test_check_existing_storage_group_failed(
self, mock_get_sg, mock_get_mv_element, mock_child, mock_move):
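        # All four iterations should fail with a message, driven by missing
        # storage groups or a failed move out of the default storage group.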
masking_view_dict = deepcopy(self.data.masking_view_dict)
masking_view_dict['extra_specs'] = self.data.extra_specs
for x in range(0, 4):
_, msg = (
self.driver.masking._check_existing_storage_group(
self.data.array, self.maskingviewdict['maskingview_name'],
self.data.defaultstoragegroup_name, masking_view_dict))
self.assertIsNotNone(msg)
self.assertEqual(7, mock_get_sg.call_count)
self.assertEqual(1, mock_move.call_count)
@mock.patch.object(rest.VMAXRest, 'get_portgroup',
side_effect=[VMAXCommonData.port_group_name_i, None])
def test_check_port_group(
self, mock_get_pg):
for x in range(0, 2):
_, msg = self.driver.masking._check_port_group(
self.data.array, self.maskingviewdict['maskingview_name'])
self.assertIsNotNone(msg)
self.assertEqual(2, mock_get_pg.call_count)
@mock.patch.object(
masking.VMAXMasking, '_find_initiator_group',
side_effect=[VMAXCommonData.initiatorgroup_name_i, None, None])
    @mock.patch.object(
        masking.VMAXMasking, '_create_initiator_group',
        side_effect=[VMAXCommonData.initiatorgroup_name_i, None])
    def test_get_or_create_initiator_group(
            self, mock_create_ig, mock_find_ig):
self.driver.masking._get_or_create_initiator_group(
self.data.array, self.data.initiatorgroup_name_i,
self.data.connector, self.extra_specs)
mock_create_ig.assert_not_called()
found_init_group, msg = (
self.driver.masking._get_or_create_initiator_group(
self.data.array, self.data.initiatorgroup_name_i,
self.data.connector, self.extra_specs))
self.assertIsNone(msg)
found_init_group, msg = (
self.driver.masking._get_or_create_initiator_group(
self.data.array, self.data.initiatorgroup_name_i,
self.data.connector, self.extra_specs))
self.assertIsNotNone(msg)
def test_check_existing_initiator_group(self):
with mock.patch.object(
rest.VMAXRest, 'get_element_from_masking_view',
return_value=VMAXCommonData.inititiatorgroup):
ig_from_mv, msg = (
self.driver.masking._check_existing_initiator_group(
self.data.array, self.maskingviewdict['maskingview_name'],
self.maskingviewdict, self.data.storagegroup_name_i,
self.data.port_group_name_i, self.extra_specs))
self.assertEqual(self.data.inititiatorgroup, ig_from_mv)
def test_check_adding_volume_to_storage_group(self):
with mock.patch.object(
masking.VMAXMasking, '_create_initiator_group'):
with mock.patch.object(
rest.VMAXRest, 'is_volume_in_storagegroup',
side_effect=[True, False]):
msg = (
self.driver.masking._check_adding_volume_to_storage_group(
self.data.array, self.device_id,
self.data.storagegroup_name_i,
self.maskingviewdict[utils.VOL_NAME],
self.maskingviewdict[utils.EXTRA_SPECS]))
self.assertIsNone(msg)
msg = (
self.driver.masking._check_adding_volume_to_storage_group(
self.data.array, self.device_id,
self.data.storagegroup_name_i,
self.maskingviewdict[utils.VOL_NAME],
self.maskingviewdict[utils.EXTRA_SPECS]))
@mock.patch.object(rest.VMAXRest, 'add_vol_to_sg')
def test_add_volume_to_storage_group(self, mock_add_volume):
self.driver.masking.add_volume_to_storage_group(
self.data.array, self.device_id, self.data.storagegroup_name_i,
self.volume_name, self.extra_specs)
mock_add_volume.assert_called_once()
@mock.patch.object(rest.VMAXRest, 'remove_vol_from_sg')
def test_remove_vol_from_storage_group(self, mock_remove_volume):
with mock.patch.object(
rest.VMAXRest, 'is_volume_in_storagegroup',
side_effect=[False, True]):
self.driver.masking.remove_vol_from_storage_group(
self.data.array, self.device_id, self.data.storagegroup_name_i,
self.volume_name, self.extra_specs)
mock_remove_volume.assert_called_once()
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.masking.remove_vol_from_storage_group,
self.data.array, self.device_id, self.data.storagegroup_name_i,
self.volume_name, self.extra_specs)
def test_find_initiator_names(self):
foundinitiatornames = self.driver.masking.find_initiator_names(
self.data.connector)
self.assertEqual(self.data.connector['initiator'],
foundinitiatornames[0])
foundinitiatornames = self.driver_fc.masking.find_initiator_names(
self.data.connector)
self.assertEqual(self.data.connector['wwpns'][0],
foundinitiatornames[0])
connector = {'ip': self.data.ip, 'initiator': None, 'host': 'HostX'}
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.masking.find_initiator_names, connector)
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver_fc.masking.find_initiator_names, connector)
def test_find_initiator_group(self):
with mock.patch.object(
rest.VMAXRest, 'get_in_use_initiator_list_from_array',
return_value=self.data.initiator_list[2]['initiatorId']):
with mock.patch.object(
rest.VMAXRest, 'get_initiator_group_from_initiator',
return_value=self.data.initiator_list):
                found_init_group_name = (
                    self.driver.masking._find_initiator_group(
                        self.data.array, ['FA-1D:4:123456789012345']))
                self.assertEqual(self.data.initiator_list,
                                 found_init_group_name)
                found_init_group_name = (
                    self.driver.masking._find_initiator_group(
                        self.data.array, ['Error']))
                self.assertIsNone(found_init_group_name)
def test_create_masking_view(self):
with mock.patch.object(rest.VMAXRest, 'create_masking_view',
side_effect=[None, Exception]):
error_message = self.driver.masking.create_masking_view(
self.data.array, self.maskingviewdict['maskingview_name'],
self.data.storagegroup_name_i, self.data.port_group_name_i,
self.data.initiatorgroup_name_i, self.extra_specs)
self.assertIsNone(error_message)
error_message = self.driver.masking.create_masking_view(
self.data.array, self.maskingviewdict['maskingview_name'],
self.data.storagegroup_name_i, self.data.port_group_name_i,
self.data.initiatorgroup_name_i, self.extra_specs)
self.assertIsNotNone(error_message)
@mock.patch.object(masking.VMAXMasking, '_check_ig_rollback')
def test_check_if_rollback_action_for_masking_required(self,
mock_check_ig):
with mock.patch.object(rest.VMAXRest,
'get_storage_groups_from_volume',
side_effect=[
exception.VolumeBackendAPIException,
self.data.defaultstoragegroup_name,
self.data.defaultstoragegroup_name, None,
None, ]):
self.assertRaises(
exception.VolumeBackendAPIException,
self.mask.check_if_rollback_action_for_masking_required,
self.data.array, self.device_id, self.maskingviewdict)
with mock.patch.object(masking.VMAXMasking,
'remove_and_reset_members'):
self.maskingviewdict[
'default_sg_name'] = self.data.defaultstoragegroup_name
error_message = (
self.mask.check_if_rollback_action_for_masking_required(
self.data.array, self.device_id, self.maskingviewdict))
self.assertIsNone(error_message)
@mock.patch.object(rest.VMAXRest, 'delete_masking_view')
@mock.patch.object(rest.VMAXRest, 'delete_initiator_group')
@mock.patch.object(rest.VMAXRest, 'get_initiator_group')
@mock.patch.object(masking.VMAXMasking, '_find_initiator_group',
return_value=VMAXCommonData.initiatorgroup_name_i)
def test_verify_initiator_group_from_masking_view(
self, mock_find_ig, mock_get_ig, mock_delete_ig, mock_delete_mv):
self.mask._verify_initiator_group_from_masking_view(
self.data.array, self.maskingviewdict['maskingview_name'],
self.maskingviewdict, self.data.initiatorgroup_name_i,
self.data.storagegroup_name_i, self.data.port_group_name_i,
self.extra_specs)
mock_get_ig.assert_not_called()
mock_get_ig.return_value = False
self.mask._verify_initiator_group_from_masking_view(
self.data.array, self.maskingviewdict['maskingview_name'],
self.maskingviewdict, 'OS-Wrong-Host-I-IG',
self.data.storagegroup_name_i, self.data.port_group_name_i,
self.extra_specs)
mock_get_ig.assert_called()
@mock.patch.object(rest.VMAXRest, 'delete_masking_view')
@mock.patch.object(rest.VMAXRest, 'delete_initiator_group')
@mock.patch.object(rest.VMAXRest, 'get_initiator_group',
return_value=True)
@mock.patch.object(masking.VMAXMasking, '_find_initiator_group',
return_value=VMAXCommonData.initiatorgroup_name_i)
def test_verify_initiator_group_from_masking_view2(
self, mock_find_ig, mock_get_ig, mock_delete_ig, mock_delete_mv):
mock_delete_mv.side_effect = [None, Exception]
self.mask._verify_initiator_group_from_masking_view(
self.data.array, self.maskingviewdict['maskingview_name'],
self.maskingviewdict, 'OS-Wrong-Host-I-IG',
self.data.storagegroup_name_i, self.data.port_group_name_i,
self.extra_specs)
mock_delete_mv.assert_called()
_, found_ig_from_connector = (
self.mask._verify_initiator_group_from_masking_view(
self.data.array, self.maskingviewdict['maskingview_name'],
self.maskingviewdict, 'OS-Wrong-Host-I-IG',
self.data.storagegroup_name_i, self.data.port_group_name_i,
self.extra_specs))
self.assertEqual(self.data.initiatorgroup_name_i,
found_ig_from_connector)
@mock.patch.object(rest.VMAXRest, 'create_initiator_group')
def test_create_initiator_group(self, mock_create_ig):
initiator_names = self.mask.find_initiator_names(self.data.connector)
ret_init_group_name = self.mask._create_initiator_group(
self.data.array, self.data.initiatorgroup_name_i, initiator_names,
self.extra_specs)
self.assertEqual(self.data.initiatorgroup_name_i, ret_init_group_name)
@mock.patch.object(masking.VMAXMasking,
'_last_volume_delete_initiator_group')
def test_check_ig_rollback(self, mock_last_volume):
with mock.patch.object(masking.VMAXMasking, '_find_initiator_group',
side_effect=[
None, 'FAKE-I-IG',
self.data.initiatorgroup_name_i]):
for x in range(0, 2):
self.mask._check_ig_rollback(self.data.array,
self.data.initiatorgroup_name_i,
self.data.connector)
mock_last_volume.assert_not_called()
self.mask._check_ig_rollback(
self.data.array, self.data.initiatorgroup_name_i,
self.data.connector)
mock_last_volume.assert_called()
@mock.patch.object(masking.VMAXMasking, '_cleanup_deletion')
def test_remove_and_reset_members(self, mock_cleanup):
self.mask.remove_and_reset_members(self.data.array, self.device_id,
self.volume_name, self.extra_specs,
reset=False)
mock_cleanup.assert_called_once()
@mock.patch.object(rest.VMAXRest, 'get_storage_groups_from_volume',
side_effect=[[VMAXCommonData.storagegroup_name_i],
[VMAXCommonData.storagegroup_name_i,
VMAXCommonData.storagegroup_name_f]])
@mock.patch.object(masking.VMAXMasking, 'remove_volume_from_sg')
@mock.patch.object(masking.VMAXMasking,
'add_volume_to_default_storage_group')
def test_cleanup_deletion(self, mock_add, mock_remove_vol, mock_get_sg):
self.mask._cleanup_deletion(
self.data.array, self.device_id, self.volume_name,
self.extra_specs, None, True)
mock_add.assert_not_called()
self.mask._cleanup_deletion(
self.data.array, self.device_id, self.volume_name,
self.extra_specs, None, True)
mock_add.assert_called_once_with(self.data.array, self.device_id,
self.volume_name, self.extra_specs)
@mock.patch.object(masking.VMAXMasking, '_last_vol_in_sg')
@mock.patch.object(masking.VMAXMasking, '_multiple_vols_in_sg')
def test_remove_volume_from_sg(self, mock_multiple_vols, mock_last_vol):
with mock.patch.object(
rest.VMAXRest, 'get_masking_views_from_storage_group',
return_value=None):
with mock.patch.object(
rest.VMAXRest, 'get_num_vols_in_sg',
side_effect=[2, 1]):
self.mask.remove_volume_from_sg(
self.data.array, self.device_id, self.volume_name,
self.data.defaultstoragegroup_name, self.extra_specs)
mock_last_vol.assert_not_called()
self.mask.remove_volume_from_sg(
self.data.array, self.device_id, self.volume_name,
self.data.defaultstoragegroup_name, self.extra_specs)
mock_last_vol.assert_called()
@mock.patch.object(masking.VMAXMasking, '_last_vol_in_sg')
@mock.patch.object(masking.VMAXMasking, '_multiple_vols_in_sg')
def test_remove_volume_from_sg_2(self, mock_multiple_vols, mock_last_vol):
with mock.patch.object(
rest.VMAXRest, 'is_volume_in_storagegroup',
return_value=True):
with mock.patch.object(
rest.VMAXRest, 'get_masking_views_from_storage_group',
return_value=[self.data.masking_view_name_i]):
with mock.patch.object(
rest.VMAXRest, 'get_num_vols_in_sg',
side_effect=[2, 1]):
self.mask.remove_volume_from_sg(
self.data.array, self.device_id, self.volume_name,
self.data.storagegroup_name_i, self.extra_specs)
mock_last_vol.assert_not_called()
self.mask.remove_volume_from_sg(
self.data.array, self.device_id, self.volume_name,
self.data.storagegroup_name_i, self.extra_specs)
mock_last_vol.assert_called()
@mock.patch.object(masking.VMAXMasking, '_last_vol_masking_views',
return_value=True)
@mock.patch.object(masking.VMAXMasking, '_last_vol_no_masking_views',
return_value=True)
def test_last_vol_in_sg(self, mock_no_mv, mock_mv):
mv_list = [self.data.masking_view_name_i,
self.data.masking_view_name_f]
with mock.patch.object(rest.VMAXRest,
'get_masking_views_from_storage_group',
side_effect=[mv_list, []]):
for x in range(0, 2):
self.mask._last_vol_in_sg(
self.data.array, self.device_id, self.volume_name,
self.data.storagegroup_name_i, self.extra_specs,
self.data.connector)
self.assertEqual(1, mock_mv.call_count)
self.assertEqual(1, mock_no_mv.call_count)
@mock.patch.object(masking.VMAXMasking, '_remove_last_vol_and_delete_sg')
@mock.patch.object(masking.VMAXMasking, '_delete_cascaded_storage_groups')
@mock.patch.object(rest.VMAXRest, 'get_num_vols_in_sg',
side_effect=[1, 3])
@mock.patch.object(rest.VMAXRest, 'delete_storage_group')
@mock.patch.object(masking.VMAXMasking, 'get_parent_sg_from_child',
side_effect=[None, 'parent_sg_name', 'parent_sg_name'])
def test_last_vol_no_masking_views(
self, mock_get_parent, mock_delete, mock_num_vols,
mock_delete_casc, mock_remove):
for x in range(0, 3):
self.mask._last_vol_no_masking_views(
self.data.array, self.data.storagegroup_name_i,
self.device_id, self.volume_name, self.extra_specs,
False)
self.assertEqual(1, mock_delete.call_count)
self.assertEqual(1, mock_delete_casc.call_count)
self.assertEqual(1, mock_remove.call_count)
@mock.patch.object(masking.VMAXMasking, '_remove_last_vol_and_delete_sg')
@mock.patch.object(masking.VMAXMasking, '_delete_mv_ig_and_sg')
@mock.patch.object(masking.VMAXMasking, '_get_num_vols_from_mv',
side_effect=[(1, 'parent_name'), (3, 'parent_name')])
def test_last_vol_masking_views(
self, mock_num_vols, mock_delete_all, mock_remove):
for x in range(0, 2):
self.mask._last_vol_masking_views(
self.data.array, self.data.storagegroup_name_i,
[self.data.masking_view_name_i], self.device_id,
self.volume_name, self.extra_specs, self.data.connector,
True)
self.assertEqual(1, mock_delete_all.call_count)
self.assertEqual(1, mock_remove.call_count)
@mock.patch.object(masking.VMAXMasking,
'add_volume_to_default_storage_group')
@mock.patch.object(rest.VMAXRest, 'get_num_vols_in_sg')
@mock.patch.object(masking.VMAXMasking, 'remove_vol_from_storage_group')
def test_multiple_vols_in_sg(self, mock_remove_vol, mock_get_volumes,
mock_add):
self.mask._multiple_vols_in_sg(
self.data.array, self.device_id, self.data.storagegroup_name_i,
self.volume_name, self.extra_specs, False)
mock_remove_vol.assert_called_once()
self.mask._multiple_vols_in_sg(
self.data.array, self.device_id, self.data.storagegroup_name_i,
self.volume_name, self.extra_specs, True)
mock_add.assert_called_once()
@mock.patch.object(rest.VMAXRest, 'get_element_from_masking_view')
@mock.patch.object(masking.VMAXMasking, '_last_volume_delete_masking_view')
@mock.patch.object(masking.VMAXMasking,
'_last_volume_delete_initiator_group')
@mock.patch.object(masking.VMAXMasking, '_delete_cascaded_storage_groups')
def test_delete_mv_ig_and_sg(self, mock_delete_sg, mock_delete_ig,
mock_delete_mv, mock_get_element):
self.mask._delete_mv_ig_and_sg(
self.data.array, self.data.device_id,
self.data.masking_view_name_i,
self.data.storagegroup_name_i, self.data.parent_sg_i,
self.data.connector, True, self.data.extra_specs)
mock_delete_sg.assert_called_once()
@mock.patch.object(rest.VMAXRest, 'delete_masking_view')
def test_last_volume_delete_masking_view(self, mock_delete_mv):
self.mask._last_volume_delete_masking_view(
self.data.array, self.data.masking_view_name_i)
mock_delete_mv.assert_called_once()
@mock.patch.object(rest.VMAXRest, 'move_volume_between_storage_groups')
@mock.patch.object(masking.VMAXMasking,
'get_or_create_default_storage_group')
@mock.patch.object(masking.VMAXMasking, 'add_volume_to_storage_group')
def test_add_volume_to_default_storage_group(
self, mock_add_sg, mock_get_sg, mock_move):
self.mask.add_volume_to_default_storage_group(
self.data.array, self.device_id, self.volume_name,
self.extra_specs)
mock_add_sg.assert_called_once()
self.mask.add_volume_to_default_storage_group(
self.data.array, self.device_id, self.volume_name,
self.extra_specs, src_sg=self.data.storagegroup_name_i)
mock_move.assert_called_once()
@mock.patch.object(provision.VMAXProvision, 'create_storage_group')
def test_get_or_create_default_storage_group(self, mock_create_sg):
with mock.patch.object(
rest.VMAXRest, 'get_vmax_default_storage_group',
return_value=(None, self.data.storagegroup_name_i)):
storage_group_name = self.mask.get_or_create_default_storage_group(
self.data.array, self.data.srp, self.data.slo,
self.data.workload, self.extra_specs)
self.assertEqual(self.data.storagegroup_name_i, storage_group_name)
with mock.patch.object(
rest.VMAXRest, 'get_vmax_default_storage_group',
return_value=("test_sg", self.data.storagegroup_name_i)):
with mock.patch.object(
rest.VMAXRest, 'get_masking_views_from_storage_group',
return_value=self.data.masking_view_name_i):
self.assertRaises(
exception.VolumeBackendAPIException,
self.mask.get_or_create_default_storage_group,
self.data.array, self.data.srp, self.data.slo,
self.data.workload, self.extra_specs)
@mock.patch.object(masking.VMAXMasking,
'add_volume_to_default_storage_group')
@mock.patch.object(rest.VMAXRest, 'remove_child_sg_from_parent_sg')
@mock.patch.object(rest.VMAXRest, 'delete_storage_group')
@mock.patch.object(masking.VMAXMasking, 'remove_vol_from_storage_group')
def test_remove_last_vol_and_delete_sg(self, mock_vol_sg,
mock_delete_sg, mock_rm, mock_add):
self.mask._remove_last_vol_and_delete_sg(
self.data.array, self.device_id, self.volume_name,
self.data.storagegroup_name_i, self.extra_specs)
self.mask._remove_last_vol_and_delete_sg(
self.data.array, self.device_id, self.volume_name,
self.data.storagegroup_name_i, self.extra_specs,
self.data.parent_sg_i, True)
self.assertEqual(2, mock_delete_sg.call_count)
self.assertEqual(1, mock_vol_sg.call_count)
self.assertEqual(1, mock_rm.call_count)
self.assertEqual(1, mock_add.call_count)
@mock.patch.object(rest.VMAXRest, 'delete_initiator_group')
def test_last_volume_delete_initiator_group(self, mock_delete_ig):
self.mask._last_volume_delete_initiator_group(
self.data.array, self.data.initiatorgroup_name_f, 'Wrong_Host')
mock_delete_ig.assert_not_called()
self.mask._last_volume_delete_initiator_group(
self.data.array, self.data.initiatorgroup_name_f, None)
mock_delete_ig.assert_not_called()
mv_list = [self.data.masking_view_name_i,
self.data.masking_view_name_f]
with mock.patch.object(rest.VMAXRest,
'get_masking_views_by_initiator_group',
side_effect=[mv_list, []]):
self.mask._last_volume_delete_initiator_group(
self.data.array, self.data.initiatorgroup_name_i,
self.data.connector['host'])
mock_delete_ig.assert_not_called()
self.mask._last_volume_delete_initiator_group(
self.data.array, self.data.initiatorgroup_name_i,
self.data.connector['host'])
mock_delete_ig.assert_called_once()
def test_populate_masking_dict_init_check_false(self):
extra_specs = self.data.extra_specs
connector = self.data.connector
with mock.patch.object(self.driver, '_get_initiator_check_flag',
return_value=False):
masking_view_dict = self.driver._populate_masking_dict(
self.data.test_volume, connector, extra_specs)
self.assertFalse(masking_view_dict['initiator_check'])
def test_populate_masking_dict_init_check_true(self):
extra_specs = self.data.extra_specs
connector = self.data.connector
with mock.patch.object(self.driver, '_get_initiator_check_flag',
return_value=True):
masking_view_dict = self.driver._populate_masking_dict(
self.data.test_volume, connector, extra_specs)
self.assertTrue(masking_view_dict['initiator_check'])
def test_check_existing_initiator_group_verify_true(self):
mv_dict = deepcopy(self.data.masking_view_dict)
mv_dict['initiator_check'] = True
with mock.patch.object(
rest.VMAXRest, 'get_element_from_masking_view',
return_value=VMAXCommonData.initiatorgroup_name_f):
with mock.patch.object(
self.mask, '_verify_initiator_group_from_masking_view',
return_value=(True, self.data.initiatorgroup_name_f)):
self.mask._check_existing_initiator_group(
self.data.array, self.data.masking_view_name_f,
mv_dict, self.data.storagegroup_name_f,
self.data.port_group_name_f, self.data.extra_specs)
(self.mask._verify_initiator_group_from_masking_view.
assert_called_once_with(
self.data.array, self.data.masking_view_name_f,
mv_dict, self.data.initiatorgroup_name_f,
self.data.storagegroup_name_f,
self.data.port_group_name_f, self.data.extra_specs))
@mock.patch.object(masking.VMAXMasking, 'add_child_sg_to_parent_sg',
side_effect=[
None, exception.VolumeBackendAPIException])
@mock.patch.object(rest.VMAXRest, 'is_child_sg_in_parent_sg',
side_effect=[True, False, False])
def test_check_add_child_sg_to_parent_sg(self, mock_is_child, mock_add):
for x in range(0, 3):
message = self.mask._check_add_child_sg_to_parent_sg(
self.data.array, self.data.storagegroup_name_i,
self.data.parent_sg_i, self.data.extra_specs)
self.assertIsNotNone(message)
@mock.patch.object(rest.VMAXRest, 'add_child_sg_to_parent_sg')
@mock.patch.object(rest.VMAXRest, 'is_child_sg_in_parent_sg',
side_effect=[True, False])
def test_add_child_sg_to_parent_sg(self, mock_is_child, mock_add):
for x in range(0, 2):
self.mask.add_child_sg_to_parent_sg(
self.data.array, self.data.storagegroup_name_i,
self.data.parent_sg_i, self.data.extra_specs)
self.assertEqual(1, mock_add.call_count)
def test_get_parent_sg_from_child(self):
with mock.patch.object(self.driver.rest, 'get_storage_group',
side_effect=[None, self.data.sg_details[1]]):
sg_name = self.mask.get_parent_sg_from_child(
self.data.array, self.data.storagegroup_name_i)
self.assertIsNone(sg_name)
sg_name2 = self.mask.get_parent_sg_from_child(
self.data.array, self.data.storagegroup_name_f)
self.assertEqual(self.data.parent_sg_f, sg_name2)
@mock.patch.object(rest.VMAXRest, 'get_element_from_masking_view',
return_value='parent_sg')
@mock.patch.object(rest.VMAXRest, 'get_num_vols_in_sg',
return_value=2)
def test_get_num_vols_from_mv(self, mock_num, mock_element):
num_vols, sg = self.mask._get_num_vols_from_mv(
self.data.array, self.data.masking_view_name_f)
self.assertEqual(2, num_vols)
@mock.patch.object(masking.VMAXMasking,
'add_volume_to_default_storage_group')
@mock.patch.object(rest.VMAXRest, 'delete_storage_group')
def test_delete_cascaded(self, mock_delete, mock_add):
self.mask._delete_cascaded_storage_groups(
self.data.array, self.data.masking_view_name_f,
self.data.parent_sg_f, self.data.extra_specs,
self.data.device_id, False)
self.assertEqual(2, mock_delete.call_count)
mock_add.assert_not_called()
# Delete legacy masking view, parent sg = child sg
mock_delete.reset_mock()
self.mask._delete_cascaded_storage_groups(
self.data.array, self.data.masking_view_name_f,
self.data.masking_view_name_f, self.data.extra_specs,
self.data.device_id, True)
self.assertEqual(1, mock_delete.call_count)
mock_add.assert_called_once()
@mock.patch.object(masking.VMAXMasking, 'add_child_sg_to_parent_sg')
@mock.patch.object(masking.VMAXMasking,
'move_volume_between_storage_groups')
@mock.patch.object(provision.VMAXProvision, 'create_storage_group')
def test_pre_live_migration(self, mock_create_sg, mock_move, mock_add):
with mock.patch.object(
rest.VMAXRest, 'get_storage_group',
side_effect=[None, self.data.sg_details[1]["storageGroupId"]]
):
source_sg = self.data.sg_details[2]["storageGroupId"]
source_parent_sg = self.data.sg_details[4]["storageGroupId"]
source_nf_sg = source_parent_sg[:-2] + 'NONFAST'
self.data.iscsi_device_info['device_id'] = self.data.device_id
self.mask.pre_live_migration(
source_nf_sg, source_sg, source_parent_sg, False,
self.data.iscsi_device_info, None)
mock_create_sg.assert_called_once()
@mock.patch.object(rest.VMAXRest, 'delete_storage_group')
@mock.patch.object(rest.VMAXRest, 'remove_child_sg_from_parent_sg')
def test_post_live_migration(self, mock_remove_child_sg, mock_delete_sg):
self.data.iscsi_device_info['source_sg'] = self.data.sg_details[2][
"storageGroupId"]
self.data.iscsi_device_info['source_parent_sg'] = self.data.sg_details[
4]["storageGroupId"]
with mock.patch.object(
rest.VMAXRest, 'get_num_vols_in_sg', side_effect=[0, 1]):
self.mask.post_live_migration(self.data.iscsi_device_info, None)
mock_remove_child_sg.assert_called_once()
mock_delete_sg.assert_called_once()
@mock.patch.object(masking.VMAXMasking,
'move_volume_between_storage_groups')
@mock.patch.object(rest.VMAXRest, 'delete_storage_group')
@mock.patch.object(rest.VMAXRest, 'remove_child_sg_from_parent_sg')
@mock.patch.object(masking.VMAXMasking, 'remove_volume_from_sg')
def test_failed_live_migration(
self, mock_remove_volume, mock_remove_child_sg, mock_delete_sg,
mock_move):
device_dict = self.data.iscsi_device_info
device_dict['device_id'] = self.data.device_id
device_dict['source_sg'] = self.data.sg_details[2]["storageGroupId"]
device_dict['source_parent_sg'] = self.data.sg_details[4][
"storageGroupId"]
device_dict['source_nf_sg'] = (
self.data.sg_details[4]["storageGroupId"][:-2] + 'NONFAST')
sg_list = [device_dict['source_nf_sg']]
with mock.patch.object(
rest.VMAXRest, 'is_child_sg_in_parent_sg',
side_effect=[True, False]):
self.mask.failed_live_migration(device_dict, sg_list, None)
mock_remove_volume.assert_not_called()
mock_remove_child_sg.assert_called_once()
class VMAXCommonReplicationTest(test.TestCase):
def setUp(self):
self.data = VMAXCommonData()
super(VMAXCommonReplicationTest, self).setUp()
config_group = 'CommonReplicationTests'
self.fake_xml = FakeXML().create_fake_config_file(
config_group, self.data.port_group_name_f)
self.replication_device = {
'target_device_id': self.data.remote_array,
'remote_port_group': self.data.port_group_name_f,
'remote_pool': self.data.srp2,
'rdf_group_label': self.data.rdf_group_name,
'allow_extend': 'True'}
configuration = FakeConfiguration(
self.fake_xml, config_group,
replication_device=self.replication_device)
rest.VMAXRest._establish_rest_session = mock.Mock(
return_value=FakeRequestsSession())
driver = fc.VMAXFCDriver(configuration=configuration)
self.driver = driver
self.common = self.driver.common
self.masking = self.common.masking
self.provision = self.common.provision
self.rest = self.common.rest
self.utils = self.common.utils
self.utils.get_volumetype_extra_specs = (
mock.Mock(
return_value=self.data.vol_type_extra_specs_rep_enabled))
self.extra_specs = deepcopy(self.data.extra_specs_rep_enabled)
self.extra_specs['retries'] = 0
self.extra_specs['interval'] = 0
def test_get_replication_info(self):
self.common._get_replication_info()
self.assertTrue(self.common.replication_enabled)
def test_create_replicated_volume(self):
extra_specs = deepcopy(self.extra_specs)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
vol_identifier = self.utils.get_volume_element_name(
self.data.test_volume.id)
with mock.patch.object(self.common, '_replicate_volume',
return_value={}) as mock_rep:
self.common.create_volume(self.data.test_volume)
volume_dict = self.data.provider_location
mock_rep.assert_called_once_with(
self.data.test_volume, vol_identifier, volume_dict,
extra_specs)
def test_create_cloned_replicated_volume(self):
extra_specs = deepcopy(self.extra_specs)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
with mock.patch.object(self.common, '_replicate_volume',
return_value={}) as mock_rep:
self.common.create_cloned_volume(
self.data.test_clone_volume, self.data.test_volume)
volume_dict = self.data.provider_location
mock_rep.assert_called_once_with(
self.data.test_clone_volume,
self.data.test_clone_volume.name, volume_dict, extra_specs)
def test_create_replicated_volume_from_snap(self):
extra_specs = deepcopy(self.extra_specs)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
with mock.patch.object(self.common, '_replicate_volume',
return_value={}) as mock_rep:
self.common.create_volume_from_snapshot(
self.data.test_clone_volume, self.data.test_snapshot)
volume_dict = self.data.provider_location
mock_rep.assert_called_once_with(
self.data.test_clone_volume,
"snapshot-%s" % self.data.snapshot_id,
volume_dict,
extra_specs)
def test_replicate_volume(self):
volume_dict = self.data.provider_location
rs_enabled = fields.ReplicationStatus.ENABLED
with mock.patch.object(self.common, 'setup_volume_replication',
return_value=(rs_enabled, {})) as mock_setup:
self.common._replicate_volume(
self.data.test_volume, "1", volume_dict, self.extra_specs)
mock_setup.assert_called_once_with(
self.data.array, self.data.test_volume,
self.data.device_id, self.extra_specs)
def test_replicate_volume_exception(self):
volume_dict = self.data.provider_location
with mock.patch.object(
self.common, 'setup_volume_replication',
side_effect=exception.VolumeBackendAPIException(data='')):
with mock.patch.object(
self.common, '_cleanup_replication_source') as mock_clean:
self.assertRaises(exception.VolumeBackendAPIException,
self.common._replicate_volume,
self.data.test_volume,
"1", volume_dict, self.extra_specs)
mock_clean.assert_called_once_with(
self.data.array, self.data.test_volume, "1",
volume_dict, self.extra_specs)
@mock.patch.object(common.VMAXCommon, '_remove_members')
@mock.patch.object(common.VMAXCommon,
'_get_replication_extra_specs',
return_value=VMAXCommonData.rep_extra_specs)
@mock.patch.object(utils.VMAXUtils, 'is_volume_failed_over',
return_value=True)
def test_unmap_lun_volume_failed_over(self, mock_fo, mock_es, mock_rm):
extra_specs = deepcopy(self.extra_specs)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
rep_config = self.utils.get_replication_config(
[self.replication_device])
self.common._unmap_lun(self.data.test_volume, self.data.connector)
mock_es.assert_called_once_with(extra_specs, rep_config)
@mock.patch.object(utils.VMAXUtils, 'is_volume_failed_over',
return_value=True)
def test_initialize_connection_vol_failed_over(self, mock_fo):
extra_specs = deepcopy(self.extra_specs)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
rep_extra_specs = deepcopy(VMAXCommonData.rep_extra_specs)
rep_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
rep_config = self.utils.get_replication_config(
[self.replication_device])
with mock.patch.object(self.common, '_get_replication_extra_specs',
return_value=rep_extra_specs) as mock_es:
self.common.initialize_connection(
self.data.test_volume, self.data.connector)
mock_es.assert_called_once_with(extra_specs, rep_config)
@mock.patch.object(common.VMAXCommon, '_sync_check')
def test_extend_volume_rep_enabled(self, mock_sync):
extra_specs = deepcopy(self.extra_specs)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
volume_name = self.data.test_volume.name
with mock.patch.object(self.rest, 'is_vol_in_rep_session',
return_value=(False, False, None)):
with mock.patch.object(
self.common, 'extend_volume_is_replicated') as mock_ex_re:
self.common.extend_volume(self.data.test_volume, '5')
mock_ex_re.assert_called_once_with(
self.data.array, self.data.test_volume,
self.data.device_id, volume_name, "5", extra_specs)
def test_set_config_file_get_extra_specs_rep_enabled(self):
extra_specs, _, _ = self.common._set_config_file_and_get_extra_specs(
self.data.test_volume)
self.assertTrue(extra_specs['replication_enabled'])
def test_populate_masking_dict_is_re(self):
extra_specs = deepcopy(self.extra_specs)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
masking_dict = self.common._populate_masking_dict(
self.data.test_volume, self.data.connector, extra_specs)
self.assertTrue(masking_dict['replication_enabled'])
self.assertEqual('OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG-RE',
masking_dict[utils.SG_NAME])
@mock.patch.object(common.VMAXCommon,
'_replicate_volume',
return_value={})
def test_manage_existing_is_replicated(self, mock_rep):
extra_specs = deepcopy(self.extra_specs)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
external_ref = {u'source-name': u'00002'}
volume_name = self.utils.get_volume_element_name(
self.data.test_volume.id)
provider_location = {'device_id': u'00002', 'array': self.data.array}
with mock.patch.object(
self.common, '_check_lun_valid_for_cinder_management'):
self.common.manage_existing(
self.data.test_volume, external_ref)
mock_rep.assert_called_once_with(
self.data.test_volume, volume_name, provider_location,
extra_specs, delete_src=False)
@mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members')
def test_setup_volume_replication(self, mock_rm):
rep_status, rep_data = self.common.setup_volume_replication(
self.data.array, self.data.test_volume, self.data.device_id,
self.extra_specs)
self.assertEqual(fields.ReplicationStatus.ENABLED, rep_status)
self.assertEqual({'array': self.data.remote_array,
'device_id': self.data.device_id}, rep_data)
@mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members')
@mock.patch.object(common.VMAXCommon, '_create_volume')
def test_setup_volume_replication_target(self, mock_create, mock_rm):
rep_status, rep_data = self.common.setup_volume_replication(
self.data.array, self.data.test_volume, self.data.device_id,
self.extra_specs, self.data.device_id2)
self.assertEqual(fields.ReplicationStatus.ENABLED, rep_status)
self.assertEqual({'array': self.data.remote_array,
'device_id': self.data.device_id2}, rep_data)
mock_create.assert_not_called()
@mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members')
@mock.patch.object(common.VMAXCommon, '_cleanup_remote_target')
def test_cleanup_lun_replication_success(self, mock_clean, mock_rm):
rep_extra_specs = deepcopy(self.data.rep_extra_specs)
rep_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
self.common.cleanup_lun_replication(
self.data.test_volume, "1", self.data.device_id,
self.extra_specs)
mock_clean.assert_called_once_with(
self.data.array, self.data.remote_array, self.data.device_id,
self.data.device_id2, self.data.rdf_group_no, "1",
rep_extra_specs)
mock_rm.assert_called_once_with(
self.data.remote_array, self.data.device_id2, "1",
rep_extra_specs, False)
# Cleanup legacy replication
self.common.cleanup_lun_replication(
self.data.test_legacy_vol, "1", self.data.device_id,
self.extra_specs)
mock_clean.assert_called_once_with(
self.data.array, self.data.remote_array, self.data.device_id,
self.data.device_id2, self.data.rdf_group_no, "1",
rep_extra_specs)
@mock.patch.object(common.VMAXCommon, '_cleanup_remote_target')
def test_cleanup_lun_replication_no_target(self, mock_clean):
with mock.patch.object(self.common, 'get_remote_target_device',
return_value=(None, '', '', '', '')):
self.common.cleanup_lun_replication(
self.data.test_volume, "1", self.data.device_id,
self.extra_specs)
mock_clean.assert_not_called()
def test_cleanup_lun_replication_exception(self):
self.assertRaises(exception.VolumeBackendAPIException,
self.common.cleanup_lun_replication,
self.data.test_volume, "1", self.data.device_id,
self.extra_specs)
@mock.patch.object(common.VMAXCommon, '_delete_from_srp')
@mock.patch.object(provision.VMAXProvision, 'break_rdf_relationship')
def test_cleanup_remote_target(self, mock_break, mock_del):
with mock.patch.object(self.rest, 'are_vols_rdf_paired',
return_value=(False, '', '')):
self.common._cleanup_remote_target(
self.data.array, self.data.remote_array, self.data.device_id,
self.data.device_id2, self.data.rdf_group_name,
"vol1", self.data.rep_extra_specs)
mock_break.assert_not_called()
self.common._cleanup_remote_target(
self.data.array, self.data.remote_array, self.data.device_id,
self.data.device_id2, self.data.rdf_group_name,
"vol1", self.data.rep_extra_specs)
mock_break.assert_called_once_with(
self.data.array, self.data.device_id,
self.data.device_id2, self.data.rdf_group_name,
self.data.rep_extra_specs, "Synchronized")
@mock.patch.object(common.VMAXCommon,
'_remove_vol_and_cleanup_replication')
@mock.patch.object(masking.VMAXMasking, 'remove_vol_from_storage_group')
@mock.patch.object(common.VMAXCommon, '_delete_from_srp')
def test_cleanup_replication_source(self, mock_del, mock_rm, mock_clean):
self.common._cleanup_replication_source(
self.data.array, self.data.test_volume, "vol1",
{'device_id': self.data.device_id}, self.extra_specs)
mock_del.assert_called_once_with(
self.data.array, self.data.device_id, "vol1", self.extra_specs)
def test_get_rdf_details(self):
rdf_group_no, remote_array = self.common.get_rdf_details(
self.data.array)
self.assertEqual(self.data.rdf_group_no, rdf_group_no)
self.assertEqual(self.data.remote_array, remote_array)
def test_get_rdf_details_exception(self):
with mock.patch.object(self.rest, 'get_rdf_group_number',
return_value=None):
self.assertRaises(exception.VolumeBackendAPIException,
self.common.get_rdf_details, self.data.array)
def test_failover_host(self):
volumes = [self.data.test_volume, self.data.test_clone_volume]
with mock.patch.object(self.common, '_failover_volume',
return_value={}) as mock_fo:
self.common.failover_host(volumes)
self.assertEqual(2, mock_fo.call_count)
def test_failover_host_exception(self):
volumes = [self.data.test_volume, self.data.test_clone_volume]
self.assertRaises(exception.VolumeBackendAPIException,
self.common.failover_host,
volumes, secondary_id="default")
def test_failover_volume(self):
ref_model_update = {
'volume_id': self.data.test_volume.id,
'updates':
{'replication_status': fields.ReplicationStatus.FAILED_OVER,
'replication_driver_data': six.text_type(
self.data.provider_location),
'provider_location': six.text_type(
self.data.provider_location3)}}
model_update = self.common._failover_volume(
self.data.test_volume, True, self.extra_specs)
self.assertEqual(ref_model_update, model_update)
ref_model_update2 = {
'volume_id': self.data.test_volume.id,
'updates':
{'replication_status': fields.ReplicationStatus.ENABLED,
'replication_driver_data': six.text_type(
self.data.provider_location),
'provider_location': six.text_type(
self.data.provider_location3)}}
model_update2 = self.common._failover_volume(
self.data.test_volume, False, self.extra_specs)
self.assertEqual(ref_model_update2, model_update2)
def test_failover_legacy_volume(self):
ref_model_update = {
'volume_id': self.data.test_volume.id,
'updates':
{'replication_status': fields.ReplicationStatus.FAILED_OVER,
'replication_driver_data': six.text_type(
self.data.legacy_provider_location),
'provider_location': six.text_type(
self.data.legacy_provider_location2)}}
model_update = self.common._failover_volume(
self.data.test_legacy_vol, True, self.extra_specs)
self.assertEqual(ref_model_update, model_update)
def test_failover_volume_exception(self):
with mock.patch.object(
self.provision, 'failover_volume',
side_effect=exception.VolumeBackendAPIException):
ref_model_update = {
'volume_id': self.data.test_volume.id,
'updates': {'replication_status':
fields.ReplicationStatus.FAILOVER_ERROR,
'replication_driver_data': six.text_type(
self.data.provider_location3),
'provider_location': six.text_type(
self.data.provider_location)}}
model_update = self.common._failover_volume(
self.data.test_volume, True, self.extra_specs)
self.assertEqual(ref_model_update, model_update)
@mock.patch.object(
common.VMAXCommon, '_find_device_on_array',
side_effect=[None, VMAXCommonData.device_id,
VMAXCommonData.device_id, VMAXCommonData.device_id])
@mock.patch.object(
common.VMAXCommon, 'get_masking_views_from_volume',
side_effect=['OS-host-MV', None, exception.VolumeBackendAPIException])
def test_recover_volumes_on_failback(self, mock_mv, mock_dev):
recovery1 = self.common.recover_volumes_on_failback(
self.data.test_volume, self.extra_specs)
self.assertEqual('error', recovery1['updates']['status'])
recovery2 = self.common.recover_volumes_on_failback(
self.data.test_volume, self.extra_specs)
self.assertEqual('in-use', recovery2['updates']['status'])
recovery3 = self.common.recover_volumes_on_failback(
self.data.test_volume, self.extra_specs)
self.assertEqual('available', recovery3['updates']['status'])
recovery4 = self.common.recover_volumes_on_failback(
self.data.test_volume, self.extra_specs)
self.assertEqual('available', recovery4['updates']['status'])
def test_get_remote_target_device(self):
target_device1, _, _, _, _ = (
self.common.get_remote_target_device(
self.data.array, self.data.test_volume, self.data.device_id))
self.assertEqual(self.data.device_id2, target_device1)
target_device2, _, _, _, _ = (
self.common.get_remote_target_device(
self.data.array, self.data.test_clone_volume,
self.data.device_id))
self.assertIsNone(target_device2)
with mock.patch.object(self.rest, 'are_vols_rdf_paired',
return_value=(False, '')):
target_device3, _, _, _, _ = (
self.common.get_remote_target_device(
self.data.array, self.data.test_volume,
self.data.device_id))
self.assertIsNone(target_device3)
with mock.patch.object(self.rest, 'get_volume',
return_value=None):
target_device4, _, _, _, _ = (
self.common.get_remote_target_device(
self.data.array, self.data.test_volume,
self.data.device_id))
self.assertIsNone(target_device4)
@mock.patch.object(common.VMAXCommon, 'setup_volume_replication')
@mock.patch.object(provision.VMAXProvision, 'extend_volume')
@mock.patch.object(provision.VMAXProvision, 'break_rdf_relationship')
@mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members')
def test_extend_volume_is_replicated(self, mock_remove,
mock_break, mock_extend, mock_setup):
self.common.extend_volume_is_replicated(
self.data.array, self.data.test_volume, self.data.device_id,
'vol1', '5', self.data.extra_specs_rep_enabled)
self.assertEqual(2, mock_remove.call_count)
self.assertEqual(2, mock_extend.call_count)
def test_extend_volume_is_replicated_exception(self):
self.assertRaises(exception.VolumeBackendAPIException,
self.common.extend_volume_is_replicated,
self.data.failed_resource, self.data.test_volume,
self.data.device_id, 'vol1', '1',
self.data.extra_specs_rep_enabled)
@mock.patch.object(common.VMAXCommon, 'add_volume_to_replication_group')
@mock.patch.object(masking.VMAXMasking, 'remove_and_reset_members')
def test_enable_rdf(self, mock_remove, mock_add):
rep_config = self.utils.get_replication_config(
[self.replication_device])
self.common.enable_rdf(
self.data.array, self.data.device_id, self.data.rdf_group_no,
rep_config, 'OS-1', self.data.remote_array, self.data.device_id2,
self.extra_specs)
self.assertEqual(2, mock_remove.call_count)
self.assertEqual(2, mock_add.call_count)
@mock.patch.object(masking.VMAXMasking, 'remove_vol_from_storage_group')
@mock.patch.object(common.VMAXCommon, '_cleanup_remote_target')
def test_enable_rdf_exception(self, mock_cleanup, mock_rm):
rep_config = self.utils.get_replication_config(
[self.replication_device])
self.assertRaises(
exception.VolumeBackendAPIException, self.common.enable_rdf,
self.data.array, self.data.device_id,
self.data.failed_resource, rep_config, 'OS-1',
self.data.remote_array, self.data.device_id2, self.extra_specs)
self.assertEqual(1, mock_cleanup.call_count)
def test_add_volume_to_replication_group(self):
sg_name = self.common.add_volume_to_replication_group(
self.data.array, self.data.device_id, 'vol1',
self.extra_specs)
self.assertEqual(self.data.default_sg_re_enabled, sg_name)
@mock.patch.object(masking.VMAXMasking,
'get_or_create_default_storage_group',
side_effect=exception.VolumeBackendAPIException)
def test_add_volume_to_replication_group_exception(self, mock_get):
self.assertRaises(
exception.VolumeBackendAPIException,
self.common.add_volume_to_replication_group,
self.data.array, self.data.device_id, 'vol1',
self.extra_specs)
def test_get_replication_extra_specs(self):
rep_config = self.utils.get_replication_config(
[self.replication_device])
# Path one - disable compression
extra_specs1 = deepcopy(self.extra_specs)
extra_specs1[utils.DISABLECOMPRESSION] = "true"
ref_specs1 = deepcopy(self.data.rep_extra_specs)
ref_specs1[utils.PORTGROUPNAME] = self.data.port_group_name_f
rep_extra_specs1 = self.common._get_replication_extra_specs(
extra_specs1, rep_config)
self.assertEqual(ref_specs1, rep_extra_specs1)
# Path two - disable compression, not all flash
ref_specs2 = deepcopy(self.data.rep_extra_specs)
ref_specs2[utils.PORTGROUPNAME] = self.data.port_group_name_f
with mock.patch.object(self.rest, 'is_compression_capable',
return_value=False):
rep_extra_specs2 = self.common._get_replication_extra_specs(
extra_specs1, rep_config)
self.assertEqual(ref_specs2, rep_extra_specs2)
# Path three - slo not valid
extra_specs3 = deepcopy(self.extra_specs)
ref_specs3 = deepcopy(ref_specs1)
ref_specs3['slo'] = None
ref_specs3['workload'] = None
with mock.patch.object(self.provision, 'verify_slo_workload',
return_value=(False, False)):
rep_extra_specs3 = self.common._get_replication_extra_specs(
extra_specs3, rep_config)
self.assertEqual(ref_specs3, rep_extra_specs3)
def test_get_secondary_stats(self):
rep_config = self.utils.get_replication_config(
[self.replication_device])
array_map = self.utils.parse_file_to_get_array_map(
self.common.pool_info['config_file'])
finalarrayinfolist = self.common._get_slo_workload_combinations(
array_map)
array_info = finalarrayinfolist[0]
ref_info = deepcopy(array_info)
ref_info['SerialNumber'] = six.text_type(rep_config['array'])
ref_info['srpName'] = rep_config['srp']
secondary_info = self.common.get_secondary_stats_info(
rep_config, array_info)
self.assertEqual(ref_info, secondary_info)
|
apache-2.0
| -9,174,695,730,427,647,000 | 45.366123 | 79 | 0.586879 | false |
cloudmesh/cmd3light
|
setup.py
|
1
|
6580
|
#!/usr/bin/env python
# ----------------------------------------------------------------------- #
# Copyright 2008-2010, Gregor von Laszewski #
# Copyright 2010-2013, Indiana University #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.#
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ------------------------------------------------------------------------#
from __future__ import print_function
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
from setuptools.command.install import install
import os
import shutil
import sys
import platform
try:
import cloudmesh_base
print ("Using cloudmesh_base version:", cloudmesh_base.__version__)
except:
# os.system("pip install cloudmesh_base")
os.system("pip install git+https://github.com/cloudmesh/base.git")
from cloudmesh_base.util import banner
from cloudmesh_base.setup import os_execute, get_version_from_git
from cloudmesh_cmd3light import __version__
banner("Installing Cloudmesh_cmd3light {:}".format(__version__))
requirements = ['pyreadline<=1.7.1.dev-r0',
'colorama',
'cloudmesh_base',
'future',
'docopt',
'pyaml',
'simplejson',
'python-hostlist',
'prettytable',
'sqlalchemy',
'urllib3',
'requests',
'sandman',
'gitchangelog',
'six']
class UploadToPypi(install):
"""Upload the package to pypi. -- only for Maintainers."""
description = __doc__
def run(self):
os.system("make clean")
commands = """
python setup.py install
python setup.py bdist_wheel upload
python setup.py sdist --format=bztar,zip upload
"""
os_execute(commands)
class InstallBase(install):
"""Install the cloudmesh package."""
description = __doc__
def run(self):
banner("Install readline")
commands = None
this_platform = platform.system().lower()
if this_platform in ['darwin']:
commands = """
easy_install readline
"""
elif this_platform in ['windows']:
commands = """
pip install pyreadline
"""
if commands:
os_execute(commands)
import cloudmesh_cmd3light
banner("Install Cloudmesh_cmd3light {:}".format(__version__))
install.run(self)
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
home = os.path.expanduser("~")
#home + '/.cloudmesh'
#print [ (home + '/.cloudmesh/' + d, [os.path.join(d, f) for f in files]) for d, folders, files in os.walk('etc')],
#sys.exit()
#data_files= [ (os.path.join(home, '.cloudmesh'),
# [os.path.join(d, f) for f in files]) for d, folders, files in os.walk(
# os.path.join('cloudmesh_cmd3light', 'etc'))]
import fnmatch
import os
#matches = []
#for root, dirnames, filenames in os.walk(os.path.join('cloudmesh_cmd3light', 'etc')):
# for filename in fnmatch.filter(filenames, '*'):
# matches.append(os.path.join(root, filename).lstrip('cloudmesh_cmd3light/'))
#data_dirs = matches
# Hack because for some reason requirements does not work
#
# os.system("pip install -r requirements.txt")
class Tox(TestCommand):
user_options = [('tox-args=', 'a', "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = None
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import tox
import shlex
args = self.tox_args
if args:
args = shlex.split(self.tox_args)
errno = tox.cmdline(args=args)
sys.exit(errno)
APP = [os.path.join('cloudmesh_cmd3light', 'shell.py')]
OPTIONS = {'argv_emulation': True}
setup(
# setup_requires=['py2app'],
# options={'py2app': OPTIONS},
# app=APP,
version=__version__,
name="cloudmesh_cmd3light",
description="cloudmesh_cmd3light - A dynamic CMD shell with plugins",
long_description=read('README.rst'),
license="Apache License, Version 2.0",
author="Gregor von Laszewski",
author_email="laszewski@gmail.com",
url="https://github.com/cloudmesh/cloudmesh_cmd3light",
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 2.7",
"Topic :: Scientific/Engineering",
"Topic :: System :: Clustering",
"Topic :: System :: Distributed Computing",
"Topic :: Software Development :: Libraries :: Python Modules",
"Environment :: Console"
],
keywords="cloud cmd commandshell plugins",
packages=find_packages(),
install_requires=requirements,
include_package_data=True,
# data_files= data_files,
# package_data={'cloudmesh_cmd3light': data_dirs},
entry_points={
'console_scripts': [
'cml = cloudmesh_cmd3light.cm:main',
],
},
tests_require=['tox'],
cmdclass={
'install': InstallBase,
'pypi': UploadToPypi,
'test': Tox
},
dependency_links = []
)
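# Typical usage (an assumed sketch, not part of the original file):
#
#     python setup.py install   # runs InstallBase, which installs readline/pyreadline first
#     cml                       # console script declared in entry_points above
#     python setup.py test      # runs the Tox command class
#     python setup.py pypi      # maintainers only: UploadToPypi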
|
apache-2.0
| -2,251,012,706,917,116,200 | 33.270833 | 115 | 0.566109 | false |
Jawoll/automatchthreads
|
modules/obsolete/match_thread.py
|
1
|
2766
|
from threading import Thread
from config import data as config
from modules.overgg import Overgg
from modules.pasta import Pasta
from modules.reddit import Reddit
import time
class MatchThread(Thread):
def __init__(self, url, reddit_title):
self.url = url
self.reddit_title = reddit_title
self.update_seconds = config['config']['match_update_seconds']
self.running = False
Thread.__init__(self)
def setup(self):
"""Fetches starting timestamp from over.gg match site."""
match = Overgg.scrape_match(self.url)
self.start_timestamp = int(match['timestamp'])
def update(self):
"""Creates the reddit thread with data scraped from over.gg"""
# scrape data
match = Overgg.scrape_match(self.url)
# check if late map in game
if len(match['maps']) > 1 and match['maps'][-2]['team_1_stats']:
self.update_seconds = config['config']['match_update_seconds_late']
# check if long time passed
if time.time() - self.start_timestamp > config['config']['match_long_time_seconds']:
self.update_seconds = config['config']['match_update_seconds_late']
# check if final
if match['state'] == 'final':
# create markdown for the thread
body_text = Pasta.match_pasta(match)
# create reddit thread
self.reddit_thread = Reddit.new_thread(self.reddit_title, body_text)
Reddit.setup_thread(self.reddit_thread, sticky=False, sort_new=False, spoiler=True)
print('thread created')
self.running = False
def wait_for_start(self):
"""Waits for configured minutes before the scheduled time of the first match of the event."""
while time.time() < self.start_timestamp:
# break point
if not self.running:
return
print('waiting(' + str(self.start_timestamp - time.time()) + ')', flush=True)
time.sleep(20)
def main(self):
"""Main loop."""
# update on first loop
t = self.update_seconds
while True:
# break point
if not self.running:
return
time.sleep(1)
t += 1
# check update time
if t >= self.update_seconds:
self.update()
print('updated (' + str(self.update_seconds) + ')')
t = 0
def run(self):
"""Entry point."""
self.running = True
print('setting up')
self.setup()
print('starting to wait')
self.wait_for_start()
print('starting main loop')
self.main()
self.running = False
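# Minimal manual-test sketch (assumed; the URL and title are placeholders and
# not part of the original module):
if __name__ == '__main__':
    thread = MatchThread('https://www.over.gg/12345/example-match',
                         'Team A vs Team B | Example Tournament')
    thread.start()      # setup(), wait for the start time, then poll over.gg
    try:
        thread.join()
    except KeyboardInterrupt:
        thread.running = False  # ask the wait/main loops to stop at their break points
        thread.join()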
|
mit
| -218,613,766,802,604,380 | 35.893333 | 101 | 0.564714 | false |
Salmista-94/Ninja_3.0_PyQt5
|
ninja_ide/gui/tools_dock/plugin_preferences.py
|
1
|
2404
|
# -*- coding: utf-8 -*-
#
# This file is part of NINJA-IDE (http://ninja-ide.org).
#
# NINJA-IDE is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# NINJA-IDE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NINJA-IDE; If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QTabWidget
from PyQt5.QtWidgets import QVBoxLayout
from ninja_ide.core import plugin_manager
from ninja_ide.tools.logger import NinjaLogger
logger = NinjaLogger('ninja_ide.gui.misc.plugin_preferences')
class PluginPreferences(QWidget):
"""
Plugins section widget in NINJA-IDE Preferences
"""
def __init__(self):
super(PluginPreferences, self).__init__()
self.plugin_manager = plugin_manager.PluginManager()
vbox = QVBoxLayout(self)
self._tabs = QTabWidget()
vbox.addWidget(self._tabs)
#load widgets
self._load_widgets()
def _load_widgets(self):
logger.info("Loading plugins preferences widgets")
#Collect the preferences widget for each active plugin
for plugin in self.plugin_manager.get_active_plugins():
plugin_name = plugin.metadata.get('name')
try:
preferences_widget = plugin.get_preferences_widget()
if preferences_widget:
self._tabs.addTab(preferences_widget, plugin_name)
except Exception as reason:
logger.error("Unable to add the preferences widget (%s): %s",
plugin_name, reason)
continue
def save(self):
logger.info("Saving plugins preferences")
for i in range(self._tabs.count()):
try:
self._tabs.widget(i).save()
except Exception as reason:
logger.error("Unable to save preferences (%s): %s",
self._tabs.tabText(i), reason)
continue
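# Minimal standalone sketch (assumed, not part of the original module): shows
# the widget on its own; it needs a Qt-capable environment and NINJA-IDE's
# plugin machinery to find any active plugins.
if __name__ == '__main__':
    import sys
    from PyQt5.QtWidgets import QApplication

    app = QApplication(sys.argv)
    prefs = PluginPreferences()   # collects one tab per active plugin
    prefs.show()
    exit_code = app.exec_()
    prefs.save()                  # delegates save() to every plugin tab
    sys.exit(exit_code)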
|
gpl-3.0
| -8,308,359,762,363,824,000 | 35.424242 | 77 | 0.650582 | false |
yarikoptic/NiPy-OLD
|
nipy/neurospin/viz/activation_maps.py
|
1
|
25526
|
#!/usr/bin/env python
"""
Functions to do automatic visualization of activation-like maps.
For 2D-only visualization, only matplotlib is required.
For 3D visualization, Mayavi, version 3.0 or greater, is required.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD
# Standard library imports
import os
import sys
# Standard scientific libraries imports (more specific imports are
# delayed, so that parts of the module can be used without them).
import numpy as np
import matplotlib as mp
import pylab as pl
# Local imports
from nipy.neurospin.utils.mask import compute_mask
from nipy.io.imageformats import load
from anat_cache import mni_sform, mni_sform_inv, _AnatCache
from coord_tools import coord_transform, find_activation, \
find_cut_coords
class SformError(Exception):
pass
class NiftiIndexError(IndexError):
pass
################################################################################
# Colormaps
def _rotate_cmap(cmap, name=None, swap_order=('green', 'red', 'blue')):
""" Utility function to swap the colors of a colormap.
"""
orig_cdict = cmap._segmentdata.copy()
cdict = dict()
cdict['green'] = [(p, c1, c2)
for (p, c1, c2) in orig_cdict[swap_order[0]]]
cdict['blue'] = [(p, c1, c2)
for (p, c1, c2) in orig_cdict[swap_order[1]]]
cdict['red'] = [(p, c1, c2)
for (p, c1, c2) in orig_cdict[swap_order[2]]]
if name is None:
name = '%s_rotated' % cmap.name
return mp.colors.LinearSegmentedColormap(name, cdict, 512)
def _pigtailed_cmap(cmap, name=None,
swap_order=('green', 'red', 'blue')):
""" Utility function to make a new colormap by concatenating a
colormap with its reverse.
"""
orig_cdict = cmap._segmentdata.copy()
cdict = dict()
cdict['green'] = [(0.5*(1-p), c1, c2)
for (p, c1, c2) in reversed(orig_cdict[swap_order[0]])]
cdict['blue'] = [(0.5*(1-p), c1, c2)
for (p, c1, c2) in reversed(orig_cdict[swap_order[1]])]
cdict['red'] = [(0.5*(1-p), c1, c2)
for (p, c1, c2) in reversed(orig_cdict[swap_order[2]])]
for color in ('red', 'green', 'blue'):
cdict[color].extend([(0.5*(1+p), c1, c2)
for (p, c1, c2) in orig_cdict[color]])
if name is None:
name = '%s_reversed' % cmap.name
return mp.colors.LinearSegmentedColormap(name, cdict, 512)
# Using a dict as a namespace, to mimic matplotlib's cm
_cm = dict(
cold_hot = _pigtailed_cmap(pl.cm.hot, name='cold_hot'),
brown_blue = _pigtailed_cmap(pl.cm.bone, name='brown_blue'),
cyan_copper = _pigtailed_cmap(pl.cm.copper, name='cyan_copper'),
cyan_orange = _pigtailed_cmap(pl.cm.YlOrBr_r, name='cyan_orange'),
blue_red = _pigtailed_cmap(pl.cm.Reds_r, name='blue_red'),
brown_cyan = _pigtailed_cmap(pl.cm.Blues_r, name='brown_cyan'),
purple_green = _pigtailed_cmap(pl.cm.Greens_r, name='purple_green',
swap_order=('red', 'blue', 'green')),
purple_blue = _pigtailed_cmap(pl.cm.Blues_r, name='purple_blue',
swap_order=('red', 'blue', 'green')),
blue_orange = _pigtailed_cmap(pl.cm.Oranges_r, name='blue_orange',
swap_order=('green', 'red', 'blue')),
black_blue = _rotate_cmap(pl.cm.hot, name='black_blue'),
black_purple = _rotate_cmap(pl.cm.hot, name='black_purple',
swap_order=('blue', 'red', 'green')),
black_pink = _rotate_cmap(pl.cm.hot, name='black_pink',
swap_order=('blue', 'green', 'red')),
black_green = _rotate_cmap(pl.cm.hot, name='black_green',
swap_order=('red', 'blue', 'green')),
black_red = pl.cm.hot,
)
_cm.update(pl.cm.datad)
class _CM(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self.__dict__.update(self)
cm = _CM(**_cm)
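# Example (a minimal sketch): the extra colormaps above can be used wherever a
# matplotlib colormap is accepted, e.g. forwarded to plot_map_2d through its
# **kwargs. The random array below is only a placeholder.
#
#     data = np.random.randn(64, 64)
#     pl.imshow(data, cmap=cm.cold_hot)   # symmetric two-sided colormap
#     pl.colorbar()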
################################################################################
# 2D plotting of activation maps
################################################################################
def plot_map_2d(map, sform, cut_coords, anat=None, anat_sform=None,
vmin=None, figure_num=None, axes=None, title='',
mask=None, **kwargs):
""" Plot three cuts of a given activation map (Frontal, Axial, and Lateral)
Parameters
----------
map : 3D ndarray
The activation map, as a 3D image.
sform : 4x4 ndarray
The affine matrix going from image voxel space to MNI space.
cut_coords: 3-tuple of floats
The MNI coordinates of the point where the cut is performed, in
MNI coordinates and order.
anat : 3D ndarray, optional or False
The anatomical image to be used as a background. If None, the
MNI152 T1 1mm template is used. If False, no anat is displayed.
anat_sform : 4x4 ndarray, optional
The affine matrix going from the anatomical image voxel space to
MNI space. This parameter is not used when the default
anatomical is used, but it is compulsory when using an
        explicit anatomical image.
vmin : float, optional
The lower threshold of the positive activation. This
parameter is used to threshold the activation map.
figure_num : integer, optional
The number of the matplotlib figure used. If None is given, a
new figure is created.
axes : 4 tuple of float: (xmin, xmax, ymin, ymin), optional
The coordinates, in matplotlib figure space, of the axes
used to display the plot. If None, the complete figure is
used.
title : string, optional
        The title displayed on the figure.
mask : 3D ndarray, boolean, optional
        The brain mask. If None, the mask is computed from the map.
kwargs: extra keyword arguments, optional
Extra keyword arguments passed to pylab.imshow
Notes
-----
All the 3D arrays are in numpy convention: (x, y, z)
Cut coordinates are in Talairach coordinates. Warning: Talairach
coordinates are (y, x, z), if (x, y, z) are in voxel-ordering
convention.
"""
if anat is None:
anat, anat_sform, vmax_anat = _AnatCache.get_anat()
elif anat is not False:
vmax_anat = anat.max()
if mask is not None and (
np.all(mask) or np.all(np.logical_not(mask))):
mask = None
vmin_map = map.min()
vmax_map = map.max()
if vmin is not None and np.isfinite(vmin):
map = np.ma.masked_less(map, vmin)
elif mask is not None and not isinstance(map, np.ma.masked_array):
map = np.ma.masked_array(map, np.logical_not(mask))
vmin_map = map.min()
vmax_map = map.max()
    # Define use_mask up front so the check further down does not raise a
    # NameError when map is not a masked array.
    use_mask = False
    if isinstance(map, np.ma.core.MaskedArray):
if map._mask is False or np.all(np.logical_not(map._mask)):
map = np.asarray(map)
elif map._mask is True or np.all(map._mask):
map = np.asarray(map)
if use_mask and mask is not None:
map = np.ma.masked_array(map, np.logical_not(mask))
# Calculate the bounds
if anat is not False:
anat_bounds = np.zeros((4, 6))
anat_bounds[:3, -3:] = np.identity(3)*anat.shape
anat_bounds[-1, :] = 1
anat_bounds = np.dot(anat_sform, anat_bounds)
map_bounds = np.zeros((4, 6))
map_bounds[:3, -3:] = np.identity(3)*map.shape
map_bounds[-1, :] = 1
map_bounds = np.dot(sform, map_bounds)
# The coordinates of the center of the cut in different spaces.
y, x, z = cut_coords
x_map, y_map, z_map = [int(round(c)) for c in
coord_transform(x, y, z,
np.linalg.inv(sform))]
if anat is not False:
x_anat, y_anat, z_anat = [int(round(c)) for c in
coord_transform(x, y, z,
np.linalg.inv(anat_sform))]
fig = pl.figure(figure_num, figsize=(6.6, 2.6))
if axes is None:
axes = (0., 1., 0., 1.)
pl.clf()
ax_xmin, ax_xmax, ax_ymin, ax_ymax = axes
ax_width = ax_xmax - ax_xmin
ax_height = ax_ymax - ax_ymin
# Calculate the axes ratio size in a 'clever' way
if anat is not False:
shapes = np.array(anat.shape, 'f')
else:
shapes = np.array(map.shape, 'f')
shapes *= ax_width/shapes.sum()
###########################################################################
# Frontal
pl.axes([ax_xmin, ax_ymin, shapes[0], ax_height])
if anat is not False:
if y_anat < anat.shape[1]:
pl.imshow(np.rot90(anat[:, y_anat, :]),
cmap=pl.cm.gray,
vmin=-.5*vmax_anat,
vmax=vmax_anat,
extent=(anat_bounds[0, 3],
anat_bounds[0, 0],
anat_bounds[2, 0],
anat_bounds[2, 5]))
if y_map < map.shape[1]:
pl.imshow(np.rot90(map[:, y_map, :]),
vmin=vmin_map,
vmax=vmax_map,
extent=(map_bounds[0, 3],
map_bounds[0, 0],
map_bounds[2, 0],
map_bounds[2, 5]),
**kwargs)
    pl.text(ax_xmin + shapes[0] + shapes[1] - 0.01, ax_ymin + 0.07, '%i' % x,
horizontalalignment='right',
verticalalignment='bottom',
transform=fig.transFigure)
xmin, xmax = pl.xlim()
ymin, ymax = pl.ylim()
pl.hlines(z, xmin, xmax, color=(.5, .5, .5))
pl.vlines(-x, ymin, ymax, color=(.5, .5, .5))
pl.axis('off')
###########################################################################
# Lateral
pl.axes([ax_xmin + shapes[0], ax_ymin, shapes[1], ax_height])
if anat is not False:
if x_anat < anat.shape[0]:
pl.imshow(np.rot90(anat[x_anat, ...]), cmap=pl.cm.gray,
vmin=-.5*vmax_anat,
vmax=vmax_anat,
extent=(anat_bounds[1, 0],
anat_bounds[1, 4],
anat_bounds[2, 0],
anat_bounds[2, 5]))
if x_map < map.shape[0]:
pl.imshow(np.rot90(map[x_map, ...]),
vmin=vmin_map,
vmax=vmax_map,
extent=(map_bounds[1, 0],
map_bounds[1, 4],
map_bounds[2, 0],
map_bounds[2, 5]),
**kwargs)
pl.text(ax_xmin + shapes[-1] - 0.01, ax_ymin + 0.07, '%i' % y,
horizontalalignment='right',
verticalalignment='bottom',
transform=fig.transFigure)
xmin, xmax = pl.xlim()
ymin, ymax = pl.ylim()
pl.hlines(z, xmin, xmax, color=(.5, .5, .5))
pl.vlines(y, ymin, ymax, color=(.5, .5, .5))
pl.axis('off')
###########################################################################
# Axial
pl.axes([ax_xmin + shapes[0] + shapes[1], ax_ymin, shapes[-1],
ax_height])
if anat is not False:
if z_anat < anat.shape[2]:
pl.imshow(np.rot90(anat[..., z_anat]),
cmap=pl.cm.gray,
vmin=-.5*vmax_anat,
vmax=vmax_anat,
extent=(anat_bounds[0, 0],
anat_bounds[0, 3],
anat_bounds[1, 0],
anat_bounds[1, 4]))
if z_map < map.shape[2]:
pl.imshow(np.rot90(map[..., z_map]),
vmin=vmin_map,
vmax=vmax_map,
extent=(map_bounds[0, 0],
map_bounds[0, 3],
map_bounds[1, 0],
map_bounds[1, 4]),
**kwargs)
pl.text(ax_xmax - 0.01, ax_ymin + 0.07, '%i' % z,
horizontalalignment='right',
verticalalignment='bottom',
transform=fig.transFigure)
xmin, xmax = pl.xlim()
ymin, ymax = pl.ylim()
pl.hlines(y, xmin, xmax, color=(.5, .5, .5))
pl.vlines(x, ymin, ymax, color=(.5, .5, .5))
pl.axis('off')
pl.text(ax_xmin + 0.01, ax_ymax - 0.01, title,
horizontalalignment='left',
verticalalignment='top',
transform=fig.transFigure)
pl.axis('off')
def demo_plot_map_2d():
map = np.zeros((182, 218, 182))
    # Color an asymmetric rectangle around Brodmann area 26:
x, y, z = -6, -53, 9
x_map, y_map, z_map = coord_transform(x, y, z, mni_sform_inv)
map[x_map-30:x_map+30, y_map-3:y_map+3, z_map-10:z_map+10] = 1
map = np.ma.masked_less(map, 0.5)
plot_map_2d(map, mni_sform, cut_coords=(x, y, z),
figure_num=512)
def plot_map(map, sform, cut_coords, anat=None, anat_sform=None,
vmin=None, figure_num=None, title='', mask=None):
""" Plot a together a 3D volume rendering view of the activation, with an
outline of the brain, and 2D cuts. If Mayavi is not installed,
falls back to 2D views only.
Parameters
----------
map : 3D ndarray
The activation map, as a 3D image.
sform : 4x4 ndarray
The affine matrix going from image voxel space to MNI space.
cut_coords: 3-tuple of floats, optional
The MNI coordinates of the cut to perform, in MNI coordinates
        and order. If None is given, the cut_coords are automatically
estimated.
anat : 3D ndarray, optional
The anatomical image to be used as a background. If None, the
MNI152 T1 1mm template is used.
anat_sform : 4x4 ndarray, optional
The affine matrix going from the anatomical image voxel space to
MNI space. This parameter is not used when the default
anatomical is used, but it is compulsory when using an
        explicit anatomical image.
vmin : float, optional
The lower threshold of the positive activation. This
parameter is used to threshold the activation map.
figure_num : integer, optional
The number of the matplotlib and Mayavi figures used. If None is
given, a new figure is created.
title : string, optional
        The title displayed on the figure.
mask : 3D ndarray, boolean, optional
The brain mask. If None, the mask is computed from the map.
Notes
-----
All the 3D arrays are in numpy convention: (x, y, z)
Cut coordinates are in Talairach coordinates. Warning: Talairach
coordinates are (y, x, z), if (x, y, z) are in voxel-ordering
convention.
"""
try:
from enthought.mayavi import version
if not int(version.version[0]) > 2:
raise ImportError
except ImportError:
        print >> sys.stderr, 'Mayavi 3.x or newer not installed, plotting only 2D'
return plot_map_2d(map, sform, cut_coords=cut_coords, anat=anat,
anat_sform=anat_sform, vmin=vmin,
title=title,
figure_num=figure_num, mask=mask)
from .maps_3d import plot_map_3d, m2screenshot
plot_map_3d(map, sform, cut_coords=cut_coords, anat=anat,
anat_sform=anat_sform, vmin=vmin,
figure_num=figure_num, mask=mask)
fig = pl.figure(figure_num, figsize=(10.6, 2.6))
ax = pl.axes((-0.01, 0, 0.3, 1))
m2screenshot(mpl_axes=ax)
plot_map_2d(map, sform, cut_coords=cut_coords, anat=anat,
anat_sform=anat_sform, vmin=vmin, mask=mask,
figure_num=fig.number, axes=(0.28, 1, 0, 1.), title=title)
def demo_plot_map():
map = np.zeros((182, 218, 182))
    # Color an asymmetric rectangle around Brodmann area 26:
x, y, z = -6, -53, 9
x_map, y_map, z_map = coord_transform(x, y, z, mni_sform_inv)
map[x_map-30:x_map+30, y_map-3:y_map+3, z_map-10:z_map+10] = 1
plot_map(map, mni_sform, cut_coords=(x, y, z), vmin=0.5,
figure_num=512)
def auto_plot_map(map, sform, vmin=None, cut_coords=None, do3d=False,
anat=None, anat_sform=None, title='',
figure_num=None, mask=None, auto_sign=True):
""" Automatic plotting of an activation map.
    Plot together a 3D volume rendering view of the activation, with an
outline of the brain, and 2D cuts. If Mayavi is not installed,
falls back to 2D views only.
Parameters
----------
map : 3D ndarray
The activation map, as a 3D image.
sform : 4x4 ndarray
The affine matrix going from image voxel space to MNI space.
vmin : float, optional
The lower threshold of the positive activation. This
parameter is used to threshold the activation map.
cut_coords: 3-tuple of floats, optional
The MNI coordinates of the point where the cut is performed, in
MNI coordinates and order. If None is given, the cut_coords are
        automatically estimated.
do3d : boolean, optional
If do3d is True, a 3D plot is created if Mayavi is installed.
anat : 3D ndarray, optional
The anatomical image to be used as a background. If None, the
MNI152 T1 1mm template is used.
anat_sform : 4x4 ndarray, optional
The affine matrix going from the anatomical image voxel space to
MNI space. This parameter is not used when the default
anatomical is used, but it is compulsory when using an
        explicit anatomical image.
title : string, optional
        The title displayed on the figure.
figure_num : integer, optional
The number of the matplotlib and Mayavi figures used. If None is
given, a new figure is created.
mask : 3D ndarray, boolean, optional
The brain mask. If None, the mask is computed from the map.
auto_sign : boolean, optional
If auto_sign is True, the sign of the activation is
        automatically computed: negative activations can thus be
plotted.
Returns
-------
vmin : float
The lower threshold of the activation used.
cut_coords : 3-tuple of floats
The Talairach coordinates of the cut performed for the 2D
view.
Notes
-----
All the 3D arrays are in numpy convention: (x, y, z)
Cut coordinates are in Talairach coordinates. Warning: Talairach
coordinates are (y, x, z), if (x, y, z) are in voxel-ordering
convention.
"""
if do3d:
if do3d == 'offscreen':
try:
from enthought.mayavi import mlab
mlab.options.offscreen = True
except:
pass
plotter = plot_map
else:
plotter = plot_map_2d
if mask is None:
mask = compute_mask(map)
if vmin is None:
vmin = np.inf
pvalue = 0.04
while not np.isfinite(vmin):
pvalue *= 1.25
vmax, vmin = find_activation(map, mask=mask, pvalue=pvalue)
if not np.isfinite(vmin) and auto_sign:
if np.isfinite(vmax):
vmin = -vmax
if mask is not None:
map[mask] *= -1
else:
map *= -1
if cut_coords is None:
x, y, z = find_cut_coords(map, activation_threshold=vmin)
# XXX: Careful with Voxel/MNI ordering
y, x, z = coord_transform(x, y, z, sform)
cut_coords = (x, y, z)
plotter(map, sform, vmin=vmin, cut_coords=cut_coords,
anat=anat, anat_sform=anat_sform, title=title,
figure_num=figure_num, mask=mask)
return vmin, cut_coords
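# Hedged usage sketch, added in the style of the demo_* helpers in this module.
# The synthetic map, threshold and figure number below are illustrative only and
# are not part of the original module.
def demo_auto_plot_map():
    map = np.zeros((182, 218, 182))
    # Color an asymmetric rectangle around Brodmann area 26:
    x, y, z = -6, -53, 9
    x_map, y_map, z_map = coord_transform(x, y, z, mni_sform_inv)
    map[x_map-30:x_map+30, y_map-3:y_map+3, z_map-10:z_map+10] = 1
    # With an explicit vmin the automatic thresholding loop is skipped; the cut
    # coordinates are still estimated automatically from the map.
    vmin, cut_coords = auto_plot_map(map, mni_sform, vmin=0.5, figure_num=512)
    return vmin, cut_coords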
def plot_niftifile(filename, outputname=None, do3d=False, vmin=None,
cut_coords=None, anat_filename=None, figure_num=None,
mask_filename=None, auto_sign=True):
""" Given a nifti filename, plot a view of it to a file (png by
default).
Parameters
----------
filename : string
The name of the Nifti file of the map to be plotted
outputname : string, optional
The file name of the output file created. By default
the name of the input file with a png extension is used.
do3d : boolean, optional
If do3d is True, a 3D plot is created if Mayavi is installed.
vmin : float, optional
The lower threshold of the positive activation. This
parameter is used to threshold the activation map.
cut_coords: 3-tuple of floats, optional
The MNI coordinates of the point where the cut is performed, in
MNI coordinates and order. If None is given, the cut_coords are
        automatically estimated.
    anat_filename : string, optional
        Name of the Nifti image file to be used as a background. If None,
        the MNI152 T1 1mm template is used.
figure_num : integer, optional
The number of the matplotlib and Mayavi figures used. If None is
given, a new figure is created.
mask_filename : string, optional
Name of the Nifti file to be used as brain mask. If None, the
mask is computed from the map.
auto_sign : boolean, optional
If auto_sign is True, the sign of the activation is
        automatically computed: negative activations can thus be
plotted.
Notes
-----
Cut coordinates are in Talairach coordinates. Warning: Talairach
coordinates are (y, x, z), if (x, y, z) are in voxel-ordering
convention.
"""
if outputname is None:
outputname = os.path.splitext(filename)[0] + '.png'
if not os.path.exists(filename):
raise OSError, 'File %s does not exist' % filename
nim = load(filename)
sform = nim.get_affine()
if any(np.linalg.eigvals(sform)==0):
        raise SformError, "sform affine is not invertible"
if anat_filename is not None:
anat_im = load(anat_filename)
anat = anat_im.data
anat_sform = anat_im.get_affine()
else:
anat = None
anat_sform = None
if mask_filename is not None:
mask_im = load(mask_filename)
mask = mask_im.data.astype(np.bool)
if not np.allclose(mask_im.get_affine(), sform):
raise SformError, 'Mask does not have same sform as image'
if not np.allclose(mask.shape, nim.data.shape[:3]):
raise NiftiIndexError, 'Mask does not have same shape as image'
else:
mask = None
output_files = list()
if nim.data.ndim == 3:
map = nim.data.T
auto_plot_map(map, sform, vmin=vmin, cut_coords=cut_coords,
do3d=do3d, anat=anat, anat_sform=anat_sform, mask=mask,
title=os.path.basename(filename), figure_num=figure_num,
auto_sign=auto_sign)
pl.savefig(outputname)
output_files.append(outputname)
elif nim.data.ndim == 4:
outputname, outputext = os.path.splitext(outputname)
if len(nim.data) < 10:
fmt = '%s_%i%s'
elif len(nim.data) < 100:
fmt = '%s_%02i%s'
elif len(nim.data) < 1000:
fmt = '%s_%03i%s'
else:
fmt = '%s_%04i%s'
if mask is None:
mask = compute_mask(nim.data.mean(axis=0)).T
for index, data in enumerate(nim.data):
map = data.T
auto_plot_map(map, sform, vmin=vmin, cut_coords=cut_coords,
do3d=do3d, anat=anat, anat_sform=anat_sform,
title='%s, %i' % (os.path.basename(filename), index),
figure_num=figure_num, mask=mask, auto_sign=auto_sign)
this_outputname = fmt % (outputname, index, outputext)
pl.savefig(this_outputname)
pl.clf()
output_files.append(this_outputname)
else:
        raise NiftiIndexError, 'File %s: incorrect number of dimensions' % filename
return output_files
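# Illustrative calls to plot_niftifile (file names below are hypothetical):
#   output_files = plot_niftifile('zmap.nii', outputname='zmap.png')
# A 4D series produces one image per volume using the numbered pattern built
# above, e.g. zmap_0.png, zmap_1.png, ...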
|
bsd-3-clause
| -1,009,499,948,434,668,400 | 39.13522 | 80 | 0.53091 | false |
svebk/DeepSentiBank_memex
|
workflows/transform-ht-images-table-to-transition/transform-ht-images-table-to-transition.py
|
1
|
5269
|
import json
from datetime import datetime
import happybase
from pyspark import SparkContext, SparkConf
TTransportException = happybase._thriftpy.transport.TTransportException
def get_create_table(table_name, conn, families={'info': dict()}):
try:
        # happybase does not raise an error here if the table is missing;
        # accessing families() below is what forces the failure.
table = conn.table(table_name)
# this would fail if table does not exist
_ = table.families()
return table
except Exception as inst:
# TODO: act differently based on error type (connection issue or actually table missing)
if type(inst) == TTransportException:
raise inst
else:
print "[get_create_table: info] table {} does not exist (yet): {}{}".format(table_name, type(inst),
inst)
conn.create_table(table_name, families)
table = conn.table(table_name)
print "[get_create_table: info] created table {}".format(table_name)
return table
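# Usage sketch for get_create_table; the host, table name and families here are
# illustrative (the actual job reads them from job_conf.json further below):
#   conn = happybase.Connection('localhost')
#   table = get_create_table('images_test', conn,
#                            families={'info': dict(), 'ext': dict()})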
def get_list_value(json_x, field_tuple):
return [x["value"] for x in json_x if x["columnFamily"]==field_tuple[0] and x["qualifier"]==field_tuple[1]]
def get_list_qualifier_value(json_x, column_family):
return [(x["qualifier"], x["value"]) for x in json_x if x["columnFamily"] == column_family]
def get_ads_ids(json_x, key):
ads_ids = []
try:
ads_ids_str_list = get_list_value(json_x, ("info", "all_parent_ids"))
if ads_ids_str_list:
ads_ids = ads_ids_str_list[0].strip().split(',')
except Exception as inst:
print "[Error] could not get ads ids for row {}. {}".format(key, inst)
return ads_ids
def get_s3url(json_x, key):
s3url = None
try:
s3url_list = get_list_value(json_x, ("info", "s3_url"))
if s3url_list:
s3url = s3url_list[0].strip()
except Exception as inst:
print "[Error] could not get s3url for row {}. {}".format(key, inst)
return s3url
def get_sbimage_feat(json_x, key):
sb_feat = None
try:
sb_feat_list = get_list_value(json_x, ("info", "featnorm_cu"))
if sb_feat_list:
sb_feat = sb_feat_list[0]
except Exception as inst:
print "[Error] could not get sb_feat for row {}. {}".format(key, inst)
return sb_feat
def get_dlibface_feat(json_x, key):
dlibface_feat_list = []
try:
dlibface_feat_list = get_list_qualifier_value(json_x, "face")
if dlibface_feat_list:
# parse?
pass
except Exception as inst:
print "[Error] could not get sha1 for row {}. {}".format(key, inst)
return dlibface_feat_list
def transform(data):
key = data[0]
print key
json_x = [json.loads(x) for x in data[1].split("\n")]
fields = []
# Get ads ids
ads_ids = get_ads_ids(json_x, key)
#print "ads_ids",ads_ids
if ads_ids:
for ad_id in ads_ids:
# build timestamp as we would get it from CDR or Kafka topic...
ts = datetime.utcnow().isoformat()+'Z'
fields.append((key, [key, "ad", str(ad_id), str(ts)]))
else: # old image that cannot be linked to any ad...
return []
# Get s3url
s3_url = get_s3url(json_x, key)
#print "s3_url",s3_url
if s3_url:
fields.append((key, [key, "info", "s3_url", s3_url]))
else: # consider an image without s3_url is invalid
return []
# Get image feature
sb_feat = get_sbimage_feat(json_x, key)
if sb_feat:
fields.append((key, [key, "ext", "sbcmdline_feat_full_image", sb_feat]))
# Get face feature
dlibface_feat_list = get_dlibface_feat(json_x, key)
# format here would be a list of tuple (dlib_feat_dlib_face_64_31_108_74, featB64)
if dlibface_feat_list:
for dlibface_feat_id, dlibface_feat_value in dlibface_feat_list:
# Should we add a fake score to dlibface_feat_id?
fields.append((key, [key, "ext", dlibface_feat_id, dlibface_feat_value]))
#print key, fields
return fields
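# For reference, every element returned by transform() is a (row_key, cell) pair
# ready for HBase ingestion, for example (sha1, [sha1, "info", "s3_url", url]) or
# (sha1, [sha1, "ad", ad_id, timestamp]); the concrete values are illustrative.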
def transform_table(hbase_man_in, hbase_man_out):
in_rdd = hbase_man_in.read_hbase_table()
out_rdd = in_rdd.flatMap(lambda x: transform(x))
hbase_man_out.rdd2hbase(out_rdd)
if __name__ == '__main__':
from hbase_manager import HbaseManager
job_conf = json.load(open("job_conf.json","rt"))
print job_conf
tab_name_in = job_conf["tab_name_in"]
tab_name_out = job_conf["tab_name_out"]
# Try to create "tab_name_out"
tab_out_families = job_conf["tab_out_families"]
hbase_conn_timeout = None
nb_threads = 1
pool = happybase.ConnectionPool(size=nb_threads, host='10.1.94.57', timeout=hbase_conn_timeout)
with pool.connection() as conn:
get_create_table(tab_name_out, conn, tab_out_families)
hbase_host = job_conf["hbase_host"]
sc = SparkContext(appName='transform_'+tab_name_in+'_to_'+tab_name_out)
sc.setLogLevel("ERROR")
conf = SparkConf()
hbase_man_in = HbaseManager(sc, conf, hbase_host, tab_name_in)
hbase_man_out = HbaseManager(sc, conf, hbase_host, tab_name_out)
transform_table(hbase_man_in, hbase_man_out)
|
bsd-2-clause
| -8,967,046,424,435,025,000 | 36.906475 | 111 | 0.609224 | false |
fzimmermann89/pyload
|
module/plugins/crypter/SexuriaCom.py
|
1
|
5089
|
# -*- coding: utf-8 -*-
import re
from module.plugins.internal.Crypter import Crypter, create_getInfo
class SexuriaCom(Crypter):
__name__ = "SexuriaCom"
__type__ = "crypter"
__version__ = "0.11"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?sexuria\.com/(v1/)?(Pornos_Kostenlos_.+?_(\d+)\.html|dl_links_\d+_\d+\.html|id=\d+\&part=\d+\&link=\d+)'
__config__ = [("activated" , "bool", "Activated" , True),
("use_subfolder" , "bool", "Save package to subfolder" , True),
("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
__description__ = """Sexuria.com decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("NETHead", "NETHead.AT.gmx.DOT.net")]
#: Constants
PATTERN_SUPPORTED_MAIN = r'http://(www\.)?sexuria\.com/(v1/)?Pornos_Kostenlos_.+?_(\d+)\.html'
PATTERN_SUPPORTED_CRYPT = r'http://(www\.)?sexuria\.com/(v1/)?dl_links_\d+_(?P<ID>\d+)\.html'
PATTERN_SUPPORTED_REDIRECT = r'http://(www\.)?sexuria\.com/out\.php\?id=(?P<ID>\d+)\&part=\d+\&link=\d+'
PATTERN_TITLE = r'<title> - (?P<TITLE>.*) Sexuria - Kostenlose Pornos - Rapidshare XXX Porn</title>'
PATTERN_PASSWORD = r'<strong>Passwort: </strong></div></td>.*?bgcolor="#EFEFEF">(?P<PWD>.*?)</td>'
PATTERN_DL_LINK_PAGE = r'"(dl_links_\d+_\d+\.html)"'
PATTERN_REDIRECT_LINKS = r'value="(http://sexuria\.com/out\.php\?id=\d+\&part=\d+\&link=\d+)" readonly'
LIST_PWDIGNORE = ["Kein Passwort", "-"]
def decrypt(self, pyfile):
#: Init
self.pyfile = pyfile
self.package = pyfile.package()
#: Decrypt and add links
package_name, self.links, folder_name, package_pwd = self.decrypt_links(self.pyfile.url)
if package_pwd:
self.pyfile.package().password = package_pwd
self.packages = [(package_name, self.links, folder_name)]
def decrypt_links(self, url):
linklist = []
name = self.package.name
folder = self.package.folder
password = None
if re.match(self.PATTERN_SUPPORTED_MAIN, url, re.I):
#: Processing main page
html = self.load(url)
links = re.findall(self.PATTERN_DL_LINK_PAGE, html, re.I)
for link in links:
linklist.append("http://sexuria.com/v1/" + link)
elif re.match(self.PATTERN_SUPPORTED_REDIRECT, url, re.I):
#: Processing direct redirect link (out.php), redirecting to main page
id = re.search(self.PATTERN_SUPPORTED_REDIRECT, url, re.I).group('ID')
if id:
linklist.append("http://sexuria.com/v1/Pornos_Kostenlos_liebe_%s.html" % id)
elif re.match(self.PATTERN_SUPPORTED_CRYPT, url, re.I):
#: Extract info from main file
id = re.search(self.PATTERN_SUPPORTED_CRYPT, url, re.I).group('ID')
html = self.load("http://sexuria.com/v1/Pornos_Kostenlos_info_%s.html" % id)
#: Webpage title / Package name
titledata = re.search(self.PATTERN_TITLE, html, re.I)
if not titledata:
self.log_warning("No title data found, has site changed?")
else:
title = titledata.group('TITLE').strip()
if title:
name = folder = title
self.log_debug("Package info found, name [%s] and folder [%s]" % (name, folder))
#: Password
pwddata = re.search(self.PATTERN_PASSWORD, html, re.I | re.S)
if not pwddata:
self.log_warning("No password data found, has site changed?")
else:
pwd = pwddata.group('PWD').strip()
if pwd and not (pwd in self.LIST_PWDIGNORE):
password = pwd
self.log_debug("Package info found, password [%s]" % password)
#: Process links (dl_link)
html = self.load(url)
links = re.findall(self.PATTERN_REDIRECT_LINKS, html, re.I)
if not links:
                self.log_error(_("Broken for link: %s") % url)
else:
for link in links:
link = link.replace("http://sexuria.com/", "http://www.sexuria.com/")
finallink = self.load(link, just_header=True)['location']
if not finallink or ("sexuria.com/" in finallink):
self.log_error(_("Broken for link: %s") % link)
else:
linklist.append(finallink)
#: Log result
if not linklist:
self.fail(_("Unable to extract links (maybe plugin out of date?)"))
else:
for i, link in enumerate(linklist):
self.log_debug("Supported link %d/%d: %s" % (i+1, len(linklist), link))
#: All done, return to caller
return name, linklist, folder, password
getInfo = create_getInfo(SexuriaCom)
|
gpl-3.0
| 2,629,648,724,651,690,000 | 44.4375 | 141 | 0.538613 | false |
ptdtan/Ragout
|
ragout_api.py
|
1
|
10365
|
"""
Ragout API interface for ancestral genome reconstruction
"""
import os
import sys
import shutil
import logging
import argparse
from collections import namedtuple
from copy import deepcopy
ragout_root = os.path.dirname(os.path.realpath(__file__))
lib_absolute = os.path.join(ragout_root, "lib")
sys.path.insert(0, lib_absolute)
sys.path.insert(0, ragout_root)
os.environ["PATH"] = lib_absolute + os.pathsep + os.environ["PATH"]
import ragout.assembly_graph.assembly_refine as asref
import ragout.scaffolder.scaffolder as scfldr
import ragout.scaffolder.merge_iters as merge
import ragout.maf2synteny.maf2synteny as m2s
import ragout.overlap.overlap as overlap
import ragout.shared.config as config
from ragout.scaffolder.output_generator import OutputGenerator
from ragout.overlap.overlap import OverlapException
from ragout.phylogeny.phylogeny import Phylogeny, PhyloException
from ragout.breakpoint_graph.permutation import (PermutationContainer,
PermException)
from ragout.synteny_backend.synteny_backend import (SyntenyBackend,
BackendException)
from ragout.parsers.recipe_parser import parse_ragout_recipe, RecipeException, _make_dummy_recipe
from ragout.parsers.fasta_parser import read_fasta_dict, FastaError
from ragout.shared.debug import DebugConfig
from ragout.shared.datatypes import (Permutation, Block, Contig, Scaffold, Link)
from ragout.breakpoint_graph.breakpoint_graph import BreakpointGraph
from ragout.breakpoint_graph.inferer import AdjacencyInferer
from ragout.breakpoint_graph.chimera_detector import ChimeraDetector
from ragout.breakpoint_graph.chimera_detector_ancestor import ChimeraDetector4Ancestor
from ragout.phylogeny.phylogeny import *
from ragout.__version__ import __version__
import ragout.synteny_backend.maf
RunStage = namedtuple("RunStage", ["name", "block_size", "ref_indels",
"repeats", "rearrange"])
class RagoutInstance(object):
"""
    Ragout instance for handling reconstruction methods
"""
def __init__(self,maf, references, ancestor, ancestor_fasta,
threads=4,
phyloStr=None,
outDir="ragout-out",
scale="large",
tmpDir="tmp",
outLog="ragout-log.txt",
backend="maf",
is_overwrite = False,
is_debug=False,
is_resolve_repeats=False,
is_solid_scaffolds=False):
self.maf = maf
self.ancestor = ancestor
self.ancestor_seqs = read_fasta_dict(ancestor_fasta)
self.references = references
self.target = references[0]
self.phyloStr = phyloStr
self.scale = scale
self.debug = is_debug
self.outDir = outDir
self.backend = SyntenyBackend.backends[backend]
self.overwrite = is_overwrite
self.threads = threads
if not tmpDir:
self.tmpDir = os.path.join(outDir, "tmp")
else:
self.tmpDir = tmpDir
self.phyloStr = phyloStr
self.logger = enable_logging(outLog, is_debug)
self.debugger = DebugConfig.get_instance()
self.is_solid_scaffolds = is_solid_scaffolds
self.is_resolve_repeats = is_resolve_repeats
if not os.path.isdir(self.outDir):
os.mkdir(self.outDir)
if not os.path.isdir(self.tmpDir):
os.mkdir(self.tmpDir)
self.debug_root = self._set_debugging()
self._set_exe_paths()
self._check_extern_modules(backend)
self.phylogeny, self.naming_ref = self._get_phylogeny_and_naming_ref()
self.synteny_blocks = config.vals["blocks"][self.scale]
self.dummy_recipe = _make_dummy_recipe(self.references, self.target, self.ancestor, self.phyloStr, self.scale, self.maf, self.naming_ref)
        self.perm_files = self._make_permutation_files()
self.run_stages = self.make_run_stages()
self.phylo_perm_file = self.perm_files[self.synteny_blocks[-1]]
self._make_stage_perms()
def _construct_ancestor(self):
###Enable ChimeraDetector4Ancestor
if not self.is_solid_scaffolds:
raw_bp_graphs = {}
for stage in self.run_stages:
raw_bp_graphs[stage] = BreakpointGraph(self.stage_perms[stage], ancestor=self.ancestor, ancestral=True)
chim_detect = ChimeraDetector4Ancestor(raw_bp_graphs, self.run_stages, self.ancestor_seqs)
prev_stages = []
scaffolds = None
###apply for all stages
last_stage = self.run_stages[-1]
for stage in self.run_stages:
logger.info("Stage \"{0}\"".format(stage.name))
#debugger.set_debug_dir(os.path.join(debug_root, stage.name))
prev_stages.append(stage)
if not self.is_solid_scaffolds:
broken_perms = chim_detect.break_contigs(self.stage_perms[stage], [stage])
else:
broken_perms = self.stage_perms[stage]
breakpoint_graph = BreakpointGraph(broken_perms, ancestral=True, ancestor=self.ancestor)
adj_inferer = AdjacencyInferer(breakpoint_graph, self.phylogeny, ancestral= True)
adjacencies = adj_inferer.infer_adjacencies()
cur_scaffolds = scfldr.build_scaffolds(adjacencies, broken_perms, ancestral=True)
if scaffolds is not None:
if not self.is_solid_scaffolds:
merging_perms = chim_detect.break_contigs(self.stage_perms[stage],
prev_stages)
else:
merging_perms = self.stage_perms[stage]
scaffolds = merge.merge_scaffolds(scaffolds, cur_scaffolds,
merging_perms, stage.rearrange, ancestral=True)
else:
scaffolds = cur_scaffolds
scfldr.assign_scaffold_names(scaffolds, self.stage_perms[last_stage], self.naming_ref)
###output generating of ancestor scaffolds
logger.info("Done scaffolding for ''{0}''".format(self.ancestor))
out_gen = OutputGenerator(self.ancestor_seqs, scaffolds)
out_gen.make_output(self.outDir, self.ancestor, write_fasta=False)
def _set_debugging(self):
if not os.path.isdir(self.outDir):
os.mkdir(self.outDir)
if not os.path.isdir(self.tmpDir):
os.mkdir(self.tmpDir)
debug_root = os.path.join(self.outDir, "debug")
self.debugger.set_debugging(self.debug)
self.debugger.set_debug_dir(debug_root)
self.debugger.clear_debug_dir()
return debug_root
def _set_exe_paths(self, LIB_DIR="lib"):
ragout_root = os.path.dirname(os.path.realpath(__file__))
lib_absolute = os.path.join(ragout_root, LIB_DIR)
sys.path.insert(0, lib_absolute)
sys.path.insert(0, ragout_root)
os.environ["PATH"] = lib_absolute + os.pathsep + os.environ["PATH"]
pass
def _check_extern_modules(self, backend):
"""
Checks if all necessary native modules are available
"""
if not m2s.check_binary():
raise BackendException("maf2synteny binary is missing, "
"did you run 'make'?")
if not overlap.check_binary():
raise BackendException("overlap binary is missing, "
"did you run 'make'?")
pass
def _get_phylogeny_and_naming_ref(self):
"""
Retrieves phylogeny (infers if necessary) as well as
naming reference genome
"""
if self.phyloStr:
logger.info("Phylogeny is taken from parameters")
phylogeny = Phylogeny.from_newick(self.phyloStr)
else:
raise Exception("Phylogeny tree must be supplied!")
logger.info(phylogeny.tree_string)
leaves_sorted = phylogeny.nodes_by_distance(self.target, onlyLeaves=True)
naming_ref = leaves_sorted[0]
logger.info("'{0}' is chosen as a naming reference".format(naming_ref))
return phylogeny, naming_ref
    def _make_permutation_files(self):
return self.backend.make_permutations(self.dummy_recipe, self.synteny_blocks, self.outDir,
self.overwrite, self.threads)
def _make_stage_perms(self):
self.stage_perms = {}
for stage in self.run_stages:
self.debugger.set_debug_dir(os.path.join(self.debug_root, stage.name))
self.stage_perms[stage]= PermutationContainer(self.perm_files[stage.block_size],
self.dummy_recipe, stage.repeats,
stage.ref_indels, self.phylogeny)
pass
def make_run_stages(self):
"""
Setting parameters of run stages
"""
stages = []
for block in self.synteny_blocks:
stages.append(RunStage(name=str(block), block_size=block,
ref_indels=False, repeats=False,
rearrange=True))
stages.append(RunStage(name="refine", block_size=self.synteny_blocks[-1],
ref_indels=False, repeats=self.is_resolve_repeats,
rearrange=False))
return stages
def enable_logging(log_file, debug):
"""
Turns on logging, sets debug levels and assigns a log file
"""
logger = logging.getLogger()
log_formatter = logging.Formatter("[%(asctime)s] %(name)s: %(levelname)s: "
"%(message)s", "%H:%M:%S")
console_formatter = logging.Formatter("[%(asctime)s] %(levelname)s: "
"%(message)s", "%H:%M:%S")
console_log = logging.StreamHandler()
console_log.setFormatter(console_formatter)
if not debug:
console_log.setLevel(logging.INFO)
file_handler = logging.FileHandler(log_file, mode="w")
file_handler.setFormatter(log_formatter)
logger.setLevel(logging.DEBUG)
logger.addHandler(console_log)
logger.addHandler(file_handler)
return logger
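# Minimal construction sketch (hypothetical file names and Newick string; real
# inputs depend on the MAF synteny backend configuration):
#   instance = RagoutInstance("alignment.maf", ["target", "ref1", "ref2"],
#                             "anc", "anc.fa",
#                             phyloStr="((target,ref1)anc,ref2);")
#   instance._construct_ancestor()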
|
gpl-3.0
| 776,794,839,997,108,000 | 41.306122 | 145 | 0.612349 | false |
googleads/google-ads-python
|
google/ads/googleads/v8/errors/types/resource_count_limit_exceeded_error.py
|
1
|
1486
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.errors",
marshal="google.ads.googleads.v8",
manifest={"ResourceCountLimitExceededErrorEnum",},
)
class ResourceCountLimitExceededErrorEnum(proto.Message):
r"""Container for enum describing possible resource count limit
exceeded errors.
"""
class ResourceCountLimitExceededError(proto.Enum):
r"""Enum describing possible resource count limit exceeded
errors.
"""
UNSPECIFIED = 0
UNKNOWN = 1
ACCOUNT_LIMIT = 2
CAMPAIGN_LIMIT = 3
ADGROUP_LIMIT = 4
AD_GROUP_AD_LIMIT = 5
AD_GROUP_CRITERION_LIMIT = 6
SHARED_SET_LIMIT = 7
MATCHING_FUNCTION_LIMIT = 8
RESPONSE_ROW_LIMIT_EXCEEDED = 9
RESOURCE_LIMIT = 10
__all__ = tuple(sorted(__protobuf__.manifest))
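# Access sketch (the import path is assumed from the package declaration above):
#   from google.ads.googleads.v8.errors.types import (
#       resource_count_limit_exceeded_error as rcle)
#   error_enum = rcle.ResourceCountLimitExceededErrorEnum.ResourceCountLimitExceededError
#   error_enum.CAMPAIGN_LIMIT  # == 3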
|
apache-2.0
| -3,917,095,316,855,932,000 | 29.958333 | 74 | 0.682369 | false |
LivingOn/xbmc-plugin.image.nachtschicht-bhv
|
addon.py
|
1
|
2657
|
# -*- coding=utf8 -*-
#******************************************************************************
# addon.py
#------------------------------------------------------------------------------
#
# Copyright (c) 2013 LivingOn <LivingOn@xmail.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#******************************************************************************
import sys
import urllib
import xbmcgui
import xbmcaddon
import xbmcplugin
from resources.lib.NachtschichtBhvParser import NachtschichtBhvParser
class NachtschichtBhv(object):
PLUGIN_NAME = "plugin.image.nachtschicht-bhv"
_plugin_id = None
_addon = None
def __init__(self):
self._register_addon()
self._process_request()
def _register_addon(self):
self._plugin_id = int(sys.argv[1])
self._addon = xbmcaddon.Addon(id = self.PLUGIN_NAME)
def _process_request(self):
if not sys.argv[2]:
self._create_gallery_list()
else:
self._create_picture_list(sys.argv[2])
def _create_gallery_list(self):
liste = NachtschichtBhvParser.get_gallery_list()
items = []
for (url, title, thumb) in liste:
url = sys.argv[0] + "?" + urllib.urlencode({'url' : url})
items.append((url, xbmcgui.ListItem(title, iconImage=thumb), True,))
xbmcplugin.addDirectoryItems(self._plugin_id, items)
xbmcplugin.endOfDirectory(self._plugin_id, cacheToDisc=True)
def _create_picture_list(self, url):
count = 1
items = []
url = urllib.unquote(url[5:])
for (url, thumb) in NachtschichtBhvParser.get_picture_list(url):
items.append((url, xbmcgui.ListItem("-%d-" % count, iconImage=thumb), False,))
count += 1
xbmcplugin.addDirectoryItems(self._plugin_id, items)
xbmcplugin.endOfDirectory(self._plugin_id, cacheToDisc=False)
if __name__ == "__main__":
NachtschichtBhv()
|
gpl-2.0
| -6,851,067,713,381,325,000 | 35.916667 | 90 | 0.590892 | false |
geodynamics/pylith
|
pylith/friction/obsolete/RateStateAgeing.py
|
1
|
3306
|
#!/usr/bin/env python
#
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
## @file pylith/friction/RateStateAgeing.py
##
## @brief Python object implementing Rate and State with Ageing Law.
##
## Factory: friction_model.
from FrictionModel import FrictionModel
from friction import RateStateAgeing as ModuleRateStateAgeing
# RateStateAgeing class
class RateStateAgeing(FrictionModel, ModuleRateStateAgeing):
"""
Python object implementing Rate and State with Ageing Law.
Factory: friction_model.
"""
# INVENTORY //////////////////////////////////////////////////////////
class Inventory(FrictionModel.Inventory):
"""
Python object for managing RateStateAgeing facilities and properties.
"""
## @class Inventory
## Python object for managing RateStateAgeing facilities and properties.
##
## \b Properties
## @li \b linear_slip_rate Nondimensional slip rate below which friction
## varies linearly with slip rate.
##
## \b Facilities
## @li None
import pyre.inventory
linearSlipRate = pyre.inventory.float("linear_slip_rate", default=1.0e-12,
validator=pyre.inventory.greaterEqual(0.0))
linearSlipRate.meta['tip'] = "Nondimensional slip rate below which friction " \
"varies linearly with slip rate."
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="ratestateageing"):
"""
Constructor.
"""
FrictionModel.__init__(self, name)
self.availableFields = \
{'vertex': \
{'info': ["reference_friction_coefficient",
"reference_slip_rate",
"characteristic_slip_distance",
"constitutive_parameter_a",
"constitutive_parameter_b",
"cohesion"],
'data': ["state_variable"]},
'cell': \
{'info': [],
'data': []}}
self._loggingPrefix = "FrRSAg "
return
# PRIVATE METHODS ////////////////////////////////////////////////////
def _configure(self):
"""
Setup members using inventory.
"""
try:
FrictionModel._configure(self)
ModuleRateStateAgeing.linearSlipRate(self, self.inventory.linearSlipRate)
except ValueError, err:
aliases = ", ".join(self.aliases)
raise ValueError("Error while configuring friction model "
"(%s):\n%s" % (aliases, err.message))
return
def _createModuleObj(self):
"""
Call constructor for module object for access to C++ object.
"""
ModuleRateStateAgeing.__init__(self)
return
# FACTORIES ////////////////////////////////////////////////////////////
def friction_model():
"""
Factory associated with RateStateAgeing.
"""
return RateStateAgeing()
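# Factory usage sketch: PyLith normally instantiates this component through its
# Pyre facility machinery, but the factory itself simply returns an instance:
#   friction = friction_model()   # RateStateAgeing with default inventory values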
# End of file
|
mit
| 235,920,559,458,479,170 | 27.5 | 85 | 0.562311 | false |
apporc/nova
|
nova/compute/rpcapi.py
|
1
|
45182
|
# Copyright 2013 Red Hat, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the compute RPC API.
"""
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from nova import context
from nova import exception
from nova.i18n import _, _LI, _LE
from nova import objects
from nova.objects import base as objects_base
from nova.objects import service as service_obj
from nova import rpc
rpcapi_opts = [
cfg.StrOpt('compute_topic',
default='compute',
help='The topic compute nodes listen on'),
]
CONF = cfg.CONF
CONF.register_opts(rpcapi_opts)
rpcapi_cap_opt = cfg.StrOpt('compute',
help='Set a version cap for messages sent to compute services. If you '
'plan to do a live upgrade from an old version to a newer '
'version, you should set this option to the old version before '
'beginning the live upgrade procedure. Only upgrading to the '
'next version is supported, so you cannot skip a release for '
'the live upgrade procedure.')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
LOG = logging.getLogger(__name__)
def _compute_host(host, instance):
'''Get the destination host for a message.
:param host: explicit host to send the message to.
:param instance: If an explicit host was not specified, use
instance['host']
:returns: A host
'''
if host:
return host
if not instance:
raise exception.NovaException(_('No compute host specified'))
if not instance.host:
raise exception.NovaException(_('Unable to find host for '
'Instance %s') % instance.uuid)
return instance.host
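# In short, _compute_host prefers the explicit host, then falls back to
# instance.host, and raises NovaException when neither is available, e.g.
#   _compute_host('compute-1', None)  -> 'compute-1'
#   _compute_host(None, instance)     -> instance.host (or an error if unset)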
class ComputeAPI(object):
'''Client side of the compute rpc API.
API version history:
* 1.0 - Initial version.
* 1.1 - Adds get_host_uptime()
* 1.2 - Adds check_can_live_migrate_[destination|source]
* 1.3 - Adds change_instance_metadata()
* 1.4 - Remove instance_uuid, add instance argument to
reboot_instance()
* 1.5 - Remove instance_uuid, add instance argument to
pause_instance(), unpause_instance()
* 1.6 - Remove instance_uuid, add instance argument to
suspend_instance()
* 1.7 - Remove instance_uuid, add instance argument to
get_console_output()
* 1.8 - Remove instance_uuid, add instance argument to
add_fixed_ip_to_instance()
* 1.9 - Remove instance_uuid, add instance argument to attach_volume()
* 1.10 - Remove instance_id, add instance argument to
check_can_live_migrate_destination()
* 1.11 - Remove instance_id, add instance argument to
check_can_live_migrate_source()
* 1.12 - Remove instance_uuid, add instance argument to
confirm_resize()
* 1.13 - Remove instance_uuid, add instance argument to detach_volume()
* 1.14 - Remove instance_uuid, add instance argument to finish_resize()
* 1.15 - Remove instance_uuid, add instance argument to
finish_revert_resize()
* 1.16 - Remove instance_uuid, add instance argument to
get_diagnostics()
* 1.17 - Remove instance_uuid, add instance argument to
get_vnc_console()
* 1.18 - Remove instance_uuid, add instance argument to inject_file()
* 1.19 - Remove instance_uuid, add instance argument to
inject_network_info()
* 1.20 - Remove instance_id, add instance argument to
post_live_migration_at_destination()
* 1.21 - Remove instance_uuid, add instance argument to
power_off_instance() and stop_instance()
* 1.22 - Remove instance_uuid, add instance argument to
power_on_instance() and start_instance()
* 1.23 - Remove instance_id, add instance argument to
pre_live_migration()
* 1.24 - Remove instance_uuid, add instance argument to
rebuild_instance()
* 1.25 - Remove instance_uuid, add instance argument to
remove_fixed_ip_from_instance()
* 1.26 - Remove instance_id, add instance argument to
remove_volume_connection()
* 1.27 - Remove instance_uuid, add instance argument to
rescue_instance()
* 1.28 - Remove instance_uuid, add instance argument to reset_network()
* 1.29 - Remove instance_uuid, add instance argument to
resize_instance()
* 1.30 - Remove instance_uuid, add instance argument to
resume_instance()
* 1.31 - Remove instance_uuid, add instance argument to revert_resize()
* 1.32 - Remove instance_id, add instance argument to
rollback_live_migration_at_destination()
* 1.33 - Remove instance_uuid, add instance argument to
set_admin_password()
* 1.34 - Remove instance_uuid, add instance argument to
snapshot_instance()
* 1.35 - Remove instance_uuid, add instance argument to
unrescue_instance()
* 1.36 - Remove instance_uuid, add instance argument to
change_instance_metadata()
* 1.37 - Remove instance_uuid, add instance argument to
terminate_instance()
* 1.38 - Changes to prep_resize():
* remove instance_uuid, add instance
* remove instance_type_id, add instance_type
* remove topic, it was unused
* 1.39 - Remove instance_uuid, add instance argument to run_instance()
* 1.40 - Remove instance_id, add instance argument to live_migration()
* 1.41 - Adds refresh_instance_security_rules()
* 1.42 - Add reservations arg to prep_resize(), resize_instance(),
finish_resize(), confirm_resize(), revert_resize() and
finish_revert_resize()
* 1.43 - Add migrate_data to live_migration()
* 1.44 - Adds reserve_block_device_name()
* 2.0 - Remove 1.x backwards compat
* 2.1 - Adds orig_sys_metadata to rebuild_instance()
* 2.2 - Adds slave_info parameter to add_aggregate_host() and
remove_aggregate_host()
* 2.3 - Adds volume_id to reserve_block_device_name()
* 2.4 - Add bdms to terminate_instance
* 2.5 - Add block device and network info to reboot_instance
* 2.6 - Remove migration_id, add migration to resize_instance
* 2.7 - Remove migration_id, add migration to confirm_resize
* 2.8 - Remove migration_id, add migration to finish_resize
* 2.9 - Add publish_service_capabilities()
* 2.10 - Adds filter_properties and request_spec to prep_resize()
* 2.11 - Adds soft_delete_instance() and restore_instance()
* 2.12 - Remove migration_id, add migration to revert_resize
* 2.13 - Remove migration_id, add migration to finish_revert_resize
* 2.14 - Remove aggregate_id, add aggregate to add_aggregate_host
* 2.15 - Remove aggregate_id, add aggregate to remove_aggregate_host
* 2.16 - Add instance_type to resize_instance
* 2.17 - Add get_backdoor_port()
* 2.18 - Add bdms to rebuild_instance
* 2.19 - Add node to run_instance
* 2.20 - Add node to prep_resize
* 2.21 - Add migrate_data dict param to pre_live_migration()
* 2.22 - Add recreate, on_shared_storage and host arguments to
rebuild_instance()
* 2.23 - Remove network_info from reboot_instance
* 2.24 - Added get_spice_console method
* 2.25 - Add attach_interface() and detach_interface()
* 2.26 - Add validate_console_port to ensure the service connects to
vnc on the correct port
* 2.27 - Adds 'reservations' to terminate_instance() and
soft_delete_instance()
... Grizzly supports message version 2.27. So, any changes to existing
methods in 2.x after that point should be done such that they can
handle the version_cap being set to 2.27.
* 2.28 - Adds check_instance_shared_storage()
* 2.29 - Made start_instance() and stop_instance() take new-world
instance objects
* 2.30 - Adds live_snapshot_instance()
* 2.31 - Adds shelve_instance(), shelve_offload_instance, and
unshelve_instance()
* 2.32 - Make reboot_instance take a new world instance object
* 2.33 - Made suspend_instance() and resume_instance() take new-world
instance objects
* 2.34 - Added swap_volume()
* 2.35 - Made terminate_instance() and soft_delete_instance() take
new-world instance objects
* 2.36 - Made pause_instance() and unpause_instance() take new-world
instance objects
* 2.37 - Added the legacy_bdm_in_spec parameter to run_instance
* 2.38 - Made check_can_live_migrate_[destination|source] take
new-world instance objects
* 2.39 - Made revert_resize() and confirm_resize() take new-world
instance objects
* 2.40 - Made reset_network() take new-world instance object
* 2.41 - Make inject_network_info take new-world instance object
* 2.42 - Splits snapshot_instance() into snapshot_instance() and
backup_instance() and makes them take new-world instance
objects.
* 2.43 - Made prep_resize() take new-world instance object
* 2.44 - Add volume_snapshot_create(), volume_snapshot_delete()
* 2.45 - Made resize_instance() take new-world objects
* 2.46 - Made finish_resize() take new-world objects
* 2.47 - Made finish_revert_resize() take new-world objects
... Havana supports message version 2.47. So, any changes to existing
methods in 2.x after that point should be done such that they can
handle the version_cap being set to 2.47.
* 2.48 - Make add_aggregate_host() and remove_aggregate_host() take
new-world objects
* ... - Remove live_snapshot() that was never actually used
* 3.0 - Remove 2.x compatibility
* 3.1 - Update get_spice_console() to take an instance object
* 3.2 - Update get_vnc_console() to take an instance object
* 3.3 - Update validate_console_port() to take an instance object
* 3.4 - Update rebuild_instance() to take an instance object
* 3.5 - Pass preserve_ephemeral flag to rebuild_instance()
* 3.6 - Make volume_snapshot_{create,delete} use new-world objects
* 3.7 - Update change_instance_metadata() to take an instance object
* 3.8 - Update set_admin_password() to take an instance object
* 3.9 - Update rescue_instance() to take an instance object
* 3.10 - Added get_rdp_console method
* 3.11 - Update unrescue_instance() to take an object
* 3.12 - Update add_fixed_ip_to_instance() to take an object
* 3.13 - Update remove_fixed_ip_from_instance() to take an object
* 3.14 - Update post_live_migration_at_destination() to take an object
* 3.15 - Adds filter_properties and node to unshelve_instance()
* 3.16 - Make reserve_block_device_name and attach_volume use new-world
objects, and add disk_bus and device_type params to
reserve_block_device_name, and bdm param to attach_volume
* 3.17 - Update attach_interface and detach_interface to take an object
* 3.18 - Update get_diagnostics() to take an instance object
* Removed inject_file(), as it was unused.
* 3.19 - Update pre_live_migration to take instance object
* 3.20 - Make restore_instance take an instance object
* 3.21 - Made rebuild take new-world BDM objects
* 3.22 - Made terminate_instance take new-world BDM objects
* 3.23 - Added external_instance_event()
* build_and_run_instance was added in Havana and not used or
documented.
... Icehouse supports message version 3.23. So, any changes to
existing methods in 3.x after that point should be done such that they
can handle the version_cap being set to 3.23.
* 3.24 - Update rescue_instance() to take optional rescue_image_ref
* 3.25 - Make detach_volume take an object
* 3.26 - Make live_migration() and
rollback_live_migration_at_destination() take an object
* ... Removed run_instance()
* 3.27 - Make run_instance() accept a new-world object
* 3.28 - Update get_console_output() to accept a new-world object
* 3.29 - Make check_instance_shared_storage accept a new-world object
* 3.30 - Make remove_volume_connection() accept a new-world object
* 3.31 - Add get_instance_diagnostics
* 3.32 - Add destroy_disks and migrate_data optional parameters to
rollback_live_migration_at_destination()
* 3.33 - Make build_and_run_instance() take a NetworkRequestList object
* 3.34 - Add get_serial_console method
* 3.35 - Make reserve_block_device_name return a BDM object
... Juno supports message version 3.35. So, any changes to
existing methods in 3.x after that point should be done such that they
can handle the version_cap being set to 3.35.
* 3.36 - Make build_and_run_instance() send a Flavor object
* 3.37 - Add clean_shutdown to stop, resize, rescue, shelve, and
shelve_offload
* 3.38 - Add clean_shutdown to prep_resize
* 3.39 - Add quiesce_instance and unquiesce_instance methods
* 3.40 - Make build_and_run_instance() take a new-world topology
limits object
... Kilo supports messaging version 3.40. So, any changes to
existing methods in 3.x after that point should be done so that they
can handle the version_cap being set to 3.40
... Version 4.0 is equivalent to 3.40. Kilo sends version 4.0 by
default, can accept 3.x calls from Juno nodes, and can be pinned to
3.x for Juno compatibility. All new changes should go against 4.x.
* 4.0 - Remove 3.x compatibility
* 4.1 - Make prep_resize() and resize_instance() send Flavor object
* 4.2 - Add migration argument to live_migration()
* 4.3 - Added get_mks_console method
* 4.4 - Make refresh_instance_security_rules send an instance object
* 4.5 - Add migration, scheduler_node and limits arguments to
rebuild_instance()
... Liberty supports messaging version 4.5. So, any changes to
existing methods in 4.x after that point should be done so that they
can handle the version_cap being set to 4.5
* ... - Remove refresh_security_group_members()
* ... - Remove refresh_security_group_rules()
'''
VERSION_ALIASES = {
'icehouse': '3.23',
'juno': '3.35',
'kilo': '4.0',
'liberty': '4.5',
}
def __init__(self):
super(ComputeAPI, self).__init__()
target = messaging.Target(topic=CONF.compute_topic, version='4.0')
upgrade_level = CONF.upgrade_levels.compute
if upgrade_level == 'auto':
version_cap = self._determine_version_cap(target)
else:
version_cap = self.VERSION_ALIASES.get(upgrade_level,
upgrade_level)
serializer = objects_base.NovaObjectSerializer()
self.client = self.get_client(target, version_cap, serializer)
def _determine_version_cap(self, target):
service_version = objects.Service.get_minimum_version(
context.get_admin_context(), 'nova-compute')
history = service_obj.SERVICE_VERSION_HISTORY
try:
version_cap = history[service_version]['compute_rpc']
except IndexError:
LOG.error(_LE('Failed to extract compute RPC version from '
'service history because I am too '
'old (minimum version is now %(version)i)'),
{'version': service_version})
raise exception.ServiceTooOld(thisver=service_obj.SERVICE_VERSION,
minver=service_version)
except KeyError:
LOG.error(_LE('Failed to extract compute RPC version from '
'service history for version %(version)i'),
{'version': service_version})
return target.version
LOG.info(_LI('Automatically selected compute RPC version %(rpc)s '
'from minimum service version %(service)i'),
{'rpc': version_cap,
'service': service_version})
return version_cap
def _compat_ver(self, current, legacy):
if self.client.can_send_version(current):
return current
else:
return legacy
# Cells overrides this
def get_client(self, target, version_cap, serializer):
return rpc.get_client(target,
version_cap=version_cap,
serializer=serializer)
def add_aggregate_host(self, ctxt, aggregate, host_param, host,
slave_info=None):
'''Add aggregate host.
:param ctxt: request context
:param aggregate:
:param host_param: This value is placed in the message to be the 'host'
parameter for the remote method.
:param host: This is the host to send the message to.
'''
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'add_aggregate_host',
aggregate=aggregate, host=host_param,
slave_info=slave_info)
def add_fixed_ip_to_instance(self, ctxt, instance, network_id):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'add_fixed_ip_to_instance',
instance=instance, network_id=network_id)
def attach_interface(self, ctxt, instance, network_id, port_id,
requested_ip):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'attach_interface',
instance=instance, network_id=network_id,
port_id=port_id, requested_ip=requested_ip)
def attach_volume(self, ctxt, instance, bdm):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'attach_volume', instance=instance, bdm=bdm)
def change_instance_metadata(self, ctxt, instance, diff):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'change_instance_metadata',
instance=instance, diff=diff)
def check_can_live_migrate_destination(self, ctxt, instance, destination,
block_migration, disk_over_commit):
version = '4.0'
cctxt = self.client.prepare(server=destination, version=version)
return cctxt.call(ctxt, 'check_can_live_migrate_destination',
instance=instance,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
version = '4.0'
source = _compute_host(None, instance)
cctxt = self.client.prepare(server=source, version=version)
return cctxt.call(ctxt, 'check_can_live_migrate_source',
instance=instance,
dest_check_data=dest_check_data)
def check_instance_shared_storage(self, ctxt, instance, data, host=None):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(host, instance),
version=version)
return cctxt.call(ctxt, 'check_instance_shared_storage',
instance=instance,
data=data)
def confirm_resize(self, ctxt, instance, migration, host,
reservations=None, cast=True):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(host, instance),
version=version)
rpc_method = cctxt.cast if cast else cctxt.call
return rpc_method(ctxt, 'confirm_resize',
instance=instance, migration=migration,
reservations=reservations)
def detach_interface(self, ctxt, instance, port_id):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'detach_interface',
instance=instance, port_id=port_id)
def detach_volume(self, ctxt, instance, volume_id):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'detach_volume',
instance=instance, volume_id=volume_id)
def finish_resize(self, ctxt, instance, migration, image, disk_info,
host, reservations=None):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'finish_resize',
instance=instance, migration=migration,
image=image, disk_info=disk_info, reservations=reservations)
def finish_revert_resize(self, ctxt, instance, migration, host,
reservations=None):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'finish_revert_resize',
instance=instance, migration=migration,
reservations=reservations)
def get_console_output(self, ctxt, instance, tail_length):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_console_output',
instance=instance, tail_length=tail_length)
def get_console_pool_info(self, ctxt, console_type, host):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'get_console_pool_info',
console_type=console_type)
def get_console_topic(self, ctxt, host):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'get_console_topic')
def get_diagnostics(self, ctxt, instance):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_diagnostics', instance=instance)
def get_instance_diagnostics(self, ctxt, instance):
# TODO(danms): This needs to be fixed for objects
instance_p = jsonutils.to_primitive(instance)
kwargs = {'instance': instance_p}
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_instance_diagnostics', **kwargs)
def get_vnc_console(self, ctxt, instance, console_type):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_vnc_console',
instance=instance, console_type=console_type)
def get_spice_console(self, ctxt, instance, console_type):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_spice_console',
instance=instance, console_type=console_type)
def get_rdp_console(self, ctxt, instance, console_type):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_rdp_console',
instance=instance, console_type=console_type)
def get_mks_console(self, ctxt, instance, console_type):
version = '4.3'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_mks_console',
instance=instance, console_type=console_type)
def get_serial_console(self, ctxt, instance, console_type):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_serial_console',
instance=instance, console_type=console_type)
def validate_console_port(self, ctxt, instance, port, console_type):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'validate_console_port',
instance=instance, port=port,
console_type=console_type)
def host_maintenance_mode(self, ctxt, host_param, mode, host):
'''Set host maintenance mode
:param ctxt: request context
:param host_param: This value is placed in the message to be the 'host'
parameter for the remote method.
        :param mode: whether to start or stop maintenance on the host.
:param host: This is the host to send the message to.
'''
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'host_maintenance_mode',
host=host_param, mode=mode)
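    # Added usage note (not part of the original file): callers typically pass
    # the same hostname twice -- once to route the RPC and once as the payload
    # argument -- e.g., hypothetically:
    #   rpcapi.host_maintenance_mode(ctxt, host_param='node1', mode=True, host='node1')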
def host_power_action(self, ctxt, action, host):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'host_power_action', action=action)
def inject_network_info(self, ctxt, instance):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'inject_network_info', instance=instance)
def live_migration(self, ctxt, instance, dest, block_migration, host,
migration, migrate_data=None):
args = {'migration': migration}
version = '4.2'
if not self.client.can_send_version(version):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'live_migration', instance=instance,
dest=dest, block_migration=block_migration,
migrate_data=migrate_data, **args)
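    # Added commentary: the method above shows the version-negotiation pattern used
    # throughout this client -- try the newest pinned version and, if the deployed
    # computes only speak an older one, fall back to 4.0, dropping or converting the
    # arguments the older side would not understand (see prep_resize and
    # rebuild_instance below). A minimal hedged sketch with hypothetical names:
    #
    #   version = '4.2'
    #   if not self.client.can_send_version(version):
    #       version = '4.0'
    #       kwargs.pop('newer_only_arg', None)   # 'newer_only_arg' is illustrative
    #   cctxt = self.client.prepare(server=host, version=version)
    #   cctxt.cast(ctxt, 'some_method', **kwargs)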
def pause_instance(self, ctxt, instance):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'pause_instance', instance=instance)
def post_live_migration_at_destination(self, ctxt, instance,
block_migration, host):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'post_live_migration_at_destination',
instance=instance, block_migration=block_migration)
def pre_live_migration(self, ctxt, instance, block_migration, disk,
host, migrate_data=None):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'pre_live_migration',
instance=instance,
block_migration=block_migration,
disk=disk, migrate_data=migrate_data)
def prep_resize(self, ctxt, image, instance, instance_type, host,
reservations=None, request_spec=None,
filter_properties=None, node=None,
clean_shutdown=True):
image_p = jsonutils.to_primitive(image)
msg_args = {'instance': instance,
'instance_type': instance_type,
'image': image_p,
'reservations': reservations,
'request_spec': request_spec,
'filter_properties': filter_properties,
'node': node,
'clean_shutdown': clean_shutdown}
version = '4.1'
if not self.client.can_send_version(version):
version = '4.0'
msg_args['instance_type'] = objects_base.obj_to_primitive(
instance_type)
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'prep_resize', **msg_args)
def reboot_instance(self, ctxt, instance, block_device_info,
reboot_type):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'reboot_instance',
instance=instance,
block_device_info=block_device_info,
reboot_type=reboot_type)
def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
image_ref, orig_image_ref, orig_sys_metadata, bdms,
recreate=False, on_shared_storage=False, host=None, node=None,
preserve_ephemeral=False, migration=None, limits=None,
kwargs=None):
# NOTE(danms): kwargs is only here for cells compatibility, don't
# actually send it to compute
extra = {'preserve_ephemeral': preserve_ephemeral,
'migration': migration,
'scheduled_node': node,
'limits': limits}
version = '4.5'
if not self.client.can_send_version(version):
version = '4.0'
extra.pop('migration')
extra.pop('scheduled_node')
extra.pop('limits')
cctxt = self.client.prepare(server=_compute_host(host, instance),
version=version)
cctxt.cast(ctxt, 'rebuild_instance',
instance=instance, new_pass=new_pass,
injected_files=injected_files, image_ref=image_ref,
orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata, bdms=bdms,
recreate=recreate, on_shared_storage=on_shared_storage,
**extra)
def refresh_provider_fw_rules(self, ctxt, host):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'refresh_provider_fw_rules')
def remove_aggregate_host(self, ctxt, aggregate, host_param, host,
slave_info=None):
'''Remove aggregate host.
:param ctxt: request context
        :param aggregate: the aggregate from which the host is removed.
:param host_param: This value is placed in the message to be the 'host'
parameter for the remote method.
:param host: This is the host to send the message to.
'''
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'remove_aggregate_host',
aggregate=aggregate, host=host_param,
slave_info=slave_info)
def remove_fixed_ip_from_instance(self, ctxt, instance, address):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'remove_fixed_ip_from_instance',
instance=instance, address=address)
def remove_volume_connection(self, ctxt, instance, volume_id, host):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'remove_volume_connection',
instance=instance, volume_id=volume_id)
def rescue_instance(self, ctxt, instance, rescue_password,
rescue_image_ref=None, clean_shutdown=True):
version = '4.0'
msg_args = {'rescue_password': rescue_password,
'clean_shutdown': clean_shutdown,
'rescue_image_ref': rescue_image_ref,
'instance': instance,
}
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'rescue_instance', **msg_args)
def reset_network(self, ctxt, instance):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'reset_network', instance=instance)
def resize_instance(self, ctxt, instance, migration, image, instance_type,
reservations=None, clean_shutdown=True):
msg_args = {'instance': instance, 'migration': migration,
'image': image, 'reservations': reservations,
'instance_type': instance_type,
'clean_shutdown': clean_shutdown,
}
version = '4.1'
if not self.client.can_send_version(version):
msg_args['instance_type'] = objects_base.obj_to_primitive(
instance_type)
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'resize_instance', **msg_args)
def resume_instance(self, ctxt, instance):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'resume_instance', instance=instance)
def revert_resize(self, ctxt, instance, migration, host,
reservations=None):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(host, instance),
version=version)
cctxt.cast(ctxt, 'revert_resize',
instance=instance, migration=migration,
reservations=reservations)
def rollback_live_migration_at_destination(self, ctxt, instance, host,
destroy_disks=True,
migrate_data=None):
version = '4.0'
extra = {'destroy_disks': destroy_disks,
'migrate_data': migrate_data,
}
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'rollback_live_migration_at_destination',
instance=instance, **extra)
def set_admin_password(self, ctxt, instance, new_pass):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'set_admin_password',
instance=instance, new_pass=new_pass)
def set_host_enabled(self, ctxt, enabled, host):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'set_host_enabled', enabled=enabled)
def swap_volume(self, ctxt, instance, old_volume_id, new_volume_id):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'swap_volume',
instance=instance, old_volume_id=old_volume_id,
new_volume_id=new_volume_id)
def get_host_uptime(self, ctxt, host):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'get_host_uptime')
def reserve_block_device_name(self, ctxt, instance, device, volume_id,
disk_bus=None, device_type=None):
kw = {'instance': instance, 'device': device,
'volume_id': volume_id, 'disk_bus': disk_bus,
'device_type': device_type}
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
volume_bdm = cctxt.call(ctxt, 'reserve_block_device_name', **kw)
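        # Added note (not in the original source): older compute services may
        # return a primitive (dict) rather than a BlockDeviceMapping object, so
        # the fallback below re-fetches the object form by volume id.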
if not isinstance(volume_bdm, objects.BlockDeviceMapping):
volume_bdm = objects.BlockDeviceMapping.get_by_volume_id(
ctxt, volume_id)
return volume_bdm
def backup_instance(self, ctxt, instance, image_id, backup_type,
rotation):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'backup_instance',
instance=instance,
image_id=image_id,
backup_type=backup_type,
rotation=rotation)
def snapshot_instance(self, ctxt, instance, image_id):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'snapshot_instance',
instance=instance,
image_id=image_id)
def start_instance(self, ctxt, instance):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'start_instance', instance=instance)
def stop_instance(self, ctxt, instance, do_cast=True, clean_shutdown=True):
msg_args = {'instance': instance,
'clean_shutdown': clean_shutdown}
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
rpc_method = cctxt.cast if do_cast else cctxt.call
return rpc_method(ctxt, 'stop_instance', **msg_args)
def suspend_instance(self, ctxt, instance):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'suspend_instance', instance=instance)
def terminate_instance(self, ctxt, instance, bdms, reservations=None,
delete_type=None):
# NOTE(rajesht): The `delete_type` parameter is passed because
# the method signature has to match with `terminate_instance()`
# method of cells rpcapi.
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'terminate_instance',
instance=instance, bdms=bdms,
reservations=reservations)
def unpause_instance(self, ctxt, instance):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'unpause_instance', instance=instance)
def unrescue_instance(self, ctxt, instance):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'unrescue_instance', instance=instance)
def soft_delete_instance(self, ctxt, instance, reservations=None):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'soft_delete_instance',
instance=instance, reservations=reservations)
def restore_instance(self, ctxt, instance):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'restore_instance', instance=instance)
def shelve_instance(self, ctxt, instance, image_id=None,
clean_shutdown=True):
msg_args = {'instance': instance, 'image_id': image_id,
'clean_shutdown': clean_shutdown}
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'shelve_instance', **msg_args)
def shelve_offload_instance(self, ctxt, instance,
clean_shutdown=True):
msg_args = {'instance': instance, 'clean_shutdown': clean_shutdown}
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'shelve_offload_instance', **msg_args)
def unshelve_instance(self, ctxt, instance, host, image=None,
filter_properties=None, node=None):
version = '4.0'
msg_kwargs = {
'instance': instance,
'image': image,
'filter_properties': filter_properties,
'node': node,
}
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'unshelve_instance', **msg_kwargs)
def volume_snapshot_create(self, ctxt, instance, volume_id,
create_info):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'volume_snapshot_create', instance=instance,
volume_id=volume_id, create_info=create_info)
def volume_snapshot_delete(self, ctxt, instance, volume_id, snapshot_id,
delete_info):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'volume_snapshot_delete', instance=instance,
volume_id=volume_id, snapshot_id=snapshot_id,
delete_info=delete_info)
def external_instance_event(self, ctxt, instances, events):
cctxt = self.client.prepare(
server=_compute_host(None, instances[0]),
version='4.0')
cctxt.cast(ctxt, 'external_instance_event', instances=instances,
events=events)
def build_and_run_instance(self, ctxt, instance, host, image, request_spec,
filter_properties, admin_password=None, injected_files=None,
requested_networks=None, security_groups=None,
block_device_mapping=None, node=None, limits=None):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'build_and_run_instance', instance=instance,
image=image, request_spec=request_spec,
filter_properties=filter_properties,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping, node=node,
limits=limits)
def quiesce_instance(self, ctxt, instance):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'quiesce_instance', instance=instance)
def unquiesce_instance(self, ctxt, instance, mapping=None):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'unquiesce_instance', instance=instance,
mapping=mapping)
def refresh_instance_security_rules(self, ctxt, host, instance):
version = '4.4'
if not self.client.can_send_version(version):
version = '4.0'
instance = objects_base.obj_to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'refresh_instance_security_rules',
instance=instance)
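    # Hedged usage sketch (added; not part of the original module). Assuming this
    # is Nova's compute RPC client class, a caller holding a request context and
    # an instance object would simply do, for example:
    #
    #   rpcapi.pause_instance(context, instance)
    #   output = rpcapi.get_console_output(context, instance, tail_length=10)
    #
    # casts return immediately, while calls (such as get_console_output) block
    # for the remote result.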
|
apache-2.0
| 8,557,778,177,542,389,000 | 45.340513 | 79 | 0.595547 | false |
SkierPGP/Skier
|
db.py
|
1
|
3077
|
import datetime
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.dialects.postgresql import ARRAY
from app import app
db = SQLAlchemy(app)
class Key(db.Model):
"""
The model for a PGP key.
"""
id = db.Column(db.Integer, primary_key=True)
uid = db.relationship("UID", backref="key")
fingerprint = db.Column(db.String(40), nullable=False)
key_fp_id = db.Column(db.String(8), nullable=False)
keyalgo = db.Column(db.Integer, nullable=False)
created = db.Column(db.DateTime, nullable=False)
expires = db.Column(db.DateTime, nullable=False)
length = db.Column(db.Integer, nullable=False)
armored = db.Column(db.Text, nullable=True)
    # Pass the callable so the timestamp is evaluated per row, not once at import time.
    added_time = db.Column(db.DateTime, default=datetime.datetime.utcnow)
signatures = db.relationship("Signature", backref="key")
subkeys = db.Column(ARRAY(db.String(255)), nullable=True)
oid = db.Column(db.String(255), nullable=True)
@classmethod
def from_keyinfo(cls, obj):
k = Key()
for uid in obj.uid:
k.uid.append(uid)
k.length = obj.length
k.created = datetime.datetime.fromtimestamp(obj.created)
k.expires = datetime.datetime.fromtimestamp(obj.expires) if obj.expires else datetime.datetime(1970, 1, 1, 0, 0, 0)
k.fingerprint = obj.fingerprint
k.key_fp_id = obj.shortid
k.keyalgo = obj.get_algo_id()
k.added_time = datetime.datetime.utcnow()
k.armored = obj.armored
for key, v in obj.signatures.items():
for sig in v:
sigob = Signature()
sigob.sigtype = sig[2]
sigob.pgp_keyid = sig[0]
sigob.key_sfp_for = key
k.signatures.append(sigob)
if k.subkeys is None:
k.subkeys = []
for sub in obj.subkeys:
k.subkeys.append(sub)
k.oid = obj.oid
return k
def __repr__(self):
return "<Key {fp} for {uid}>".format(uid=self.uid, fp=self.fingerprint)
class UID(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
full_uid = db.Column(db.String())
uid_name = db.Column(db.String())
uid_email = db.Column(db.String())
uid_comment = db.Column(db.String(), nullable=True)
key_id = db.Column(db.Integer, db.ForeignKey("key.id"))
def __repr__(self):
return "{}".format(self.full_uid)
@property
def todict(self):
return {"full": self.full_uid, "name": self.uid_name, "email": self.uid_email, "comment": self.uid_comment}
class Signature(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
pgp_keyid = db.Column(db.String(16), nullable=False)
sigtype = db.Column(db.Integer)
key_sfp_for = db.Column(db.String(16))
key_id = db.Column(db.Integer, db.ForeignKey("key.id"))
class Synch(db.Model):
id = db.Column(db.Integer, autoincrement=True, primary_key=True)
synch_time = db.Column(db.DateTime, nullable=False)
synch_count = db.Column(db.Integer, default=0)
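# Hedged usage sketch (added; not part of the original module). Assuming a parsed
# key-info object compatible with Key.from_keyinfo and a Flask application
# context, persisting a key might look like:
#
#   key = Key.from_keyinfo(keyinfo)   # 'keyinfo' is a hypothetical parsed object
#   db.session.add(key)
#   db.session.commit()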
|
agpl-3.0
| 7,163,609,061,429,038,000 | 27.5 | 123 | 0.626584 | false |
ndokter/dsmr_parser
|
dsmr_parser/clients/filereader.py
|
1
|
6256
|
import logging
import fileinput
import tailer
from dsmr_parser.clients.telegram_buffer import TelegramBuffer
from dsmr_parser.exceptions import ParseError, InvalidChecksumError
from dsmr_parser.objects import Telegram
from dsmr_parser.parsers import TelegramParser
logger = logging.getLogger(__name__)
class FileReader(object):
"""
Filereader to read and parse raw telegram strings from a file and instantiate Telegram objects
for each read telegram.
Usage:
from dsmr_parser import telegram_specifications
from dsmr_parser.clients.filereader import FileReader
if __name__== "__main__":
infile = '/data/smartmeter/readings.txt'
file_reader = FileReader(
file = infile,
telegram_specification = telegram_specifications.V4
)
for telegram in file_reader.read_as_object():
print(telegram)
The file can be created like:
from dsmr_parser import telegram_specifications
from dsmr_parser.clients import SerialReader, SERIAL_SETTINGS_V5
if __name__== "__main__":
outfile = '/data/smartmeter/readings.txt'
serial_reader = SerialReader(
device='/dev/ttyUSB0',
serial_settings=SERIAL_SETTINGS_V5,
telegram_specification=telegram_specifications.V4
)
for telegram in serial_reader.read_as_object():
f=open(outfile,"ab+")
f.write(telegram._telegram_data.encode())
f.close()
"""
def __init__(self, file, telegram_specification):
self._file = file
self.telegram_parser = TelegramParser(telegram_specification)
self.telegram_buffer = TelegramBuffer()
self.telegram_specification = telegram_specification
def read_as_object(self):
"""
        Read complete DSMR telegrams from a file and return a Telegram object.
:rtype: generator
"""
with open(self._file, "rb") as file_handle:
while True:
data = file_handle.readline()
                # Decode first and avoid shadowing the built-in str(), which is
                # still needed in the except clause below.
                line = data.decode()
                self.telegram_buffer.append(line)
for telegram in self.telegram_buffer.get_all():
try:
yield Telegram(telegram, self.telegram_parser, self.telegram_specification)
except InvalidChecksumError as e:
logger.warning(str(e))
except ParseError as e:
logger.error('Failed to parse telegram: %s', e)
class FileInputReader(object):
"""
Filereader to read and parse raw telegram strings from stdin or files specified at the commandline
and instantiate Telegram objects for each read telegram.
Usage python script "syphon_smartmeter_readings_stdin.py":
from dsmr_parser import telegram_specifications
from dsmr_parser.clients.filereader import FileInputReader
if __name__== "__main__":
        fileinput_reader = FileInputReader(
            telegram_specification = telegram_specifications.V4
        )
for telegram in fileinput_reader.read_as_object():
print(telegram)
Command line:
tail -f /data/smartmeter/readings.txt | python3 syphon_smartmeter_readings_stdin.py
"""
def __init__(self, telegram_specification):
self.telegram_parser = TelegramParser(telegram_specification)
self.telegram_buffer = TelegramBuffer()
self.telegram_specification = telegram_specification
def read_as_object(self):
"""
        Read complete DSMR telegrams from stdin or file arguments specified on the command line
        and return a Telegram object.
:rtype: generator
"""
with fileinput.input(mode='rb') as file_handle:
while True:
data = file_handle.readline()
                # Decode first and avoid shadowing the built-in str(), which is
                # still needed in the except clause below.
                line = data.decode()
                self.telegram_buffer.append(line)
for telegram in self.telegram_buffer.get_all():
try:
yield Telegram(telegram, self.telegram_parser, self.telegram_specification)
except InvalidChecksumError as e:
logger.warning(str(e))
except ParseError as e:
logger.error('Failed to parse telegram: %s', e)
class FileTailReader(object):
"""
Filereader to read and parse raw telegram strings from the tail of a
given file and instantiate Telegram objects for each read telegram.
Usage python script "syphon_smartmeter_readings_stdin.py":
from dsmr_parser import telegram_specifications
from dsmr_parser.clients.filereader import FileTailReader
if __name__== "__main__":
infile = '/data/smartmeter/readings.txt'
filetail_reader = FileTailReader(
file = infile,
telegram_specification = telegram_specifications.V5
)
for telegram in filetail_reader.read_as_object():
print(telegram)
"""
def __init__(self, file, telegram_specification):
self._file = file
self.telegram_parser = TelegramParser(telegram_specification)
self.telegram_buffer = TelegramBuffer()
self.telegram_specification = telegram_specification
def read_as_object(self):
"""
        Read complete DSMR telegrams from a file's tail and return a Telegram object.
:rtype: generator
"""
with open(self._file, "rb") as file_handle:
for data in tailer.follow(file_handle):
                # Decode first and avoid shadowing the built-in str(), which is
                # still needed in the except clause below.
                line = data.decode()
                self.telegram_buffer.append(line)
for telegram in self.telegram_buffer.get_all():
try:
yield Telegram(telegram, self.telegram_parser, self.telegram_specification)
except InvalidChecksumError as e:
logger.warning(str(e))
except ParseError as e:
logger.error('Failed to parse telegram: %s', e)
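# Added note: all three readers expose the same generator interface, so consuming
# code is identical regardless of the source. A hedged example, assuming
# telegram_specifications.V5 matches the connected meter:
#
#   reader = FileTailReader(file='/data/smartmeter/readings.txt',
#                           telegram_specification=telegram_specifications.V5)
#   for telegram in reader.read_as_object():
#       print(telegram)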
|
mit
| -5,151,617,203,341,825,000 | 35.584795 | 103 | 0.596867 | false |
anshitag/Job-Cred-Meter
|
Python Files/datacollection.py
|
1
|
3030
|
import requests
import json
access_token='EAAMcWZCJ7Ui4BALHXjXKkizZANG5AhQZAL9buUS5KR6fX8YurTosJLmxTfP4WoZBiP8DFe8eqbJ2NplRPGLsve2xYIwwsbERTnX5pvsiRIvJ0So1TZAHmOHlUPVnzZCBjPdZBhkm8CBnYTV0eNjAwQiBVkroQGnaqgZD'
outfile=open("list.csv","w")
def text_cleaning(text):
text = text.encode('ascii', 'ignore').decode('ascii')
#text=text.replace('\\','BSL')
#text=text.replace('\'','SIC')
#text=text.replace("'",'TLD')
#text=text.replace('"','DIC')
text=text.replace('\n','. ')
#text=text.replace(')','CBR')
#text=text.replace('(','OBR')
text=text.replace(';',', ')
#text=text.replace(',','COM')
return text
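# Added illustration: text_cleaning() drops non-ASCII characters and flattens
# newlines and semicolons so each post fits on one semicolon-separated CSV row,
# e.g. text_cleaning(u"Hiring interns\nApply today") -> "Hiring interns. Apply today"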
def search(group_name):
base_url = 'https://graph.facebook.com/search?limit=1000&type=group&q='+group_name+'&access_token='+access_token
results=requests.get(base_url)
results_text=results.text
results_json=json.loads(results_text)
for item in results_json['data']:
if item['privacy']=='OPEN':
try:
#print item['name']
outfile.write(item['id']+";"+item['name'])
outfile.write("\n")
#print ''
except:
pass
base_url = 'https://graph.facebook.com/search?limit=1000&type=page&q='+group_name+'&access_token='+access_token
results=requests.get(base_url)
results_text=results.text
results_json=json.loads(results_text)
for item in results_json['data']:
try:
#print item['name']
outfile.write(item['id']+";"+item['name'])
outfile.write("\n")
#print ''
except:
pass
search('internships')
search('startup')
search('jobs')
outfile.close()
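# Added commentary: the loop below re-reads list.csv, fetches the feed for each
# group/page id (first column), and appends one "<post id>;<cleaned message>;<timestamp>"
# row per post to feed.csv; the commented-out block sketches how result paging
# could be followed.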
infile=open("list.csv","r")
outfile=open("feed.csv","w")
for line in infile:
print line.split(';')[0]
base_url='https://graph.facebook.com/v2.6/'+line.split(';')[0]+'/feed?access_token='+access_token
results=requests.get(base_url)
results_text=results.text
results_json=json.loads(results_text)
#while(True):
#try:
for item in results_json['data']:
if 'message' in item:
print text_cleaning(item['message'])
print ''
if 'updated_time' in item:
outfile.write(item['id']+";"+text_cleaning(item['message'])+";"+item['updated_time'])
outfile.write("\n")
else:
outfile.write(item['id']+";"+text_cleaning(item['message'])+";"+item['created_time'])
outfile.write("\n")
#base_url=results_json['paging']['next']
#results=requests.get(base_url)
#results_text=results.text
#results_json=json.loads(results_text)
#except KeyError:
#break
outfile.close()
infile.close()
|
mit
| -368,879,785,351,056,060 | 32.827586 | 180 | 0.542574 | false |
gitenberg-dev/pg-epubmaker
|
epubmaker/writers/RSTWriter.py
|
1
|
1259
|
#!/usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*- coding: iso-8859-1 -*-
"""
RSTWriter.py
Copyright 2009 by Marcello Perathoner
Distributable under the GNU General Public License Version 3 or newer.
Build an RST file. This is just the master RST with the PG license mixed in.
"""
from __future__ import with_statement
import os
from epubmaker.lib.Logger import debug, info, error
from epubmaker import ParserFactory
from epubmaker import writers
class Writer (writers.BaseWriter):
""" Class to write a reStructuredText. """
def build (self):
""" Build RST file. """
filename = os.path.join (self.options.outputdir, self.options.outputfile)
info ("Creating RST file: %s" % filename)
parser = ParserFactory.ParserFactory.create (self.options.candidate.filename,
self.options.candidate.mediatype)
parser.options = self.options
if not hasattr (parser, 'rst2nroff'):
error ('RSTWriter can only work on a RSTParser.')
return
data = parser.preprocess ('utf-8').encode ('utf-8')
self.write_with_crlf (filename, data)
info ("Done RST file: %s" % filename)
|
gpl-3.0
| 4,966,281,297,831,469,000 | 25.787234 | 86 | 0.634631 | false |
fintura/pyPaaS
|
pypaas/checkout.py
|
1
|
3420
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import copy
import shutil
import os.path
import datetime
import subprocess
from configparser import ConfigParser
from . import options
class Checkout(object):
def __init__(self, branch, commit, name):
self.branch, self.commit, self.name = branch, commit, name
@property
def path(self):
return os.path.join(
options.BASEPATH, 'checkouts',
self.branch.repo.name, self.branch.name,
'{0}-{1}'.format(self.name, self.commit[:11])
)
@classmethod
def create(cls, branch, commit):
name = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
self = cls(branch, commit, name)
self.run_in(
['git', 'clone', '-q', self.branch.repo.path, self.path],
env={},
cwd=os.path.expanduser('~deploy')
)
self.run_in(
['git', 'config', 'advice.detachedHead', 'false'],
env={}
)
self.run_in(
['git', 'checkout', self.commit],
env={}
)
self.run_in(
['git', 'submodule', 'update', '--init', '--recursive'],
env={}
)
to_delete = []
for root, dirs, files in os.walk(self.path):
for d in dirs:
if d == '.git':
to_delete.append(os.path.join(root, d))
for d in to_delete:
shutil.rmtree(d)
return self
@property
def cmd_env(self):
env = copy.deepcopy(self.branch.config.get('env', dict()))
env.update(os.environ)
env['GIT_COMMIT'] = self.commit
return env
@classmethod
def all_for_branch(cls, branch):
try:
files = os.listdir(os.path.join(
options.BASEPATH, 'checkouts', branch.repo.name, branch.name
))
except FileNotFoundError:
return
for basename in files:
f = os.path.join(
options.BASEPATH, 'checkouts',
branch.repo.name, branch.name, basename
)
if not os.path.isdir(f):
continue
name, commit = basename.split('-')
yield cls(branch, commit, name)
def run_hook_cmd(self, name, default=None):
hook = self.branch.config.get('hooks', {}).get(name, default)
if hook is None:
return
if not isinstance(hook, list):
hook = [hook]
for c in hook:
self.run_in(c, shell=True)
@property
def custom_cmds(self):
try:
return self.branch.config['custom_cmds']
except KeyError:
return dict()
def run_in(self, cmd, cwd=None, env=None, **kwargs):
cwd = self.path if cwd is None else cwd
env = self.cmd_env if env is None else env
        # necessary for capturing the output by replacing sys.stderr
subprocess.check_call(
cmd,
cwd=cwd,
env=env,
stderr=subprocess.STDOUT,
**kwargs
)
def run_custom_cmd(self, name):
self.run_in(self.custom_cmds[name], shell=True)
def build(self):
self.run_hook_cmd(
name='build',
default='if [ -f ./.build.sh ]; then ./.build.sh; fi'
)
def remove(self):
shutil.rmtree(self.path)
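# Hedged usage sketch (added; not part of the original module). Given a branch
# object as used elsewhere in pypaas and a commit sha, a deploy step might be:
#
#   checkout = Checkout.create(branch, commit)   # clone, pin commit, strip .git dirs
#   checkout.build()                             # run the 'build' hook (./.build.sh by default)
#   checkout.remove()                            # delete the working copy
#
# 'branch' and 'commit' here are assumed inputs supplied by the calling code.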
|
mit
| 2,602,529,112,575,175,700 | 27.264463 | 76 | 0.521637 | false |
JakobGM/robotarm-optimization
|
tests/test_robot_arm.py
|
1
|
1781
|
import unittest
import numpy as np
from numpy import pi
from robot_arm import RobotArm
class TestRobotArm(unittest.TestCase):
def setUp(self):
self.lengths = (3, 2, 2,)
self.destinations = (
(5, 4, 6, 4, 5),
(0, 2, 0.5, -2, -1),
)
self.theta = (pi, pi / 2, 0,)
self.robot_arm = RobotArm(self.lengths, self.destinations, self.theta)
def test_init_all_arguments(self):
RobotArm(self.lengths, self.destinations, self.theta)
def test_init_without_theta(self):
RobotArm(self.lengths, self.destinations)
def test_wrong_lengths_type(self):
self.assertRaises(
TypeError,
RobotArm,
np.array(self.lengths),
self.destinations,
self.theta
)
def test_wrong_destinations_type(self):
self.assertRaises(
TypeError,
RobotArm,
self.lengths,
np.array(self.destinations),
self.theta)
def test_wrong_theta_type(self):
self.assertRaises(
TypeError,
RobotArm,
self.lengths,
self.destinations,
np.array(self.theta))
def test_destinations_properties(self):
robot_arm = RobotArm(self.lengths, self.destinations, self.theta)
self.assertIsInstance(robot_arm.destinations, np.ndarray)
# Check if points are 2D
self.assertTrue(robot_arm.destinations.shape[0] == 2)
# Check if destinations are immutable
self.assertRaises(
ValueError,
robot_arm.destinations.__setitem__,
(0, 0,),
0,
)
def test_generate_initial_guess(self):
self.robot_arm.generate_initial_guess()
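# Added note: these tests run under the standard unittest runner, e.g.
#   python -m unittest tests.test_robot_arm
# (module path assumed from this repository's layout).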
|
mit
| 6,607,707,870,683,648,000 | 26.4 | 78 | 0.571028 | false |
taxigps/xbmc-addons-chinese
|
plugin.video.youku/default.py
|
1
|
28143
|
# -*- coding: utf-8 -*-
import xbmc, xbmcgui, xbmcplugin, xbmcaddon, urllib2, urllib, re, string, sys, os, gzip, StringIO, math, urlparse
import base64, time, cookielib
import simplejson
# Plugin constants
__addon__ = xbmcaddon.Addon()
__addonname__ = __addon__.getAddonInfo('name')
__profile__ = xbmc.translatePath( __addon__.getAddonInfo('profile') ).decode("utf-8")
UserAgent = 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)'
ORDER_LIST1 = [['1','最多播放'], ['2','最多评论'], ['4','最受欢迎'], ['5','最近上映'], ['6','最近更新']]
DAYS_LIST1 = [['1','今日'], ['2','本周'], ['4','历史']]
ORDER_LIST2 = [['1','最多播放'], ['2','最新发布'], ['3','最多评论'], ['4','最多收藏'], ['5','最受欢迎']]
DAYS_LIST2 = [['1','今日'], ['2','本周'], ['3','本月'], ['4','历史']]
class youkuDecoder:
def __init__( self ):
return
def getFileIDMixString(self,seed):
mixed = []
source = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\:._-1234567890")
seed = float(seed)
for i in range(len(source)):
seed = (seed * 211 + 30031 ) % 65536
index = math.floor(seed /65536 *len(source))
mixed.append(source[int(index)])
source.remove(source[int(index)])
return mixed
def getFileId(self,fileId,seed):
mixed = self.getFileIDMixString(seed)
ids = fileId.split('*')
realId = []
for i in range(0,len(ids)-1):
realId.append(mixed[int(ids[i])])
return ''.join(realId)
def trans_e(self, a, c):
b = range(256)
f = 0
result = ''
h = 0
while h < 256:
f = (f + b[h] + ord(a[h % len(a)])) % 256
b[h], b[f] = b[f], b[h]
h += 1
q = f = h = 0
while q < len(c):
h = (h + 1) % 256
f = (f + b[h]) % 256
b[h], b[f] = b[f], b[h]
result += chr(ord(c[q]) ^ b[(b[h] + b[f]) % 256])
q += 1
return result
def trans_f(self, a, c):
"""
:argument a: list
:param c:
:return:
"""
b = []
for f in range(len(a)):
i = ord(a[f][0]) - 97 if "a" <= a[f] <= "z" else int(a[f]) + 26
e = 0
while e < 36:
if c[e] == i:
i = e
break
e += 1
v = i - 26 if i > 25 else chr(i + 97)
b.append(str(v))
return ''.join(b)
f_code_1 = 'becaf9be'
f_code_2 = 'bf7e5f01'
def _calc_ep(self, sid, fileId, token):
ep = self.trans_e(self.f_code_2, '%s_%s_%s' % (sid, fileId, token))
return base64.b64encode(ep)
def _calc_ep2(self, vid, ep):
e_code = self.trans_e(self.f_code_1, base64.b64decode(ep))
sid, token = e_code.split('_')
new_ep = self.trans_e(self.f_code_2, '%s_%s_%s' % (sid, vid, token))
return base64.b64encode(new_ep), token, sid
def get_sid(self, ep):
e_code = self.trans_e(self.f_code_1, base64.b64decode(ep))
return e_code.split('_')
def generate_ep(self, no, fileid, sid, token):
ep = urllib.quote(self._calc_ep(sid, fileid, token).encode('latin1'),
safe="~()*!.'"
)
return ep
def log(txt):
message = '%s: %s' % (__addonname__, txt)
xbmc.log(msg=message, level=xbmc.LOGDEBUG)
def GetHttpData(url, referer=''):
log("%s::url - %s" % (sys._getframe().f_code.co_name, url))
req = urllib2.Request(url)
req.add_header('User-Agent', UserAgent)
if referer:
req.add_header('Referer', referer)
try:
response = urllib2.urlopen(req)
httpdata = response.read()
if response.headers.get('content-encoding', None) == 'gzip':
httpdata = gzip.GzipFile(fileobj=StringIO.StringIO(httpdata)).read()
charset = response.headers.getparam('charset')
response.close()
except:
log( "%s (%d) [%s]" % (
sys.exc_info()[2].tb_frame.f_code.co_name,
sys.exc_info()[2].tb_lineno,
sys.exc_info()[1]
))
return ''
match = re.compile('<meta http-equiv=["]?[Cc]ontent-[Tt]ype["]? content="text/html;[\s]?charset=(.+?)"').findall(httpdata)
if match:
charset = match[0]
else:
match = re.compile('<meta charset="(.+?)"').findall(httpdata)
if match:
charset = match[0]
if charset:
charset = charset.lower()
if (charset != 'utf-8') and (charset != 'utf8'):
httpdata = httpdata.decode(charset, 'ignore').encode('utf8', 'ignore')
return httpdata
def searchDict(dlist,idx):
for i in range(0,len(dlist)):
if dlist[i][0] == idx:
return dlist[i][1]
return ''
def getCurrent(text,list,id):
match = re.compile('<li class="current"\s*><span>(.+?)</span>').search(text)
if match:
list.append([id, match.group(1)])
def getList(listpage,id,genre,area,year):
if id == 'c_95':
        str1 = '风格：'
        str3a = '发行：'
        str3b = 'r'
    elif id == 'c_84' or id == 'c_87':
        str1 = '类型：'
        str3a = '出品：'
        str3b = 'pr'
    else:
        str1 = '类型：'
        str3a = '时间：'
        str3b = 'r'
match = re.compile('<label>%s</label>(.+?)</ul>' % (str1), re.DOTALL).search(listpage)
genrelist = re.compile('_g_([^_\.]*)[^>]*>([^<]+)</a>').findall(match.group(1))
getCurrent(match.group(1), genrelist, genre)
if id == 'c_84' or id == 'c_87':
arealist = []
else:
        match = re.compile('<label>地区：</label>(.+?)</ul>', re.DOTALL).search(listpage)
arealist = re.compile('_a_([^_\.]*)[^>]*>([^<]+)</a>').findall(match.group(1))
getCurrent(match.group(1), arealist, area)
match = re.compile('<label>%s</label>(.+?)</ul>' % (str3a), re.DOTALL).search(listpage)
yearlist = re.compile('_%s_([^_\.]*)[^>]*>([^<]+)</a>' % (str3b)).findall(match.group(1))
getCurrent(match.group(1), yearlist, year)
return genrelist,arealist,yearlist
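# Added commentary: getList() returns three lists of [id, name] pairs scraped from
# the page's filter bar (genre, area, year/release), and getCurrent() appends the
# currently selected entry so searchDict() can still resolve its label when
# progList() builds the heading.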
def getList2(listpage,genre):
    match = re.compile('<label>类型：</label>(.+?)</ul>', re.DOTALL).search(listpage)
if match:
genrelist = re.compile('<li><a href=".*?/category/video/[^g]*g_([0-9]+)[^\.]*\.html"[^>]*>(.+?)</a></li>').findall(match.group(1))
getCurrent(match.group(1), genrelist, genre)
else:
genrelist = []
return genrelist
def rootList():
link = GetHttpData('http://list.youku.com/')
    match0 = re.compile('<label>分类：</label>(.+?)</ul>', re.DOTALL).search(link)
match = re.compile('<li><a\s*href="/category/([^/]+)/([^\.]+)\.html">(.+?)</a></li>', re.DOTALL).findall(match0.group(1))
totalItems = len(match)
for path, id, name in match:
if path == 'show':
u = sys.argv[0]+"?mode=1&name="+urllib.quote_plus(name)+"&id="+urllib.quote_plus(id)+"&genre=&area=&year=&order=1&days=1&page=1"
else:
u = sys.argv[0]+"?mode=11&name="+urllib.quote_plus(name)+"&id="+urllib.quote_plus(id)+"&genre=0&year=1&order=1&days=1&page=1"
li = xbmcgui.ListItem(name)
xbmcplugin.addDirectoryItem(int(sys.argv[1]),u,li,True,totalItems)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def progList(name,id,page,genre,area,year,order,days):
url = 'http://list.youku.com/category/show/%s_g_%s_a_%s_s_%s_d_%s_r_%s_p_%s.html' % (id, genre, area, order, days, year, page)
link = GetHttpData(url)
match = re.compile('<ul class="yk-pages">(.+?)</ul>', re.DOTALL).search(link)
plist = []
if match:
match1 = re.compile('<li.+?>([0-9]+)(</a>|</span>)</li>', re.DOTALL).findall(match.group(1))
if match1:
for num, temp in match1:
if (num not in plist) and (num != page):
plist.append(num)
totalpages = int(match1[len(match1)-1][0])
else:
totalpages = 1
match = re.compile('<div class="yk-filter" id="filter">(.+?)<div class="yk-filter-handle">', re.DOTALL).search(link)
if match:
listpage = match.group(1)
else:
listpage = ''
if id == 'c_95':
match = re.compile('<div class="yk-pack p-list"(.+?)</ul></div>', re.DOTALL).findall(link)
else:
match = re.compile('<div class="yk-pack pack-film">(.+?)</ul></div>', re.DOTALL).findall(link)
totalItems = len(match) + 1 + len(plist)
currpage = int(page)
genrelist,arealist,yearlist = getList(listpage,id,genre,area,year)
if genre:
genrestr = searchDict(genrelist,genre)
else:
        genrestr = '全部类型'
if area:
areastr = searchDict(arealist,area)
else:
        areastr = '全部地区'
if year:
yearstr = searchDict(yearlist,year)
else:
if id == 'c_84' or id == 'c_87':
            yearstr = '全部出品'
else:
            yearstr = '全部年份'
li = xbmcgui.ListItem(name+'๏ผ็ฌฌ'+str(currpage)+'/'+str(totalpages)+'้กต๏ผใ[COLOR FFFF0000]' + genrestr + '[/COLOR]/[COLOR FF00FF00]' + areastr + '[/COLOR]/[COLOR FFFFFF00]' + yearstr + '[/COLOR]/[COLOR FF00FF00]' + searchDict(DAYS_LIST1,days) + '[/COLOR]/[COLOR FF00FFFF]' + searchDict(ORDER_LIST1,order) + '[/COLOR]ใ๏ผๆๆญค้ๆฉ๏ผ')
u = sys.argv[0]+"?mode=4&name="+urllib.quote_plus(name)+"&id="+urllib.quote_plus(id)+"&genre="+urllib.quote_plus(genre)+"&area="+urllib.quote_plus(area)+"&year="+urllib.quote_plus(year)+"&order="+order+"&days="+days+"&page="+urllib.quote_plus(listpage)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True, totalItems)
for i in range(0,len(match)):
if id in ('c_96','c_95'):
mode = 2
isdir = False
else:
mode = 3
isdir = True
match1 = re.compile('/id_(.+?).html"').search(match[i])
p_id = match1.group(1)
match1 = re.compile('<img class="quic".*?src="(.+?)"').search(match[i])
p_thumb = match1.group(1)
match1 = re.compile('<li class="title"><a .*?">(.+?)</a>').search(match[i])
p_name = match1.group(1)
match1 = re.compile('<li class="status hover-hide"><span .*?<span>(.+?)</span>').search(match[i])
if match1:
p_name1 = p_name + '๏ผ' + match1.group(1) + '๏ผ'
else:
p_name1 = p_name
match1 = re.compile('<span class="vip-free">(.+?)</span>').search(match[i])
if match1:
p_name1 = p_name1 + '[' + match1.group(1) + ']'
li = xbmcgui.ListItem(str(i + 1) + '. ' + p_name1, iconImage = '', thumbnailImage = p_thumb)
u = sys.argv[0]+"?mode="+str(mode)+"&name="+urllib.quote_plus(p_name)+"&id="+urllib.quote_plus(p_id)+"&thumb="+urllib.quote_plus(p_thumb)
#li.setInfo(type = "Video", infoLabels = {"Title":p_name, "Director":p_director, "Genre":p_genre, "Plot":p_plot, "Year":p_year, "Cast":p_cast, "Tagline":p_tagline})
xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, isdir, totalItems)
for num in plist:
li = xbmcgui.ListItem("... ็ฌฌ" + num + "้กต")
u = sys.argv[0]+"?mode=1&name="+urllib.quote_plus(name)+"&id="+urllib.quote_plus(id)+"&genre="+urllib.quote_plus(genre)+"&area="+urllib.quote_plus(area)+"&year="+year+"&order="+order+"&days="+days+"&page="+str(num)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True, totalItems)
xbmcplugin.setContent(int(sys.argv[1]), 'movies')
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def getMovie(name,id,thumb):
if len(id)==21:
link = GetHttpData('http://www.youku.com/show_page/id_' + id + '.html')
match = re.compile('<a class="btnShow btnplayposi".*?href="http://v.youku.com/v_show/id_(.+?)\.html[^"]*"', re.DOTALL).search(link)
if not match:
match = re.compile('<div class="btnplay">.*?href="http://v.youku.com/v_show/id_(.+?)\.html[^"]*"', re.DOTALL).search(link)
if match:
# ๆญๆพๆญฃ็
PlayVideo(name, match.group(1), thumb)
else:
# ่งฃๆ้ขๅ็
match = re.compile('class="btnShow btnplaytrailer".*?href="http://v.youku.com/v_show/id_(.+?)\.html[^"]*"', re.DOTALL).search(link)
if match:
PlayVideo(name, match.group(1), thumb)
else:
xbmcgui.Dialog().ok(__addonname__, '่งฃๆๅฐๅๅผๅธธ๏ผๅฏ่ฝๆฏๆถ่ดน่็ฎ๏ผๆ ๆณๆญๆพ')
else:
PlayVideo(name, id, thumb)
def seriesList(name,id,thumb):
url = "http://v.youku.com/v_show/id_%s.html" % (id)
data = GetHttpData(url)
#pages = re.compile('<li data="(point_reload_[0-9]+)"', re.DOTALL).findall(data)
#if len(pages)>1:
# for i in range(1,len(pages)):
# url = "http://www.youku.com/show_point/id_%s.html?dt=json&divid=%s&tab=0&__rt=1&__ro=%s" % (id, pages[i], pages[i])
# link = GetHttpData(url)
# data += link
match = re.compile('class="item(.+?)</div>', re.DOTALL).findall(data)
totalItems = len(match)
for i in range(0,len(match)):
match1 = re.compile('//v.youku.com/v_show/id_(.+?)\.html').search(match[i])
if match1:
p_id = match1.group(1)
else:
continue
#match1 = re.compile('<div class="thumb"><img .*?src="(.+?)"').search(match[i])
p_thumb = thumb
match1 = re.compile('title="(.+?)"').search(match[i])
p_name = "%s %s" % (name, match1.group(1))
p_name1 = p_name
li = xbmcgui.ListItem(p_name1, iconImage = '', thumbnailImage = p_thumb)
u = sys.argv[0]+"?mode=10&name="+urllib.quote_plus(p_name)+"&id="+urllib.quote_plus(p_id)+"&thumb="+urllib.quote_plus(p_thumb)
#li.setInfo(type = "Video", infoLabels = {"Title":p_name, "Director":p_director, "Genre":p_genre, "Plot":p_plot, "Year":p_year, "Cast":p_cast, "Tagline":p_tagline})
xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, False, totalItems)
xbmcplugin.setContent(int(sys.argv[1]), 'movies')
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def progList2(name,id,page,genre,order,days):
url = 'http://list.youku.com/category/video/%s_g_%s_s_%s_d_%s_p_%s.html' % (id, genre, order, days, page)
link = GetHttpData(url)
match = re.compile('<ul class="yk-pages">(.+?)</ul>', re.DOTALL).search(link)
plist = []
if match:
match1 = re.compile('<li.+?>([0-9]+)(</a>|</span>)</li>', re.DOTALL).findall(match.group(1))
if match1:
for num, temp in match1:
if (num not in plist) and (num != page):
plist.append(num)
totalpages = int(match1[len(match1)-1][0])
else:
totalpages = 1
match = re.compile('<div class="yk-filter\s*" id="filter">(.+?)<div class="yk-filter-handle">', re.DOTALL).search(link)
if match:
listpage = match.group(1)
else:
listpage = ''
match = re.compile('<div class="yk-pack p-list"(.+?)</ul></div>', re.DOTALL).findall(link)
totalItems = len(match) + 1 + len(plist)
currpage = int(page)
genrelist = getList2(listpage, genre)
if genre == '0':
        genrestr = '全部类型'
else:
genrestr = searchDict(genrelist,genre)
li = xbmcgui.ListItem(name+'๏ผ็ฌฌ'+str(currpage)+'/'+str(totalpages)+'้กต๏ผใ[COLOR FFFF0000]' + genrestr + '[/COLOR]/[COLOR FF00FF00]' + searchDict(DAYS_LIST2,days) + '[/COLOR]/[COLOR FF00FFFF]' + searchDict(ORDER_LIST2,order) + '[/COLOR]ใ๏ผๆๆญค้ๆฉ๏ผ')
u = sys.argv[0]+"?mode=12&name="+urllib.quote_plus(name)+"&id="+urllib.quote_plus(id)+"&genre="+urllib.quote_plus(genre)+"&order="+order+"&days="+days+"&page="+urllib.quote_plus(listpage)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True, totalItems)
for i in range(0,len(match)):
match1 = re.compile('/id_(.+?).html"').search(match[i])
p_id = match1.group(1)
match1 = re.compile('<img class="quic".*?src="(.+?)"').search(match[i])
p_thumb = match1.group(1)
match1 = re.compile('<li class="title"><a .*?">(.+?)</a>').search(match[i])
p_name = match1.group(1)
p_name1 = p_name
li = xbmcgui.ListItem(str(i + 1) + '. ' + p_name1, iconImage = '', thumbnailImage = p_thumb)
u = sys.argv[0]+"?mode=10&name="+urllib.quote_plus(p_name)+"&id="+urllib.quote_plus(p_id)+"&thumb="+urllib.quote_plus(p_thumb)
#li.setInfo(type = "Video", infoLabels = {"Title":p_name, "Director":p_director, "Genre":p_genre, "Plot":p_plot, "Year":p_year, "Cast":p_cast, "Tagline":p_tagline})
xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, False, totalItems)
for num in plist:
li = xbmcgui.ListItem("... ็ฌฌ" + num + "้กต")
u = sys.argv[0]+"?mode=11&name="+urllib.quote_plus(name)+"&id="+urllib.quote_plus(id)+"&genre="+urllib.quote_plus(genre)+"&order="+order+"&days="+days+"&page="+str(num)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True, totalItems)
xbmcplugin.setContent(int(sys.argv[1]), 'movies')
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def selResolution(streamtypes):
ratelist = []
for i in range(0,len(streamtypes)):
        if streamtypes[i] in ('flv', 'flvhd'): ratelist.append([4, '标清', i, 'flv'])    # [清晰度设置值, 清晰度, streamtypes索引]
        if streamtypes[i] in ('mp4', 'mp4hd'): ratelist.append([3, '高清', i, 'mp4'])
        if streamtypes[i] in ('hd2', 'hd2v2', 'mp4hd2', 'mp4hd2v2'): ratelist.append([2, '超清', i, 'hd2'])
if streamtypes[i] in ('hd3', 'hd3v2', 'mp4hd3', 'mp4hd3v2'): ratelist.append([1, '1080P', i, 'hd3'])
ratelist.sort()
if len(ratelist) > 1:
resolution = int(__addon__.getSetting('resolution'))
        if resolution == 0:    # 每次询问视频清晰度
list = [x[1] for x in ratelist]
            sel = xbmcgui.Dialog().select('清晰度（低网速请选择低清晰度）', list)
if sel == -1:
return None, None, None, None
else:
sel = 0
while sel < len(ratelist)-1 and resolution > ratelist[sel][0]: sel += 1
else:
sel = 0
return streamtypes[ratelist[sel][2]], ratelist[sel][1], ratelist[sel][2], ratelist[sel][3]
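# Added commentary: selResolution() returns a 4-tuple
# (stream_type, display_label, stream_index, container), or (None, None, None, None)
# if the user cancels the quality dialog; PlayVideo() checks the first element
# before building the playback URL.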
def youku_ups(id):
res = urllib2.urlopen('https://log.mmstat.com/eg.js')
cna = res.headers['etag'][1:-1]
query = urllib.urlencode(dict(
vid = id,
ccode = '0516',
client_ip = '192.168.1.1',
utid = cna,
client_ts = time.time() / 1000,
ckey = 'DIl58SLFxFNndSV1GFNnMQVYkx1PP5tKe1siZu/86PR1u/Wh1Ptd+WOZsHHWxysSfAOhNJpdVWsdVJNsfJ8Sxd8WKVvNfAS8aS8fAOzYARzPyPc3JvtnPHjTdKfESTdnuTW6ZPvk2pNDh4uFzotgdMEFkzQ5wZVXl2Pf1/Y6hLK0OnCNxBj3+nb0v72gZ6b0td+WOZsHHWxysSo/0y9D2K42SaB8Y/+aD2K42SaB8Y/+ahU+WOZsHcrxysooUeND'
))
url = 'https://ups.youku.com/ups/get.json?%s' % (query)
link = GetHttpData(url, referer='http://v.youku.com/')
json_response = simplejson.loads(link)
api_data = json_response['data']
data_error = api_data.get('error')
if data_error:
api_error_code = data_error.get('code')
api_error_msg = data_error.get('note').encode('utf-8')
dialog = xbmcgui.Dialog()
ok = dialog.ok(__addonname__,'ๅฐๅ่งฃๆ้่ฏฏ๏ผ%d๏ผ๏ผ\n%s' % (api_error_code,api_error_msg))
return {}
else:
return api_data
def change_cdn(url):
# if the cnd_url starts with an ip addr, it should be youku's old CDN
# which rejects http requests randomly with status code > 400
# change it to the dispatcher of aliCDN can do better
# at least a little more recoverable from HTTP 403
dispatcher_url = 'vali.cp31.ott.cibntv.net'
if dispatcher_url in url:
return url
elif 'k.youku.com' in url:
return url
else:
url_seg_list = list(urlparse.urlsplit(url))
url_seg_list[1] = dispatcher_url
return urlparse.urlunsplit(url_seg_list)
def PlayVideo(name,id,thumb):
movdat = youku_ups(id)
if not movdat:
return
vid = id
lang_select = int(__addon__.getSetting('lang_select')) # ้ป่ฎค|ๆฏๆฌก้ๆฉ|่ชๅจ้ฆ้
if lang_select != 0 and movdat.has_key('dvd') and 'audiolang' in movdat['dvd']:
langlist = movdat['dvd']['audiolang']
if lang_select == 1:
list = [x['lang'] for x in langlist]
sel = xbmcgui.Dialog().select('้ๆฉ่ฏญ่จ', list)
if sel ==-1:
return
vid = langlist[sel]['vid'].encode('utf-8')
name = '%s %s' % (name, langlist[sel]['lang'].encode('utf-8'))
else:
lang_prefer = __addon__.getSetting('lang_prefer') # ๅฝ่ฏญ|็ฒค่ฏญ
for i in range(0,len(langlist)):
if langlist[i]['lang'].encode('utf-8') == lang_prefer:
vid = langlist[i]['vid'].encode('utf-8')
name = '%s %s' % (name, langlist[i]['lang'].encode('utf-8'))
break
if vid != id:
movdat = youku_ups(vid)
if not movdat:
return
streamtypes = [stream['stream_type'].encode('utf-8') for stream in movdat['stream']]
typeid, typename, streamno, resolution = selResolution(streamtypes)
if typeid:
'''
oip = movdat['security']['ip']
ep = movdat['security']['encrypt_string']
sid, token = youkuDecoder().get_sid(ep)
play_method = int(__addon__.getSetting('play_method'))
if play_method != 0: # m3u8ๆนๅผ
query = urllib.urlencode(dict(
vid=vid, ts=int(time.time()), keyframe=1, type=resolution,
ep=ep, oip=oip, ctype=12, ev=1, token=token, sid=sid,
))
cookie = ['%s=%s' % (x.name, x.value) for x in cj][0]
movurl = 'http://pl.youku.com/playlist/m3u8?%s|Cookie=%s' % (query, cookie)
else: # ้ป่ฎคๆญๆพๆนๅผ
if typeid in ('mp4', 'mp4hd'):
type = 'mp4'
else:
type = 'flv'
urls = []
segs = movdat['stream'][streamno]['segs']
total = len(segs)
for no in range(0, total):
k = segs[no]['key']
if k == -1:
dialog = xbmcgui.Dialog()
ok = dialog.ok(__addonname__,'ไผๅ่็ฎ๏ผๆ ๆณๆญๆพ')
return
fileid = segs[no]['fileid']
ep = youkuDecoder().generate_ep(no, fileid, sid, token)
query = urllib.urlencode(dict(
ctype = 12,
ev = 1,
K = k,
ep = urllib.unquote(ep),
oip = oip,
token = token,
yxon = 1
))
url = 'http://k.youku.com/player/getFlvPath/sid/{sid}_00/st/{container}/fileid/{fileid}?{query}'.format(
sid = sid,
container = type,
fileid = fileid,
query = query
)
link = GetHttpData(url)
json_response = simplejson.loads(link)
urls.append(json_response[0]['server'].encode('utf-8'))
movurl = 'stack://' + ' , '.join(urls)
'''
movurl = movdat['stream'][streamno]['m3u8_url']
#urls = []
#is_preview = False
#for seg in movdat['stream'][streamno]['segs']:
# if seg.get('cdn_url'):
# urls.append(change_cdn(seg['cdn_url'].encode('utf-8')))
# else:
# is_preview = True
#if not is_preview:
# movurl = 'stack://' + ' , '.join(urls)
name = '%s[%s]' % (name, typename)
listitem=xbmcgui.ListItem(name,thumbnailImage=thumb)
listitem.setInfo(type="Video",infoLabels={"Title":name})
xbmc.Player().play(movurl, listitem)
def performChanges(name,id,listpage,genre,area,year,order,days):
genrelist,arealist,yearlist = getList(listpage,id,genre,area,year)
change = False
if id == 'c_95':
        str1 = '风格'
        str3 = '发行'
    elif id == 'c_84' or id == 'c_87':
        str1 = '类型'
        str3 = '出品'
    else:
        str1 = '类型'
        str3 = '时间'
dialog = xbmcgui.Dialog()
if len(genrelist)>0:
list = [x[1] for x in genrelist]
sel = dialog.select(str1, list)
if sel != -1:
genre = genrelist[sel][0]
change = True
if len(arealist)>0:
list = [x[1] for x in arealist]
sel = dialog.select('ๅฐๅบ', list)
if sel != -1:
area = arealist[sel][0]
change = True
if len(yearlist)>0:
list = [x[1] for x in yearlist]
sel = dialog.select(str3, list)
if sel != -1:
year = yearlist[sel][0]
change = True
list = [x[1] for x in DAYS_LIST1]
sel = dialog.select('่ๅด', list)
if sel != -1:
days = DAYS_LIST1[sel][0]
change = True
list = [x[1] for x in ORDER_LIST1]
sel = dialog.select('ๆๅบ', list)
if sel != -1:
order = ORDER_LIST1[sel][0]
change = True
if change:
progList(name,id,'1',genre,area,year,order,days)
def performChanges2(name,id,listpage,genre,order,days):
genrelist = getList2(listpage, genre)
change = False
dialog = xbmcgui.Dialog()
if len(genrelist)>0:
list = [x[1] for x in genrelist]
sel = dialog.select('็ฑปๅ', list)
if sel != -1:
genre = genrelist[sel][0]
change = True
list = [x[1] for x in DAYS_LIST2]
sel = dialog.select('่ๅด', list)
if sel != -1:
days = DAYS_LIST2[sel][0]
change = True
list = [x[1] for x in ORDER_LIST2]
sel = dialog.select('ๆๅบ', list)
if sel != -1:
order = ORDER_LIST2[sel][0]
change = True
if change:
progList2(name,id,'1',genre,order,days)
def get_params():
param = []
paramstring = sys.argv[2]
if len(paramstring) >= 2:
params = sys.argv[2]
cleanedparams = params.replace('?', '')
if (params[len(params) - 1] == '/'):
params = params[0:len(params) - 2]
pairsofparams = cleanedparams.split('&')
param = {}
for i in range(len(pairsofparams)):
splitparams = {}
splitparams = pairsofparams[i].split('=')
if (len(splitparams)) == 2:
param[splitparams[0]] = splitparams[1]
return param
params = get_params()
mode = None
name = ''
id = ''
genre = ''
area = ''
year = ''
order = ''
page = '1'
url = None
thumb = None
try:
thumb = urllib.unquote_plus(params["thumb"])
except:
pass
try:
url = urllib.unquote_plus(params["url"])
except:
pass
try:
page = urllib.unquote_plus(params["page"])
except:
pass
try:
order = urllib.unquote_plus(params["order"])
except:
pass
try:
days = urllib.unquote_plus(params["days"])
except:
pass
try:
year = urllib.unquote_plus(params["year"])
except:
pass
try:
area = urllib.unquote_plus(params["area"])
except:
pass
try:
genre = urllib.unquote_plus(params["genre"])
except:
pass
try:
id = urllib.unquote_plus(params["id"])
except:
pass
try:
name = urllib.unquote_plus(params["name"])
except:
pass
try:
mode = int(params["mode"])
except:
pass
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
urllib2.install_opener(opener)
if mode == None:
rootList()
elif mode == 1:
progList(name,id,page,genre,area,year,order,days)
elif mode == 2:
getMovie(name,id,thumb)
elif mode == 3:
seriesList(name,id,thumb)
elif mode == 4:
performChanges(name,id,page,genre,area,year,order,days)
elif mode == 10:
PlayVideo(name,id,thumb)
elif mode == 11:
progList2(name,id,page,genre,order,days)
elif mode == 12:
performChanges2(name,id,page,genre,order,days)
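# Added summary of the dispatch above (not original code): mode None shows the root
# categories; 1/4 are the show listings and their filter dialog; 2 resolves a show
# page to a playable video id; 3 lists episodes; 10 plays a video; 11/12 are the
# generic video listings and their filter dialog.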
|
gpl-2.0
| -6,283,709,938,491,380,000 | 38.605452 | 326 | 0.545481 | false |
dani-i/bachelor-project
|
session/test_session.py
|
1
|
5751
|
from controllers.data_set_controller import DataSetController
from controllers.session_controller import SessionController
from utils.train.session_details import SessionDetails
import constants.cnn_constants as cnn_const
import neural_networks.cnn as cnn
import tensorflow as tf
import threading
import math
class TestSession(threading.Thread):
def __init__(self,
session_details: SessionDetails,
test_data_set_path: str,
automated_test_session,
testing_finished):
super(TestSession, self).__init__()
self._test_data_set_path = test_data_set_path
self._testing_finished = testing_finished
self._session_details = session_details
self._GUI = automated_test_session
self._confusion_matrix = [
[
0
for _ in range(self._session_details.number_of_classes)
]
for _ in range(self._session_details.number_of_classes)
]
self._cnn = cnn.CNN(
session_details=session_details
)
self.stop_testing = False
#########################################################################
# Auxiliary methods
def _test(self):
with tf.Graph().as_default() as graph:
self._test_data_set_details = DataSetController.read_main_data_set_file(
file_path=self._test_data_set_path
)
labels, images = SessionController.get_examples_for_test_session(
test_data_set_details=self._test_data_set_details,
session_details=self._session_details
)
logits = self._cnn.create_cnn_model(images)
top_k_op = tf.nn.top_k(logits, 1)
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(
logdir=self._session_details.checkpoints_directory,
graph=graph
)
variable_averages = tf.train.ExponentialMovingAverage(
cnn_const.CNN_MOVING_AVERAGE_DECAY
)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
writer = tf.summary.FileWriter(
logdir=self._session_details.checkpoints_directory
)
with tf.Session() as sess:
checkpoint = tf.train.get_checkpoint_state(
checkpoint_dir=self._session_details.checkpoints_directory
)
if checkpoint and checkpoint.model_checkpoint_path:
saver.restore(sess, checkpoint.model_checkpoint_path)
global_step = checkpoint.model_checkpoint_path.split('/')[-1].split('-')[-1]
else:
print('No checkpoint file found')
return
coord = tf.train.Coordinator()
threads = []
try:
for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
threads.extend(
qr.create_threads(
sess,
coord=coord,
daemon=True,
start=True
)
)
num_iter = int(
math.ceil(
self._test_data_set_details.number_of_examples
/ self._session_details.examples_per_batch
)
)
left_to_check = \
self._test_data_set_details.number_of_examples
step = 0
while step < num_iter and not coord.should_stop():
predictions, label, img = sess.run(
[
top_k_op,
labels,
images
]
)
for index in range(len(label)):
b = predictions.indices[index][0]
a = label[index]
self._confusion_matrix[a][b] \
= self._confusion_matrix[a][b] \
+ 1
left_to_check -= 1
if left_to_check == 0:
break
step += 1
percent = int(step / num_iter * 100)
self._GUI.update_test_progress(
progress=percent
)
if not self.stop_testing:
summary = tf.Summary()
summary.ParseFromString(sess.run(summary_op))
summary_writer.add_summary(summary, global_step)
except Exception as e:
coord.request_stop(e)
writer.close()
coord.request_stop()
coord.join(threads, stop_grace_period_secs=10)
writer.close()
#########################################################################
# Public methods
def run(self):
self._test()
self._GUI.confusion_matrix_update_method(
confusion_matrix=self._confusion_matrix
)
self._testing_finished()
def start_test(self):
self.start()
#########################################################################
|
apache-2.0
| 4,908,201,968,194,576,000 | 30.255435 | 96 | 0.449661 | false |
quaddra/engage
|
python_pkg/engage/drivers/standard/postgres_database__9_x/driver.py
|
1
|
5516
|
"""Resource manager for postgres-database 9.x
"""
# Common stdlib imports
import sys
import os
import os.path
## import commands
# fix path if necessary (if running from source or running as test)
try:
import engage.utils
except:
sys.exc_clear()
dir_to_add_to_python_path = os.path.abspath((os.path.join(os.path.dirname(__file__), "../../../..")))
sys.path.append(dir_to_add_to_python_path)
import engage_utils.process as procutils
import engage.drivers.resource_manager as resource_manager
import engage.drivers.utils
# Drivers compose *actions* to implement their methods.
from engage.drivers.action import *
from engage.drivers.action import _check_file_exists
# setup errors
from engage.utils.user_error import UserError, EngageErrInf
import gettext
_ = gettext.gettext
errors = { }
def define_error(error_code, msg):
global errors
error_info = EngageErrInf(__name__, error_code, msg)
errors[error_info.error_code] = error_info
# error codes
# FILL IN
ERR_BAD_USER = 1
define_error(ERR_BAD_USER,
_("Install must be running as database user %(db_user)s, was running as %(user)s"))
# setup logging
from engage.utils.log_setup import setup_engage_logger
logger = setup_engage_logger(__name__)
# this is used by the package manager to locate the packages.json
# file associated with the driver
def get_packages_filename():
return engage.drivers.utils.get_packages_filename(__file__)
@make_value_action
def is_database_installed(self, psql_exe, database_name, database_user):
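    # Listing the relations (\d) succeeds only when the database exists and is
    # accessible to the given user, so a zero exit code means "already installed".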
_check_file_exists(psql_exe, self)
rc = procutils.run_and_log_program([psql_exe, '-d', database_name,
'-U', database_user, '-c', r'\d'], None,
self.ctx.logger, os.path.dirname(psql_exe))
return rc==0
def make_context(resource_json, sudo_password_fn, dry_run=False):
"""Create a Context object (defined in engage.utils.action). This contains
the resource's metadata in ctx.props, references to the logger and sudo
password function, and various helper functions. The context object is used
by individual actions.
If your resource does not need the sudo password, you can just pass in
None for sudo_password_fn.
"""
ctx = Context(resource_json, logger, __file__,
sudo_password_fn=sudo_password_fn,
dry_run=dry_run)
ctx.check_port('config_port',
database_name=unicode,
create_schema_script=unicode)
ctx.check_port('input_ports.postgres',
psql_exe=unicode,
pg_ctl_exe=unicode,
createdb_exe=unicode,
createuser_exe=unicode,
initdb_exe=unicode)
ctx.check_port('input_ports.postgres_inst',
database_dir=unicode,
user=unicode)
# add any extra computed properties here using the ctx.add() method.
return ctx
#
# Now, define the main resource manager class for the driver.
# If this driver is a service, inherit from service_manager.Manager.
# If the driver is just a resource, it should inherit from
# resource_manager.Manager. If you need the sudo password, add
# PasswordRepoMixin to the inheritance list.
#
class Manager(resource_manager.Manager):
# Uncomment the line below if this driver needs root access
## REQUIRES_ROOT_ACCESS = True
def __init__(self, metadata, dry_run=False):
package_name = "%s %s" % (metadata.key["name"],
metadata.key["version"])
resource_manager.Manager.__init__(self, metadata, package_name)
self.ctx = make_context(metadata.to_json(),
None, # self._get_sudo_password,
dry_run=dry_run)
def validate_pre_install(self):
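        # The install step must run as the OS user owning the postgres instance;
        # any other user triggers ERR_BAD_USER.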
p = self.ctx.props
user = getpass.getuser()
if user != p.input_ports.postgres_inst.user:
raise UserError(errors[ERR_BAD_USER],
msg_args={'user':user,
'db_user':p.input_ports.postgres_inst.user})
if p.config_port.create_schema_script!='':
self.ctx.r(check_file_exists, p.config_port.create_schema_script)
def is_installed(self):
p = self.ctx.props
return self.ctx.rv(is_database_installed, p.input_ports.postgres.psql_exe,
p.config_port.database_name,
p.input_ports.postgres_inst.user)
def install(self, package):
p = self.ctx.props
r = self.ctx.r
r(run_program, [p.input_ports.postgres.createdb_exe,
p.config_port.database_name],
cwd=p.input_ports.postgres_inst.database_dir)
if p.config_port.create_schema_script!='':
logger.info('Will run %s to create schema for %s' %
(p.config_port.create_schema_script,
p.config_port.database_name))
r(run_program,
[p.input_ports.postgres.psql_exe,
'-d', p.config_port.database_name,
'-f', p.config_port.create_schema_script],
cwd=p.input_ports.postgres_inst.database_dir)
else:
logger.info("No create schema script specified for %s" %
p.config_port.database_name)
def validate_post_install(self):
assert self.is_installed()
|
apache-2.0
| -8,512,102,885,461,848,000 | 35.529801 | 105 | 0.613851 | false |
Teino1978-Corp/Teino1978-Corp-light_.gitignore
|
light_data_migrations_0008_fi_comparativo.py
|
1
|
1169
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('master', '0019_periodo_tipoperiodo'),
('data', '0007_auto_20150216_0921'),
]
operations = [
migrations.CreateModel(
name='FI_Comparativo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sem_precio', models.FloatField()),
('mes_precio', models.FloatField()),
('sem_local', models.FloatField()),
('mes_local', models.FloatField()),
('sem_frecuencia', models.FloatField()),
('mes_frecuencia', models.FloatField()),
('sem_ticket', models.FloatField()),
('mes_ticket', models.FloatField()),
('oficina', models.ForeignKey(to='master.OficinaVentas')),
('periodo', models.ForeignKey(to='master.Periodo')),
],
options={
},
bases=(models.Model,),
),
]
|
mit
| -5,811,489,867,645,398,000 | 33.382353 | 114 | 0.521814 | false |
googleapis/python-talent
|
setup.py
|
1
|
2740
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import setuptools
# Package metadata.
name = "google-cloud-talent"
description = "Google Cloud Talent Solution API client library"
version = "2.1.0"
# Should be one of:
# 'Development Status :: 3 - Alpha'
# 'Development Status :: 4 - Beta'
# 'Development Status :: 5 - Production/Stable'
release_status = "Development Status :: 4 - Beta"
dependencies = [
"google-api-core[grpc] >= 1.26.0, <2.0.0dev",
"proto-plus >= 1.15.0",
"packaging >= 14.3",
]
extras = {"libcst": "libcst >= 0.2.5"}
# Setup boilerplate below this line.
package_root = os.path.abspath(os.path.dirname(__file__))
readme_filename = os.path.join(package_root, "README.rst")
with io.open(readme_filename, encoding="utf-8") as readme_file:
readme = readme_file.read()
# Only include packages under the 'google' namespace. Do not include tests,
# benchmarks, etc.
packages = [
package
for package in setuptools.PEP420PackageFinder.find()
if package.startswith("google")
]
# Determine which namespaces are needed.
namespaces = ["google"]
if "google.cloud" in packages:
namespaces.append("google.cloud")
setuptools.setup(
name=name,
version=version,
description=description,
long_description=readme,
author="Google LLC",
author_email="googleapis-packages@google.com",
license="Apache 2.0",
url="https://github.com/googleapis/python-talent",
classifiers=[
release_status,
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Operating System :: OS Independent",
"Topic :: Internet",
],
platforms="Posix; MacOS X; Windows",
packages=packages,
namespace_packages=namespaces,
install_requires=dependencies,
extras_require=extras,
python_requires=">=3.6",
scripts=["scripts/fixup_talent_v4beta1_keywords.py"],
include_package_data=True,
zip_safe=False,
)
|
apache-2.0
| -6,588,985,584,347,073,000 | 29.10989 | 75 | 0.684307 | false |
cria/microSICol
|
py/modules/labels.py
|
1
|
38937
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
'''
This file contains a mapping from the labels found in the html
templates to translatable strings.
'''
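# Note: _() is used below without being imported here, so a gettext-style _() is
# expected to be installed (e.g. via gettext.install()) before this module loads.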
label_dict = {
"label_Container": _("Container"),
"label_Rep_Division": _("Rep Division"),
#taxa
"label_Species_General_Comments": _("Comments"),
"label_Species_General_Hazard_Group_Comments": _("Comments"),
"label_Species_General_Author": _("Author"),
"label_Species_General_Species": _("singular|Species"),
"label_Species_General_Classification": _("Classification"),
"label_Species_General_Taxonomical_References": _("Taxonomic References"),
"label_Species_General_Synonym": _("Synonym"),
"label_Species_General_Hazard_Group": _("Hazard_Group"),
"label_Species_General_Reference": _("Reference"),
"label_Species_General_Alternative_State": _("Alternative State"),
"label_Species_General_Alternative_State_Type": _("Type"),
"label_Species_General_Scientific_Name": _("Scientific Name"),
"label_Species_General_Higher_Taxa": _("Higher Taxa"),
"label_Species_General_Taxonomic_Group": _("Taxonomic Group"),
#strains
"label_Strains_General_Code": _("Code"),
"label_Strains_General_Comments": _("Comments"),
"label_Strains_General_Species": _("singular|Species"),
"label_Strains_General_Division": _("Division"),
"label_Strains_General_Numeric_Code": _("Number"),
"label_Strains_General_Autonum": _("Auto Numeration"),
"label_Strains_General_Origin_Code" : _("Origin Code"),
"label_Strains_General_Taxonomic_Complement" : _("Taxonomic Complement"),
"label_Strains_General_Type" : _("Type"),
"label_Strains_General_Status" : _("Status"),
"label_Strains_General_Is_OGM" : _("OGM"),
"label_Strains_General_History" : _("History"),
"label_Strains_General_Codes_In_Other_Collections" : _("Codes in Other Collections"),
"label_Strains_Origin_Comments": _("Comments"),
"label_Strains_Origin_Origin_Place" : _("Origin Place"),
"label_Strains_Origin_Date" : _("Date"),
"label_Strains_Origin_Collector" : _("Collector"),
"label_Strains_Origin_Collector_Person" : _("Person"),
"label_Strains_Origin_Collector_Institution" : _("Institution"),
"label_Strains_Origin_Country" : _("Country"),
"label_Strains_Origin_State" : _("State"),
"label_Strains_Origin_City" : _("City"),
"label_Strains_Origin_Place" : _("Place"),
"label_Strains_Origin_GPS": _("GPS"),
"label_Strains_Origin_GPS_Latitude": _("Latitude"),
"label_Strains_Origin_GPS_Longitude" : _("Longitude"),
"label_Strains_Origin_GPS_Precision" : _("Precision"),
"label_Strains_Origin_GPS_Datum" : _("Datum"),
"label_Strains_Origin_GPS_Comments" : _("Comments"),
"label_Strains_Origin_Precision_Meters" :_("(m)"),
"label_Strains_Origin_Host": _("HOST"),
"label_Strains_Origin_Host_Common_Name": _("Common Name"),
"label_Strains_Origin_Host_Genus" : _("Genus"),
"label_Strains_Origin_Host_Infraespecific_Name": _("Infraespecific Name"),
"label_Strains_Origin_Host_Complement_Infraespecific": _("Taxonomic Complement"),
"label_Strains_Origin_Host_Level" : _("Level"),
"label_Strains_Origin_International_Code" : _("International Code"),
"label_Strains_Origin_Substratum" : _("Substratum"),
"label_Strains_Origin_Clinic_Form" : _("Clinic Form"),
"label_Strains_Origin_HIV" : _("HIV"),
"label_Strains_Origin_Specific_Epithet" : _("Specific Epithet"),
"label_Strains_Isolation_Comments": _("Comments"),
"label_Strains_Isolation_Date" : _("Date"),
"label_Strains_Isolation_Isolated_By" : _("Isolated by"),
"label_Strains_Isolation_Person" : _("Person"),
"label_Strains_Isolation_Institution" : _("Institution"),
"label_Strains_Isolation_Isolated_From": _("Isolated from"),
"label_Strains_Isolation_Method" : _("Method"),
"label_Strains_Identification_Comments": _("Comments"),
"label_Strains_Identification_Identified_As_Species" : _("Identified as"),
"label_Strains_Identification_Identified_Genus" : _("Genus"),
"label_Strains_Identification_Identified_Specie" : _("Specie"),
"label_Strains_Identification_Identified_Infraespecific_Name": _("Infraespecific Name"),
"label_Strains_Identification_Identified_Complement_Infraespecific": _("Taxonomic Complement"),
"label_Strains_Identification_Identified_Level" : _("Level"),
"label_Strains_Identification_Date" : _("Date"),
"label_Strains_Identification_Identified_By" : _("Identified by"),
"label_Strains_Identification_Person" : _("Person"),
"label_Strains_Identification_Institution" : _("Institution"),
"label_Strains_Identification_Method" : _("Method"),
"label_Strains_Deposit_Comments": _("Comments"),
"label_Strains_Deposit_Deposited_By_Person" : _("Person"),
"label_Strains_Deposit_Deposited_By_Institution" : _("Institution"),
"label_Strains_Deposit_Deposited_As_Species" : _("Deposited as"),
"label_Strains_Deposit_Deposited_By" : _("Deposited by"),
"label_Strains_Deposit_Authentication": _("Authentication"),
"label_Strains_Deposit_Authentication_Date": _("Date"),
"label_Strains_Deposit_Authentication_Person": _("Responsible"),
"label_Strains_Deposit_Authentication_Result": _("Result"),
"label_Strains_Deposit_Deposited_Genus" : _("Genus"),
"label_Strains_Deposit_Deposited_Infraespecific_Name": _("Infraespecific Name"),
"label_Strains_Deposit_Deposited_Complement_Infraespecific": _("Taxonomic Complement"),
"label_Strains_Deposit_Deposited_Level" : _("Level"),
"label_Strains_Deposit_Date" : _("Date"),
"label_Strains_Deposit_Reason" : _("Deposit Type"),
"label_Strains_Deposit_Recommended_Preservation_Method" : _("Recommended Preservation Method"),
"label_Strains_Deposit_Culture_Was_Sent" : _("How Culture was sent"),
"label_Strains_Deposit_Specific_Epithet" : _("Specific Epithet"),
"label_Strains_Growth_Comments": _("Comments"),
"label_Strains_Growth_Recommended_Culture_Medium" : _("Recommended Culture Medium"),
"label_Strains_Growth_Recommended_Temperature" : _("Recommended Temperature"),
"label_Strains_Growth_Incubation_Time" : _("Incubation Time"),
"label_Strains_Growth_Ph" : _("PH"),
"label_Strains_Growth_Oxygen_Requirements" : _("Oxygen Requirements"),
"label_Strains_Characteristics_Biochemical" : _("Biochemical"),
"label_Strains_Characteristics_Molecular" : _("Molecular"),
"label_Strains_Characteristics_Immunologic" : _("Immunological"),
"label_Strains_Characteristics_Morphologic" : _("Morphological"),
"label_Strains_Characteristics_Pathogenic" : _("Pathogenic"),
"label_Strains_Characteristics_Genotypic" : _("Genotypic"),
"label_Strains_Characteristics_OGM" : _("OGM"),
"label_Strains_Characteristics_OGM_Group" : _("Group"),
"label_Strains_Characteristics_OGM_Comments" : _("Comments"),
"label_Strains_Characteristics_Biological_Risk_Comments" : _("Biological Risk Comments"),
"label_Strains_Characteristics_Restrictions" : _("Restrictions"),
"label_Strains_Characteristics_Pictures" : _("Pictures"),
"label_Strains_Characteristics_URL" : _("References (links etc.)"),
"label_Strains_Characteristics_Additional_Notes_to_Catalog" : _("Additional Notes to Catalog"),
"label_Strains_Properties_Properties" : _("Properties"),
"label_Strains_Properties_Applications" : _("Applications"),
"label_Strains_Properties_URL" : _("References (links etc.)"),
"label_Strains_QC_Purity": _("Purity"),
"label_Strains_QC_Counting": _("Counting"),
"label_Strains_QC_Counting_Unity": _("UFC/mL"),
"label_Strains_QC_Not_Apply": _("Does not apply"),
"label_Strains_QC_Date" : _("Date"),
"label_Strains_QC_Lot": _("Lot"),
"label_Strains_QC_New_Test": _("Add new test"),
"label_Strains_QC_Used_Test": _("Used Test"),
"label_Strains_QC_Result": _("Result"),
"label_Strains_QC_Observations": _("Observations"),
"label_Strains_QC_Responsible": _("Responsible"),
"label_Strains_QC_Minimize": _("Minimize"),
"label_Strains_QC_Remove_Test": _("Remove test"),
"label_Strains_Stock_Lot_Number": _("Lot"),
"label_Strains_Stock_Minimum": _("Minimum Stock"),
"label_Strains_Stock_Preservation_Method": _("Preservation Method"),
"label_Strains_Stock_Total_Ampoules" : _("Total of available ampoules"),
"label_Strains_Stock_In_Stock": _("In Stock"),
"label_Strains_Security_Checkbox_Go_Catalog": _("This strain goes to catalog"),
#institutions
"label_Institutions_General_Code1": _("Code1"),
"label_Institutions_General_Code2": _("Code2"),
"label_Institutions_General_Code3": _("Code3"),
"label_Institutions_General_Locator": _("Locator"),
"label_Institutions_General_Name": _("Name"),
"label_Institutions_General_Address": _("Address"),
"label_Institutions_General_Phone": _("Phones/Fax"),
"label_Institutions_General_Email": _("Email"),
"label_Institutions_General_Website": _("Website"),
"label_Institutions_General_Comments": _("Comments"),
"label_Institutions_General_Complement": _("Complement"),
"label_Institutions_Security_Checkbox_Go_Catalog": _("This institution goes to catalog"),
#people
"label_People_Security_Checkbox_Go_Catalog": _("This people goes to catalog"),
"label_People_General_Nickname": _("NickName"),
"label_People_General_Name": _("Name"),
"label_People_General_Phone": _("Phones/Fax"),
"label_People_General_Comments": _("Comments"),
"label_People_General_Institution": _("Institution"),
"label_People_General_Personal_Address": _("Personal Address"),
"label_People_General_Personal_Email": _("Personal E-mail"),
#documents
"label_Documents_General_Code": _("Code"),
"label_Documents_General_Qualifier": _("Qualifier"),
"label_Documents_Gereral_Test_Category": _("Category"),
"label_Documents_General_Title": _("Title"),
"label_Documents_General_Description": _("Description"),
"label_Documents_General_File_Name": _("File Name"),
"label_Documents_General_Update_Insert_File" : _("Update/Insert File"),
"label_Documents_Security_Checkbox_Go_Catalog": _("This document goes to catalog"),
#references
"label_References_General_Code": _("Code"),
"label_References_General_Title": _("Title"),
"label_References_General_Comments": _("Comments"),
"label_References_General_Year":_("Year"),
"label_References_General_URL": _("URL"),
"label_References_General_Author": _("Author"),
"label_References_Security_Checkbox_Go_Catalog": _("This reference goes to catalog"),
#preservation
"label_Preservation_General_Strain_Code": _("Code"),
"label_Preservation_General_Responsible": _("Responsible"),
"label_Preservation_General_Lot_Number": _("Lot"),
"label_Preservation_General_Origin_Lot_Number": _("Lot"),
"label_Preservation_Strain_Origin_Position": _("Original Position"),
"label_Preservation_General_Used_Method": _("Used Method"),
"label_Preservation_Strain_Stock_Minimum_Abreviation": _("Min."),
"label_Preservation_Strain_Type": _("Preservation Type"),
"label_Preservation_General_Process_Data": _("Process Data"),
"label_Preservation_Strain_Macro_Characs": _("Macroscopic Characteristics"),
"label_Preservation_Strain_Micro_Characs": _("Microscopic Characteristics"),
"label_Preservation_Strain_Result": _("Results"),
"label_Preservation_Strain_Obs": _("Observations"),
"label_Preservation_Strain_Strain": _("Strain"),
"label_Preservation_Strain_Origin": _("Origin"),
"label_Preservation_Strain_Origin_Fieldset": _("Origin"),
"label_Preservation_Strain_Purity": _("Purity"),
"label_Preservation_Strain_Purity_Fieldset": _("Purity"),
"label_Preservation_Strain_Stock_Position": _("Stock Position"),
"label_Preservation_General_Preservation_Method_Fieldset": _("Preservation Method"),
"label_Preservation_Strain_Cryoprotector": _("Cryoprotector"),
"label_Preservation_Strain_Counting": _("Counting"),
"label_Preservation_Strain_Counting_Unity": _("UFC/mL"),
"label_Preservation_Strain_Not_Apply": _("Does not apply"),
"label_Preservation_Strain_Culture_Conditions" : _("Culture Conditions"),
"label_Preservation_Strain_Culture_Medium" : _("Culture Medium"),
"label_Preservation_Strain_Temperature" : _("Temperature"),
"label_Preservation_Strain_Decrease_Stock": _("Decrease Stock"),
"label_Preservation_Strain_Stock_Limit": _("Minimum Stock"),
"label_Preservation_General_Date" : _("Date"),
"label_Preservation_Strain_Incubation_Time" : _("Incubation Time"),
"label_Preservation_Strain_In_Stock": _("In Stock"),
"label_Preservation_General_Lot": _("Lot"),
"label_Preservation_Strain_Original_Location": _("Original Location"),
"label_Preservation_Strain_Maximize": _("Maximize"),
"label_Preservation_Strain_Minimize": _("Minimize"),
"label_Preservation_Strain_Remove_Strain": _("remove Strain"),
"label_Preservation_Strain_Add_New_Strain": _("add new Strain"),
#distribution
"label_Distribution_General_Responsible": _("Responsible"),
"label_Distribution_General_Lot_Number": _("Lot"),
"label_Distribution_General_Quantity": _("Qtty."),
"label_Distribution_General_Reason": _("Reason"),
"label_Distribution_General_Institution": _("Institution"),
"label_Distribution_General_Person": _("Person"),
"label_Distribution_General_Strain": _("Strain"),
"label_Distribution_General_Strain": _("Strain"),
"label_Distribution_General_Institution": _("Institution"),
"label_Distribution_General_Date" : _("Date"),
#configuration
"label_Configuration_Group_User": _("User"),
"label_Configuration_Users_Name": _("Name"),
"label_Configuration_Users_Login": _("Login"),
"label_Configuration_Users_Password": _("Password"),
"label_Configuration_Users_Password_Quality": _("Password quality"),
"label_Configuration_Users_Confirm_Password": _("Confirm password"),
"label_Configuration_Users_Comments": _("Comments"),
"label_Configuration_Users_Roles": _("Groups"),
"label_Configuration_Users_Create_Opt": _("Create"),
"label_Configuration_Users_Delete_Opt": _("Delete"),
"label_Configuration_Coll_Base":_("Base"),
"label_Configuration_Coll_Code":_("Code"),
"label_Configuration_Coll_Name":_("Name"),
"label_Configuration_Coll_Logo":_("Logo"),
"label_Configuration_Coll_Logo_Size":_("(130x30)"),
"label_Configuration_Division_Division": _("Division"),
"label_Configuration_Division_Pattern": _("Pattern"),
"label_Configuration_Role_Name": _("Name"),
"label_Configuration_Role_Description": _("Description"),
"label_Configuration_Role_Type": _("Type"),
"label_Configuration_Role_Modules": _("Modules"),
"label_Configuration_Role_Group": _("Group"),
"label_Configuration_Role_Level": _("Level"),
"label_Configuration_Role_Group_Members": _("Group Members"),
"label_Configuration_Subcoll_Coll":_("Collection"),
"label_Configuration_Subcoll_Name":_("Name"),
"label_Configuration_Subcoll_Code":_("Code"),
"label_Configuration_Subcoll_DateInput":_("Date Input Mask"),
"label_Configuration_Subcoll_DateOutput":_("Date Output Mask"),
"label_Configuration_Subcoll_Data_Lang": _("Input Language"),
"label_Configuration_Subcoll_Data_Lang_Associates": _("Associates"),
"label_Configuration_Subcoll_Data_Lang_Not_Associates": _("Not Associates"),
"label_Configuration_Subcoll_To_Associate_Data_Lang": _("To Associate"),
"label_Configuration_Subcoll_To_Dissociate_Data_Lang": _("To Dissociate"),
"label_Configuration_Subcoll_Move_Up_Data_Lang": _("Move Up"),
"label_Configuration_Subcoll_Move_Down_Data_Lang": _("Move Down"),
"label_Configuration_Subcoll_Lang":_("Language"),
"label_Configuration_Subcoll_Day":_("Day"),
"label_Configuration_Subcoll_Month":_("Month"),
"label_Configuration_Subcoll_Year":_("Year"),
"label_Configuration_Combos_Dep_Reason_Associates": _("Associates"),
"label_Configuration_Combos_Dep_Reason_Not_Associates": _("Not Associates"),
"label_Configuration_Combos_Move_Up_Dep_Reason": _("Move Up"),
"label_Configuration_Combos_Move_Down_Dep_Reason": _("Move Down"),
"label_Configuration_Combos_Str_Type": _("Strain - General - Type"),
"label_Configuration_Combos_Str_Type_Associates": _("Associates"),
"label_Configuration_Combos_Str_Type_Not_Associates": _("Not Associates"),
"label_Configuration_Combos_To_Associate_Str_Type": _("To Associate"),
"label_Configuration_Combos_To_Dissociate_Str_Type": _("To Dissociate"),
"label_Configuration_Combos_Move_Up_Str_Type": _("Move Up"),
"label_Configuration_Combos_Move_Down_Str_Type": _("Move Down"),
"label_Configuration_Combos_New_Item": _("New Item"),
"label_Configuration_Combos_Test_Group": _("Document - Test - Group"),
"label_Configuration_Combos_Test_Group_Associates": _("Associates"),
"label_Configuration_Combos_Test_Group_Not_Associates": _("Not Associates"),
"label_Configuration_Combos_To_Associate_Test_Group": _("To Associate"),
"label_Configuration_Combos_To_Dissociate_Test_Group": _("To Dissociate"),
"label_Configuration_Combos_Move_Up_Test_Group": _("Move Up"),
"label_Configuration_Combos_Move_Down_Test_Group": _("Move Down"),
"label_Configuration_Combos_Preservation_Method": _("Preservation Method / Unit of Measure"),
"label_Configuration_Combos_Preservation_Method_Associates": _("Associates"),
"label_Configuration_Combos_Preservation_Method_Not_Associates": _("Not Associates"),
"label_Configuration_Combos_To_Associate_Preservation_Method": _("To Associate"),
"label_Configuration_Combos_To_Dissociate_Preservation_Method": _("To Dissociate"),
"label_Configuration_Combos_Move_Up_Preservation_Method": _("Move Up"),
"label_Configuration_Combos_Move_Down_Preservation_Method": _("Move Down"),
"label_Configuration_Combos_Taxon_Group": _("Taxon Group"),
"label_Configuration_Combos_Taxon_Group_Associates": _("Associates"),
"label_Configuration_Combos_Taxon_Group_Not_Associates": _("Not Associates"),
"label_Configuration_Combos_To_Associate_Taxon_Group": _("To Associate"),
"label_Configuration_Combos_To_Dissociate_Taxon_Group": _("To Dissociate"),
"label_Configuration_Combos_Move_Up_Taxon_Group": _("Move Up"),
"label_Configuration_Combos_Move_Down_Taxon_Group": _("Move Down"),
"label_Configuration_Databases_Data": _("Databases Data"),
"label_Configuration_Databases_Data_Dbms":_("Data DBMS"),
"label_Configuration_Databases_Data_Host":_("Data Host"),
"label_Configuration_Databases_Data_Port":_("Data Port"),
"label_Configuration_Databases_Data_Name":_("Data Name"),
"label_Configuration_Databases_Data_User":_("Data User"),
"label_Configuration_Databases_Data_Pwd":_("Data Password"),
"label_Configuration_Databases_Tracebility": _("Database Tracebility"),
"label_Configuration_Databases_Tracebility_Dbms":_("Tracebility DBMS"),
"label_Configuration_Databases_Tracebility_Host":_("Tracebility Host"),
"label_Configuration_Databases_Tracebility_Port":_("Tracebility Port"),
"label_Configuration_Databases_Tracebility_Name":_("Tracebility Name"),
"label_Configuration_Databases_Tracebility_User":_("Tracebility User"),
"label_Configuration_Databases_Tracebility_Pwd":_("Tracebility Password"),
"label_Configuration_Config_IndexURL":_("Index URL"),
"label_Configuration_Config_RootDir":_("Root Directory"),
"label_Configuration_Config_StartPage":_("Start Page"),
"label_Configuration_Config_DateInput":_("Date Input Mask"),
"label_Configuration_Config_DateOutput":_("Date Output Mask"),
"label_Configuration_Config_Data_Lang": _("Input Language"),
"label_Configuration_Config_Data_Lang_Associates": _("Associates"),
"label_Configuration_Config_Data_Lang_Not_Associates": _("Not Associates"),
"label_Configuration_Config_To_Associate_Data_Lang": _("To Associate"),
"label_Configuration_Config_To_Dissociate_Data_Lang": _("To Dissociate"),
"label_Configuration_Config_Move_Up_Data_Lang": _("Move Up"),
"label_Configuration_Config_Move_Down_Data_Lang": _("Move Down"),
"label_Configuration_Config_Lang":_("Language"),
"label_Configuration_Config_Max_Upload_Size": _("Maximum upload size (bytes)"),
"label_Configuration_Config_Upload_Blank": _("Leave blank to disable size limit"),
"label_Configuration_Users_Subcollection": _("Subcollection"),
"label_Configuration_Division_Subcollection": _("Subcollection"),
"label_Configuration_Templates_Subcollection": _("Subcollection"),
"label_Configuration_Templates_select_subcoll": _("Select a Subcoll"),
#utilities - Stock Movement
"label_Traceability_Datetime": _("Date/Time"),
"label_Traceability_Datetime_To": _("to"),
"label_Traceability_User": _("User"),
"label_Traceability_Operation": _("Operation"),
"label_Traceability_Record": _("Record"),
"label_Traceability_Search": _("Search"),
"label_Traceability_Reset_Search": _("Reset Search"),
"label_Traceability_Strain_Code": _("Strain Code"),
"label_Traceability_Lot": _("Lot"),
"label_Traceability_Field": _("Field"),
"label_Traceability_Value": _("Value"),
"label_Traceability_Datetime": _("Date/Time"),
"label_Traceability_Datetime_To": _("to"),
"label_Traceability_User": _("User"),
"label_Traceability_Operation": _("Operation"),
"label_Traceability_Record": _("Record"),
"label_Traceability_Search": _("Search"),
"label_Traceability_Strain_Code": _("Strain Code"),
"label_Traceability_Lot": _("Lot"),
"label_Traceability_Field": _("Field"),
"label_Traceability_Value": _("Value"),
"label_Stock_Movement_Date": _("Date"),
"label_Stock_Movement_Description": _("Description"),
"label_Stock_Movement_Preservation_Method": _("Preservation Method"),
"label_Stock_Movement_From": _("Stock Movement From"),
"label_Stock_Movement_To": _("Stock Movement To"),
"label_Stock_Movement_Positions": _("Stock Movement Positions"),
#utilities - Container
"label_Container_Abbreviation": _("Abbreviation"),
"label_Container_Description": _("Description"),
"label_Container_Preservation_Method": _("Preservation Methods"),
"label_Container_Structure": _("Structure"),
"label_Container_New": _("New"),
"label_Container_Del": _("Del"),
"label_Container_Copy": _("Copy"),
"label_Container_Paste": _("Paste"),
#preferences
"label_Preferences_Account_User_Password": _("Leave blank to maintain previous password."),
"label_Preferences_Account": _("Account"),
"label_Preferences_Language": _("Language"),
"label_Preferences_User_Lines_Per_Page": _("Lines shown per page"),
"label_Preferences_User_Max_Num_Pages":_("Maximum number of pages"),
"label_Preferences_User_Show_Strain_Inactives": _("Show inactive strains"),
"label_Preferences_User_Name": _("Name"),
"label_Preferences_User_Login": _("Login"),
"label_Preferences_User_Password": _("Password"),
"label_Preferences_User_Confirm_Password": _("Confirm password"),
"label_Preferences_User_Roles": _("Groups"),
#login
"label_Login_User": _("User"),
"label_Login_Password": _("Password"),
"label_Login_Type_User_And_Password": _("Type your name user and password to enter."),
"label_Login_Type_User": _("Type your user name"),
"label_Login_Type_Password": _("Type your password"),
"label_Login_Enter": _("Enter"),
#logout
"label_Logout_Successfull_Title": _("Logged Out Successfully"),
"label_Logout_Successfull_Phrase": _("You have logged out correctly."),
"label_Logout_Login_Again": _("Login again"),
#subcollection
"label_Subcollection_Choose_Subcollection": _("Choose the subcolletion"),
#form.save
"label_Unknown_Error": _("An unknown error has occurred during operation."),
"label_Error_Contact": _("Contact the System Administrator and inform him the following message:"),
"label_Return_Error": _("Return to form and try again"),
"label_Operation_Error": _("Operation Error"),
#tests
"label_Javascript_Error": _("JavaScript Error"),
"label_JS_Error_Msg1": _("Your browser does not have JavaScript support or it is disabled."),
"label_JS_Error_Msg2": _("This feature is essential for some system features."),
"label_JS_Error_Msg3": _("Please verify your browser's JavaScript support, enable it and try again."),
"label_Cookie_Error": _("Cookie Error"),
"label_Cookie_Error_Msg1": _("Your browser does not have Cookies support or it is disabled."),
"label_Cookie_Error_Msg2": _("This feature is essential to use thise system."),
"label_Cookie_Error_Msg3": _("Please verify your browser's Cookie support, enable it and try again."),
#dynamics (don't remove)
"label_distribution_Locations": _("Origin Locations"),
"label_preservation_Locations": _("Stock Position"),
"label_quality_Locations": _("Origin Locations"),
#generic labels
"label_Version": _("Version"),
"label_Footer": _("Collection Information System of Biotechnological Interest, SICol"),
"label_Access_Denied": _("ACCESS DENIED!"),
"label_Location_Container": _("Container"),
"label_Location_Location": _("Location"),
"label_Location_ValorClique": _("Click Quantity"),
"label_Location_Yes": _("yes"),
"label_Location_No": _("no"),
"label_Print": _("PRINT"),
"label_Confirm_Delete": _("Do you really want to delete this item?"),
"label_Delete": _("DELETE"),
"label_Edit": _("EDIT"),
"label_SaveAs": _("SAVE_AS"),
"label_Permission": _("Permission"),
"label_Save": _("SAVE"),
"label_Cancel": _("CANCEL"),
"label_Filter": _("Filter"),
"label_OK": _("OK"),
"label_Do_Filter": _("Do Filter"),
"label_Textlink_Support": _("this field supports textLinks"),
#tinyMCE plugin
"textlink_Type" : _("TextLink Type"),
"textlink_Reference" : _("Reference"),
"textlink_Document" : _("Document"),
"textlink_Taxa" : _("Taxa"),
"textlink_Link" : _("Link"),
"textlink_Title" : _("Title"),
"textlink_Insert" : _("Insert"),
"textlink_Cancel" : _("Cancel"),
#menus
"menu_Strains": _("Strains"),
"menu_Species": _("plural|Species"),
"menu_People": _("People"),
"menu_Institutions": _("Institutions"),
"menu_Documents": _("Documents"),
"menu_References": _("References"),
"menu_Preservation": _("Preservation"),
"menu_Distribution": _("Distribution"),
"menu_Reports": _("Reports"),
"menu_Rep_General": _("General"),
"menu_Rep_Fields": _("Fields"),
"menu_Rep_Filters": _("Filters"),
"menu_Rep_Format": _("Format"),
"menu_Rep_Security": _("Security"),
"menu_Strains_Tip": _("Strains Tip"),
"menu_Species_Tip": _("Species Tip"),
"menu_People_Tip": _("People Tip"),
"menu_Institutions_Tip": _("Institutions Tip"),
"menu_Documents_Tip": _("Documents Tip"),
"menu_References_Tip": _("References Tip"),
"menu_Preferences": _("Preferences"),
"menu_Configuration": _("Configuration"),
"menu_Traceability": _("Traceability"),
"menu_Users": _("Users"),
"menu_Groups": _("Groups"),
"menu_Collections": _("Collections"),
"menu_Subcollections": _("Subcollections"),
"menu_Combos": _("Combos"),
"menu_Divisions": _("Divisions"),
"menu_Reports": _("Reports"),
"menu_Databases": _("Databases"),
"menu_ConfigXML": _("Config.xml"),
"menu_Strains_General_Tip": _("General Tip"),
"menu_Strains_Deposit_Tip": _("Deposit Tip"),
"menu_Strains_Collection_Event_Tip": _("Collection-Event Tip"),
"menu_Strains_Isolation_Tip": _("Isolation Tip"),
"menu_Strains_Identification_Tip": _("Identification Tip"),
"menu_Strains_Culture_Tip": _("Culture Tip"),
"menu_Strains_Characteristics_Tip": _("Characteristics Tip"),
"menu_Strains_Security_Tip": _("Security Tip"),
"menu_Distribution_General": _("General"),
"menu_Documents_General": _("General"),
"menu_Institutions_General": _("General"),
"menu_People_General": _("General"),
"menu_Preservation_General": _("General"),
"menu_References_General": _("General"),
"menu_Species_General": _("General"),
"menu_Strains_General": _("General"),
"menu_Reports_General": _("General"),
"menu_Strains_Deposit": _("Deposit"),
"menu_Strains_Collection_event": _("Collection-Event"),
"menu_Strains_Isolation": _("Isolation"),
"menu_Strains_Identification": _("Identification"),
"menu_Strains_Culture": _("Culture"),
"menu_Strains_Characteristics": _("Characteristics"),
"menu_Strains_Properties": _("Properties"),
"menu_Strains_Quality": _("Quality"),
"menu_Strains_Stock": _("Stock"),
"menu_Distribution_Security": _("Security"),
"menu_Documents_Security": _("Security"),
"menu_Institutions_Security": _("Security"),
"menu_People_Security": _("Security"),
"menu_Preservation_Security": _("Security"),
"menu_References_Security": _("Security"),
"menu_Species_Security": _("Security"),
"menu_Strains_Security": _("Security"),
"menu_Reports_Security": _("Security"),
"menu_Stockmovement": _("Stock Movement"),
"menu_Containers": _("Containers"),
"menu_Traceability": _("Traceability"),
#links
"link_Preferences": _("Preferences"),
"link_Configuration": _("Configuration"),
"link_Utilities": _("Utilities"),
"link_Exit": _("Exit"),
"link_Exit_Title": _("Choose other Subcollection or Exit"),
"link_Logout": _("Logout"),
"link_New": _("New"),
"link_Reports": _("Reports"),
"link_Search": _("Search"),
"link_Advanced_Search": _("Advanced Search"),
"link_Include_New": _("Include New"),
#reports
"label_Rep_ID" : _("ID"),
"label_Rep_General_Type" : _("Type"),
"label_Rep_General_Description" : _("Description"),
"label_Rep_General_Select" : _("Selected"),
"label_Rep_General_Group" : _("Group by"),
"label_Rep_General_Order" : _("Order by"),
"label_Rep_General_Totalizer" : _("Totalized by"),
"label_Rep_General_Filter" : _("Filter"),
"label_Rep_General_Report_Format": _("Report format"),
"label_Rep_Language" : _("Language"),
"label_Rep_Language_Values" : _ ("Use fields content in"),
"label_Rep_Format": _("Format"),
"label_Rep_Default_HTML": _("Default HTML"),
"label_Rep_Default": _("Default HTML"),
"label_Rep_default": _("Default HTML"),
"label_Rep_CSV": _("CSV"),
"label_Rep_csv": _("CSV"),
"label_Rep_Chart": _("Chart"),
"label_Rep_chart": _("Chart"),
"label_Rep_Custom_HTML": _("Custom HTML"),
"label_Rep_Custom": _("Custom HTML"),
"label_Rep_custom": _("Custom HTML"),
"label_Rep_XML": _("XML"),
"label_Rep_xml": _("XML"),
"label_Rep_page_title":_("SICol - Report"),
"label_Rep_Header": _("Header"),
"label_Rep_External": _("External"),
"label_Rep_Internal": _("Internal"),
"label_Rep_None": _("None"),
"label_Rep_Separator": _("Separator"),
"label_Rep_chart_type": _("Chart Type"),
"label_Rep_Bar": _("Bar"),
"label_Rep_Pie": _("Pie"),
"label_Rep_Line": _("Line"),
"label_Rep_Column": _("Column"),
"label_Rep_header_template": _("Header template"),
"label_Rep_footer_template": _("Footer template"),
"label_Rep_data_template": _("Data template"),
"label_Rep_css_template": _("CSS template"),
"label_Rep_subcoll_template": _("Use SubColl Templates"),
"label_Rep_header": _("Header Template"),
"label_Rep_footer": _("Footer Template"),
"label_Rep_fieldlink_Type" : _("FieldLink Type"),
"label_Rep_System_Fields" : _("System Fields"),
"label_Rep_Report_Fields" : _("Report Fields"),
"label_Rep_fieldlink_Insert" : _("Insert"),
"label_Rep_fieldlink_Cancel" : _("Cancel"),
"label_Rep_Date" : _("Date"),
"label_Rep_Time" : _("Time"),
"label_Rep_Name_Coll" : _("Collection Name"),
"label_Rep_Code_Coll" : _("Collection Code"),
"label_Rep_Name_Subcoll" : _("Subcollection Name"),
"label_Rep_Code_Subcoll" : _("Subcollection Code"),
"label_Rep_User" : _("User Name"),
"label_Rep_Description" : _("Report Description"),
"label_Rep_totalizer" : _("Total"),
"label_Rep_equal": _("Equals"),
"label_Rep_differs": _("Differs"),
"label_Rep_contains": _("Contains"),
"label_Rep_in": _("In"),
"label_Rep_not_in": _("Not In"),
"label_Rep_greater": _("Greater"),
"label_Rep_greater_or_equal": _("Greater or Equal"),
"label_Rep_less": _("Less"),
"label_Rep_less_or_equal": _("Less or Equal"),
"label_Rep_connector_and" : _("and"),
"label_Rep_connector_or" : _("or"),
"label_Rep_connector_" : "",
#strain reports fields
"label_Rep_Strain_Id": _("Strain ID"),
"label_Rep_Division": _("Division"),
"label_Rep_Code": _("Code"),
"label_Rep_Numeric_Code": _("Number"),
"label_Rep_Origin_Code": _("Origin Code"),
"label_Rep_Status": _("Status"),
"label_Rep_Taxon_Group": _("Taxon Group"),
"label_Rep_Taxon": _("Taxon"),
"label_Rep_Type": _("Type"),
"label_Rep_Name": _("Name"),
"label_Rep_Description": _("Description"),
"label_Rep_Next": _("Next"),
"label_Rep_Fields": _("Fields"),
"label_Rep_Selected": _("Selected"),
"label_Rep_Group": _("Group"),
"label_Rep_Totaled": _("Totalized"),
"label_Rep_Step1": _("Step1"),
"label_Rep_Step2": _("Step2"),
"label_Rep_Is_Ogm": _("Is Ogm"),
"label_Rep_Taxonomic_Complement": _("Taxonomic Complement"),
"label_Rep_History": _("History"),
"label_Rep_Other_Codes": _("Other Codes"),
"label_Rep_General_Comments": _("General Comments"),
"label_Rep_Collect_Date": _("Collect Date"),
"label_Rep_Collect_Year": _("Collect Year"),
"label_Rep_Collect_Month": _("Collect Month"),
"label_Rep_Collect_Person": _("Collect Person"),
"label_Rep_Collect_Institution": _("Collect Institution"),
"label_Rep_Country": _("Country"),
"label_Rep_State_Code": _("State Code"),
"label_Rep_State_Name": _("State Name"),
"label_Rep_City": _("City"),
"label_Rep_Place": _("Place"),
"label_Rep_Latitude": _("Latitude"),
"label_Rep_Latitude_DMS": _("Latitude DMS"),
"label_Rep_Latitude_Mode": _("Latitude Mode"),
"label_Rep_Longitude": _("Longitude"),
"label_Rep_Longitude_DMS": _("Longitude DMS"),
"label_Rep_Longitude_Mode": _("Longitude Mode"),
"label_Rep_Datum": _("Datum"),
"label_Rep_GPS_Precision": _("GPS Precision"),
"label_Rep_GPS_Comments": _("GPS Comments"),
"label_Rep_Substratum": _("Substratum"),
"label_Rep_Host_Name": _("Host Name"),
"label_Rep_Host_Genus": _("Host Genus"),
"label_Rep_Host_Species": _("Host Species"),
"label_Rep_Host_Level": _("Host Level"),
"label_Rep_Host_Subspecies": _("Host Subspecies"),
"label_Rep_Host_Taxonomic_Complement": _("Host Taxonomic Complement"),
"label_Rep_International_Code": _("International Code"),
"label_Rep_Clinical_Form_Code": _("Clinical Form Code"),
"label_Rep_Clinical_Form_Name": _("Clinical Form Name"),
"label_Rep_HIV": _("HIV"),
"label_Rep_Collect_Comments": _("Collect Comments"),
"label_Rep_Isolation_Date": _("Isolation Date"),
"label_Rep_Isolation_Year": _("Isolation Year"),
"label_Rep_Isolation_Month": _("Isolation Month"),
"label_Rep_Isolation_Person": _("Isolation Person"),
"label_Rep_Isolation_Institution": _("Isolation Institution"),
"label_Rep_Isolation_From": _("Isolation From"),
"label_Rep_Isolation_Method": _("Isolation Method"),
"label_Rep_Isolation_Comments": _("Isolation Comments"),
"label_Rep_Identification_Date": _("Identification Date"),
"label_Rep_Identification_Year": _("Identification Year"),
"label_Rep_Identification_Month": _("Identification Month"),
"label_Rep_Identification_Person": _("Identification Person"),
"label_Rep_Identification_Institution": _("Identification Institution"),
"label_Rep_Identification_Genus": _("Identification Genus"),
"label_Rep_Identification_Species": _("Identification Species"),
"label_Rep_Identification_Level": _("Identification Level"),
"label_Rep_Identification_Subspecies": _("Identification Subspecies"),
"label_Rep_Identification_Taxonomic_Complement": _("Identification Taxonomic Complement"),
"label_Rep_Identification_Method": _("Identification Method"),
"label_Rep_Identification_Comments": _("Identification Comments"),
"label_Rep_Deposit_Date": _("Deposit Date"),
"label_Rep_Deposit_Year": _("Deposit Year"),
"label_Rep_Deposit_Month": _("Deposit Month"),
"label_Rep_Deposit_Person": _("Deposit Person"),
"label_Rep_Deposit_Institution": _("Deposit Institution"),
"label_Rep_Deposit_Genus": _("Deposit Genus"),
"label_Rep_Deposit_Species": _("Deposit Species"),
"label_Rep_Deposit_Level": _("Deposit Level"),
"label_Rep_Deposit_Subspecies": _("Deposit Subspecies"),
"label_Rep_Deposit_Taxonomic_Complement": _("Deposit Taxonomic Complement"),
"label_Rep_Deposit_Reason": _("Deposit Reason"),
"label_Rep_Deposit_Form": _("Deposit Form"),
"label_Rep_Recommended_Preservation_Method": _("Recommended Preservation Method"),
"label_Rep_Authentication_Date": _("Authentication Date"),
"label_Rep_Authentication_Year": _("Authentication Year"),
"label_Rep_Authentication_Month": _("Authentication Month"),
"label_Rep_Authentication_Person": _("Authentication Person"),
"label_Rep_Authentication_Result": _("Authentication Result"),
"label_Rep_Deposit_Comments": _("Deposit Comments"),
"label_Rep_Recommended_Growth_Medium": _("Recommended Growth Medium"),
"label_Rep_Recommended_Temperature": _("Recommended Temperature"),
"label_Rep_Incubation_Time": _("Incubation Time"),
"label_Rep_PH": _("PH"),
"label_Rep_Oxygen_Requirements": _("Oxygen Requirements"),
"label_Rep_Growth_Comments": _("Growth Comments"),
"label_Rep_Morphological_Characteristics": _("Morphological Characteristics"),
"label_Rep_Molecular_Characteristics": _("Molecular Characteristics"),
"label_Rep_Biochemical_Characteristics": _("Biochemical Characteristics"),
"label_Rep_Immunologic_Characteristics": _("Immunologic Characteristics"),
"label_Rep_Pathogenic_Characteristics": _("Pathogenic Characteristics"),
"label_Rep_Genotypic_Characteristics": _("Genotypic Characteristics"),
"label_Rep_OGM": _("OGM Group"),
"label_Rep_OGM_Comments": _("OGM Comments"),
"label_Rep_Biorisk_Comments": _("Biorisk Comments"),
"label_Rep_Restrictions": _("Restrictions"),
"label_Rep_Pictures": _("Pictures"),
"label_Rep_Characteristics_References": _("Characteristics References"),
"label_Rep_Catalogue_Notes": _("Catalogue Notes"),
"label_Rep_Properties": _("Properties"),
"label_Rep_Applications": _("Applications"),
"label_Rep_Properties_References": _("Properties References"),
"label_Rep_Go_Catalog": _("Go Catalog"),
"label_Rep_Inst_ID": _("Institution ID"),
"label_Rep_Inst_Code1": _("Code1"),
"label_Rep_Inst_Code2": _("Code2"),
"label_Rep_Inst_Code3": _("Code3"),
"label_Rep_Inst_Complement": _("Complement"),
"label_Rep_Inst_Nickname": _("Locator"),
"label_Rep_Inst_Name": _("Name"),
"label_Rep_Inst_Address": _("Address"),
"label_Rep_Inst_Phone": _("Phones/Fax"),
"label_Rep_Inst_Email": _("Email"),
"label_Rep_Inst_Website": _("Website"),
"label_Rep_Inst_Go_Catalog": _("Go Catalog"),
"label_Rep_Inst_Comments": _("Comments"),
"label_Rep_Per_ID": _("Person ID"),
"label_Rep_Per_Name": _("Name"),
"label_Rep_Per_Nickname": _("NickName"),
"label_Rep_Per_Address": _("Address"),
"label_Rep_Per_Phone": _("Phones/Fax"),
"label_Rep_Per_Email": _("Email"),
"label_Rep_Per_Go_Catalog": _("Go Catalog"),
"label_Rep_Per_Comments": _("Comments"),
"label_Rep_Doc_ID": _("Document ID"),
"label_Rep_Doc_Code": _("Code"),
"label_Rep_Doc_Qualifier": _("Qualifier"),
"label_Rep_Doc_Title": _("Title"),
"label_Rep_Doc_Description": _("Description"),
"label_Rep_Doc_File_Name": _("File Name"),
"label_Rep_Doc_Category": _("Category"),
"label_Rep_Doc_Go_Catalog": _("Go Catalog"),
"label_Rep_Ref_Code": _("Code"),
"label_Rep_Ref_Title": _("Title"),
"label_Rep_Ref_Author": _("Author"),
"label_Rep_Ref_Year": _("Year"),
"label_Rep_Ref_Url": _("URL"),
"label_Rep_Ref_Comments": _("Comments"),
"label_Rep_Ref_Go_Catalog": _("Go Catalog"),
"label_Rep_Spe_ID": _("Taxon ID"),
"label_Rep_Spe_Taxon_Group": _("Taxonomic Group"),
"label_Rep_Spe_Hi_Tax": _("Higher Taxa"),
"label_Rep_Spe_Sciname": _("Scientific Name"),
"label_Rep_Spe_Sciname_No_Auth": _("Scientific Name No Auth"),
"label_Rep_Spe_Taxon_Ref": _("Taxonomic References"),
"label_Rep_Spe_Synonym": _("Synonym"),
"label_Rep_Spe_Hazard_Group": _("Hazard_Group"),
"label_Rep_Spe_Hazard_Group_Ref": _("Hazard Group Reference"),
"label_Rep_Spe_Ambient_Risk": _("Hazard Group Comments"),
"label_Rep_Spe_Sciname_Alt_State": _("Alternative State"),
"label_Rep_Spe_Alt_States_Type": _("Alternative State Type"),
"label_Rep_Spe_Comments": _("Comments"),
"label_Rep_Stock_Strain_Code": _("Strain (Code)"),
"label_Rep_Stock_Strain_Numeric_Code": _("Strain (Number)"),
"label_Rep_Stock_Taxon": _("Taxon"),
"label_Rep_Stock_Preservation_Method": _("Preservation Method"),
"label_Rep_Stock_Lot": _("Lot"),
"label_Rep_Stock_Position": _("Position"),
"label_Rep_Stock_Quantity": _("Quantity"),
#Report Generation Errors
"label_Rep_Generation_Error": _("Error in Report Generation"),
"label_Rep_Return_List": _("Return to Reports List"),
}
|
gpl-2.0
| -7,150,550,810,199,151,000 | 45.968637 | 102 | 0.708016 | false |
mlperf/training_results_v0.7
|
Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/tvm/tests/python/unittest/test_codegen_device.py
|
1
|
4204
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm.contrib import util
import numpy as np
def test_add_pipeline():
n = tvm.var('n')
A = tvm.placeholder((n,), name='A')
B = tvm.placeholder((), name='B')
C = tvm.compute(A.shape, lambda *i: A(*i) + B(), name='C')
D = tvm.compute(A.shape, lambda *i: C(*i) + 1, name='D')
s = tvm.create_schedule(D.op)
# GPU schedule have to split by gridIdx and threadIdx
num_thread = 256
xo, xi = s[C].split(C.op.axis[0], factor=num_thread)
s[C].bind(xi, tvm.thread_axis("threadIdx.x"))
s[C].bind(xo, tvm.thread_axis("blockIdx.x"))
xo, xi = s[D].split(D.op.axis[0], factor=num_thread)
s[D].bind(xi, tvm.thread_axis("threadIdx.x"))
s[D].bind(xo, tvm.thread_axis("blockIdx.x"))
# compile to IR
s = s.normalize()
bounds = tvm.schedule.InferBound(s)
stmt = tvm.schedule.ScheduleOps(s, bounds)
Ab = tvm.decl_buffer(A.shape, A.dtype, name='A')
Bb = tvm.decl_buffer(B.shape, B.dtype, name='B')
Db = tvm.decl_buffer(D.shape, D.dtype, name='D')
stmt = tvm.ir_pass.LoopPartition(stmt, False)
stmt = tvm.ir_pass.StorageFlatten(stmt, {A: Ab, B:Bb, D:Db}, 64)
stmt = tvm.ir_pass.Simplify(stmt)
fapi = tvm.ir_pass.MakeAPI(stmt, "myadd", [Ab, Bb, Db], 0, True)
fsplits = [x for x in tvm.ir_pass.SplitHostDevice(fapi)]
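    # SplitHostDevice returns the host function first and the device kernels after it;
    # only the host part goes through the TVM builtin lowering pass.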
fsplits[0] = tvm.ir_pass.LowerTVMBuiltin(fsplits[0])
def check_target(device, host="stackvm"):
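        # Silently skip device targets or host backends not enabled in this build.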
ctx = tvm.context(device, 0)
if not ctx.exist:
return
if not tvm.module.enabled(host):
return
mhost = tvm.codegen.build_module(fsplits[0], host)
mdev = tvm.codegen.build_module(fsplits[1:], device)
mhost.import_module(mdev)
code = mdev.get_source()
f = mhost.entry_func
# launch the kernel.
n = 1027
a = tvm.nd.array(np.random.uniform(size=n).astype(Ab.dtype), ctx)
b = tvm.nd.array(np.random.uniform(size=()).astype(Bb.dtype), ctx)
d = tvm.nd.array(np.zeros(n, dtype=Db.dtype), ctx)
f(a, b, d)
tvm.testing.assert_allclose(
d.asnumpy(), a.asnumpy() + b.asnumpy() + 1)
def check_module_save(device, host="stackvm"):
ctx = tvm.context(device, 0)
if not ctx.exist:
return
if not tvm.module.enabled(host):
return
fmt = "ptx" if device == "cuda" else device
mhost = tvm.codegen.build_module(fsplits[0], host)
mdev = tvm.codegen.build_module(fsplits[1:], device)
temp = util.tempdir()
mpath = temp.relpath("test.%s" % fmt)
mdev.save(mpath)
mdev2 = tvm.module.load(mpath)
mhost.import_module(mdev2)
f = mhost.entry_func
# launch the kernel.
n = 1027
a = tvm.nd.array(np.random.uniform(size=n).astype(Ab.dtype), ctx)
b = tvm.nd.array(np.random.uniform(size=()).astype(Bb.dtype), ctx)
d = tvm.nd.array(np.zeros(n, dtype=Db.dtype), ctx)
f(a, b, d)
tvm.testing.assert_allclose(
d.asnumpy(), a.asnumpy() + b.asnumpy() + 1)
check_target("cuda", host="stackvm")
check_target("cuda", host="llvm")
check_module_save("cuda", host="stackvm")
check_target("nvptx", host="llvm")
check_target("vulkan", host="llvm")
check_target("rocm", host="llvm")
check_module_save("vulkan", host="stackvm")
if __name__ == "__main__":
test_add_pipeline()
|
apache-2.0
| 1,658,198,143,993,721,900 | 38.28972 | 74 | 0.622265 | false |
mic4ael/indico
|
bin/utils/create_module.py
|
1
|
6792
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
import os
import re
import textwrap
from collections import defaultdict
from datetime import date
import click
click.disable_unicode_literals_warning = True
def _validate_indico_dir(ctx, param, value):
if not os.path.isdir(value):
raise click.BadParameter('directory does not exist')
if not os.path.exists(os.path.join(value, 'modules')):
raise click.BadParameter('directory has no modules subdirectory')
return value
def _process_name(ctx, param, value):
path = _get_module_dir(ctx.params['indico_dir'], ctx.params.get('event'), value)
ctx.params['module_dir'] = path
return value
def _process_model_classes(ctx, param, value):
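    # Group "-M [module_name:]ClassName" options into {module_name: [class names]};
    # without an explicit module name, snake-case the class name and pluralise it.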
items = defaultdict(list)
for item in value:
if ':' in item:
module_name, class_name = item.split(':', 1)
else:
class_name = item
module_name = _snakify(item) + 's'
items[module_name].append(class_name)
return items
def _get_module_dir(indico_dir, event, name):
segments = [indico_dir, 'modules']
if event:
segments.append('events')
segments.append(name)
return os.path.join(*segments)
def _snakify(name):
# from http://stackoverflow.com/a/1176023/298479
name = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', name).lower()
def touch(path):
open(path, 'a').close()
def write(f, text=''):
if text:
f.write(text.encode('ascii'))
f.write(b'\n')
def write_header(f):
f.write(textwrap.dedent(b"""
# This file is part of Indico.
# Copyright (C) 2002 - {year} CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
""").lstrip().format(year=date.today().year))
def write_model(f, class_name, event):
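    # The table name is derived from the class name; event models live in the
    # "events" schema and drop a redundant "event_" prefix.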
schema = 'events' if event else 'TODO'
table_name = _snakify(class_name) + 's'
if event and table_name.startswith('event_'):
table_name = table_name[6:]
f.write(b'\n\n')
f.write(textwrap.dedent(b"""
class {cls}(db.Model):
__tablename__ = '{table}'
__table_args__ = {{'schema': '{schema}'}}
#: The ID of the object
id = db.Column(
db.Integer,
primary_key=True
)
@return_ascii
def __repr__(self):
return format_repr(self, 'id')
""").lstrip().format(cls=class_name, table=table_name, schema=schema))
@click.command()
@click.option('--indico-dir', envvar='INDICO_DIR', metavar='DIR', default='indico', callback=_validate_indico_dir,
is_eager=True, help='Path to the indico folder. Can be specified via the INDICO_DIR env var and '
'defaults to `indico`')
@click.argument('name', callback=_process_name) # adds module_dir to params
@click.option('-e', '--event', is_flag=True, help='Create module inside modules/events/ instead of modules/')
@click.option('-m', '--models', is_flag=True, help='Create models package')
@click.option('-M', '--model', 'model_classes', multiple=True, metavar='[module_name:]ClassName',
callback=_process_model_classes,
help='Create a model - implies `--models` and can be used multiple times. If no module name is '
'specified, it is derived from the class name')
@click.option('-b', '--blueprint', is_flag=True, help='Create a blueprint')
@click.option('-t', '--templates', is_flag=True, help='Add templates/ folder (only makes sense with a blueprint)')
@click.option('-c', '--controllers', is_flag=True, help='Add controllers module (only makes sense with a blueprint)')
@click.option('-v', '--views', is_flag=True, help='Add views module (only makes sense with a blueprint)')
def main(indico_dir, name, module_dir, event, models, blueprint, templates, controllers, views, model_classes):
if not os.path.exists(module_dir):
os.mkdir(module_dir)
touch(os.path.join(module_dir, '__init__.py'))
if models or model_classes:
models_dir = os.path.join(module_dir, 'models')
if not os.path.exists(models_dir):
os.mkdir(models_dir)
touch(os.path.join(models_dir, '__init__.py'))
for module_name, class_names in model_classes.iteritems():
model_path = os.path.join(models_dir, '{}.py'.format(module_name))
if os.path.exists(model_path):
raise click.exceptions.UsageError('Cannot create model in {} (file already exists)'.format(module_name))
with open(model_path, 'w') as f:
write_header(f)
write(f, 'from indico.core.db import db')
write(f, 'from indico.util.string import format_repr, return_ascii')
for class_name in class_names:
write_model(f, class_name, event)
if blueprint:
blueprint_name = 'event_{}'.format(name) if event else name
blueprint_path = os.path.join(module_dir, 'blueprint.py')
if os.path.exists(blueprint_path):
raise click.exceptions.UsageError('Cannot create blueprint (file already exists)')
with open(blueprint_path, 'w') as f:
write_header(f)
write(f, 'from indico.web.flask.wrappers import IndicoBlueprint')
write(f)
if templates:
virtual_template_folder = 'events/{}'.format(name) if event else name
write(f, "_bp = IndicoBlueprint('{}', __name__, template_folder='templates',\n\
virtual_template_folder='{}')".format(blueprint_name, virtual_template_folder))
else:
write(f, "_bp = IndicoBlueprint('{}', __name__)".format(blueprint_name))
write(f)
if templates:
templates_dir = os.path.join(module_dir, 'templates')
if not os.path.exists(templates_dir):
os.mkdir(templates_dir)
if controllers:
controllers_path = os.path.join(module_dir, 'controllers.py')
if not os.path.exists(controllers_path):
with open(controllers_path, 'w') as f:
write_header(f)
if views:
views_path = os.path.join(module_dir, 'views.py')
if not os.path.exists(views_path):
with open(views_path, 'w') as f:
write_header(f)
if __name__ == '__main__':
main()
|
mit
| -6,267,578,865,600,355,000 | 38.034483 | 120 | 0.604976 | false |
SCUEvals/scuevals-api
|
db/alembic/env.py
|
1
|
2188
|
from __future__ import with_statement
import os
import sys
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
sys.path.append(os.getcwd())
from scuevals_api.models import db # noqa
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
config.set_main_option('sqlalchemy.url', os.environ[config.get_main_option('sqlalchemy.url')])
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url, target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
|
agpl-3.0
| -7,893,498,225,739,737,000 | 27.051282 | 94 | 0.70841 | false |
founders4schools/duedilv3
|
duedil/resources/pro/company/__init__.py
|
1
|
1180
|
from __future__ import unicode_literals
from .accounts import Account
from .accounts.financial import AccountDetailsFinancial
from .accounts.gaap import AccountDetailsGAAP
from .accounts.ifrs import AccountDetailsIFRS
from .accounts.insurance import AccountDetailsInsurance
from .accounts.statutory import AccountDetailsStatutory
from .bank_account import BankAccount
from .company import Company
from .director import Director
from .directorship import Directorship
from .document import Document
from .secondary_industries import Industry
from .mortgage import Mortgage
from .previous_company_name import PreviousCompanyName
from .registered_address import RegisteredAddress
from .service_address import ServiceAddress
from .shareholder import Shareholder
from .company_keywords import Keywords
__all__ = ['AccountDetailsFinancial', 'AccountDetailsGAAP',
'AccountDetailsIFRS', 'AccountDetailsInsurance',
'AccountDetailsStatutory', 'BankAccount',
'Company', 'Director', 'Directorship', 'Document',
'Industry', 'Mortgage', 'Keywords',
'PreviousCompanyName', 'RegisteredAddress',
'ServiceAddress', 'Shareholder']
|
apache-2.0
| -554,486,124,231,948,900 | 41.142857 | 61 | 0.783898 | false |
michaelmurdock/mmlib
|
db_utils.py
|
1
|
1578
|
# db_utils.py
def execute_query_for_single_value(cursor, query):
'''
query is a string containing a well-formed query that is executed
against the specified cursor.
    RETURNS: the following triple: (results_flag, err_msg, value)
'''
try:
# Execute the query
cursor.execute(query)
except Exception as e:
msg = 'Exception calling cursor.execute(). Query: %s, Error details: %s' % (query, str(e))
return (False, msg, None)
try:
# Fetch the single result
query_result = cursor.fetchone()
except Exception as e:
msg = 'Exception calling cursor.fetchone(). Query: %s, Error details: %s' % (query, str(e))
return (False, msg, None)
return (True, '', query_result[0])
def query_for_multiple_values(cursor, query):
'''
query is a string containing a well-formed query that is executed
against the specified cursor.
    RETURNS: the following triple: (results_flag, err_msg, x_values)
'''
try:
# Execute the query
cursor.execute(query)
except Exception as e:
msg = 'Exception calling cursor.execute(). Query: %s, Error details: %s' % (query, str(e))
return (False, msg, None)
try:
# Fetch the result
query_fetch_results = cursor.fetchall()
except Exception as e:
msg = 'Exception calling cursor.fetchall(). Details: %s' % (str(e))
        return (False, msg, None)
return (True, '', query_fetch_results)
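# A minimal usage sketch for the helpers above. This block is illustrative only:
# it assumes a local sqlite3 database file and a `users` table, neither of which
# is part of this module.
if __name__ == '__main__':
    import sqlite3
    conn = sqlite3.connect('example.db')
    cursor = conn.cursor()
    ok, err, value = execute_query_for_single_value(cursor, 'SELECT COUNT(*) FROM users')
    if ok:
        print('users: %s' % value)
    else:
        print('query failed: %s' % err)
    conn.close()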
|
bsd-2-clause
| 2,520,249,092,786,609,700 | 26.178571 | 99 | 0.583016 | false |
DavidAwad/Quantum-Simulator
|
quantum.py
|
1
|
5150
|
#!/usr/bin/env python
# encoding: utf-8
from cmath import exp, pi, sqrt
from random import random
import itertools
def vectorflip(bitstring):
"""
returns flipped bits in a given string
"""
if not bitstring: raise TypeError("vectorflip passed None")
return ''.join('1' if x == '0' else '0' for x in bitstring)
def bitflip(bitstring, qubit):
"""
returns opposite state for a specific qubit
"""
if not bitstring or not qubit: raise TypeError("bitflip passed None")
arr = list(bitstring)
    arr[qubit-1] = '1' if arr[qubit-1] == '0' else '0'
return ''.join(arr)
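# Example (illustrative): bitflip('010', 2) flips the second qubit and returns '000',
# while vectorflip('010') flips every position and returns '101'.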
class System:
"""
This class represents the wave function describing a
basic quantum computer consisting of n qubits.
"""
def __init__(self, num_qubits):
"""
set up a quantum system with the given number of qubits
initialized to the "zero" qubit.
"""
self.num_qubits = num_qubits
        # In this classical simulation, we use 2^n complex numbers for n qubits;
        # this array of size 2^n will replicate the 2^n states the qubits can have
self.amplitudes = [0] * (1 << num_qubits) # use bitshift to realize 2^n
self.amplitudes[0] = 1 # so that sum of prob.s is 1, starting in 000 state
self.states = self.generate_states(num_qubits)
def generate_states(self, num_qubits):
"""
returns a dictionary of all possible states for n qubits
        in the format { 'state': amplitude }, e.g. { '010': 0j }
"""
if num_qubits <= 0:
raise ValueError("simulation requires at least 1 qubit")
# generate table of states using itertools
tuples = [''.join(_) for _ in itertools.product(['0', '1'], repeat=num_qubits)]
        data = {x: 0 for x in tuples}
        # so that sum of squared amplitudes is 1, assume starting in the all-zero state
        data['0' * num_qubits] = 1
return data
def collapse(self):
"""
collapse the system (i.e. measure it) and return the state
based on a random choice from the weighted distribution
"""
total = 0
r = random()
for state in self.states.keys():
total += abs(self.states[state])**2
if r <= total: # randomly selected weighted number
# reset amplitudes after system collapse
self.states = { x:0 for x in self.states }
                self.states['0' * self.num_qubits] = 1
return state
def amplitude(self, state):
"""
takes in a possible state of the system such as '010' and returns
the amplitude of that possible state.
"""
        if len(state) > self.num_qubits:
raise ValueError("State doesn't exist")
# grab binary representation of state, access that array position
return self.states[state]
def probability(self, state):
"""
simply returns the square of the absolute value
of the amplitude for a given state
"""
if not state: raise TypeError("state passed as None")
return abs(self.states[state])**2
def pi_over_eight(self, qubit):
"""
        applies a π/8 gate to the given qubit
"""
if qubit > self.num_qubits:
raise ValueError("Qubit %s not in system" % qubit)
for state in self.states:
            if state[qubit-1] == '0': # given qubit is 0 in this possible state
                self.states[state] *= exp(-1j * pi / 8)
            else: # given qubit is 1 in this possible state
                self.states[state] *= exp(1j * pi / 8)
return
def hadamard(self, qubit):
"""
applies a Hadamard gate to the given qubit.
"""
if qubit > self.num_qubits:
raise ValueError("Qubit %s not in system" % qubit)
# make complete copy as values update simultaneously
copy = {k:v for k,v in self.states.items()}
for state in self.states.keys():
            if state[qubit-1] == '0': # given qubit is 0 in this possible state
print(state)
self.states[state] = (copy[state] + copy[bitflip(state, qubit)]) / sqrt(2)
else: # given qubit is 1 in this possible state
self.states[state] = (copy[bitflip(state, qubit)] - copy[state]) / sqrt(2)
def controlled_not(self, control, target):
"""
applies a controlled-not gate using the first given qubit as the
control of the permutation of the second.
"""
        # the two qubits have to be valid and different
if control > self.num_qubits or target > self.num_qubits:
raise ValueError("Qubit %s not in system" % qubit)
if control == target:
raise ValueError("controlled not using the same bit for both inputs")
copy = {k:v for k,v in self.states.items()}
for state in self.states.keys():
            if state[control-1] == '0': pass
else: # control is 1, flip amplitude of target
print(state) # FIXME
self.states[state] = copy[bitflip(state, target)]
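# A minimal usage sketch (illustrative only; the gate sequence and measurement below
# are arbitrary examples, not part of the original module):
if __name__ == '__main__':
    system = System(3)                # three qubits, starting in state '000'
    system.hadamard(1)                # put the first qubit into superposition
    system.controlled_not(1, 2)       # entangle the first and second qubits
    print(system.probability('000'))  # ~0.5: probability of measuring all zeros
    print(system.collapse())          # measure ('000' or '110') and reset the system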
|
gpl-3.0
| 1,803,433,893,474,609,000 | 37.714286 | 90 | 0.58458 | false |
gpodder/mygpo
|
mygpo/administration/views.py
|
1
|
13775
|
import re
import socket
from itertools import count, chain
from collections import Counter
from datetime import datetime
import redis
import django
from django.db.models import Avg
from django.shortcuts import render
from django.contrib import messages
from django.urls import reverse
from django.core.cache import cache
from django.http import HttpResponseRedirect
from django.template.loader import render_to_string
from django.template import RequestContext
from django.utils.translation import gettext as _
from django.contrib.sites.requests import RequestSite
from django.views.generic import TemplateView
from django.utils.decorators import method_decorator
from django.conf import settings
from django.contrib.auth import get_user_model
from mygpo.podcasts.models import Podcast, Episode
from mygpo.administration.auth import require_staff
from mygpo.administration.group import PodcastGrouper
from mygpo.maintenance.merge import PodcastMerger, IncorrectMergeException
from mygpo.administration.clients import UserAgentStats, ClientStats
from mygpo.users.views.registration import send_activation_email
from mygpo.administration.tasks import merge_podcasts
from mygpo.utils import get_git_head
from mygpo.data.models import PodcastUpdateResult
from mygpo.users.models import UserProxy
from mygpo.publisher.models import PublishedPodcast
from mygpo.api.httpresponse import JsonResponse
from mygpo.celery import celery
class InvalidPodcast(Exception):
""" raised when we try to merge a podcast that doesn't exist """
class AdminView(TemplateView):
@method_decorator(require_staff)
def dispatch(self, *args, **kwargs):
return super(AdminView, self).dispatch(*args, **kwargs)
class Overview(AdminView):
template_name = "admin/overview.html"
class HostInfo(AdminView):
""" shows host information for diagnosis """
template_name = "admin/hostinfo.html"
def get(self, request):
commit, msg = get_git_head()
base_dir = settings.BASE_DIR
hostname = socket.gethostname()
django_version = django.VERSION
feed_queue_status = self._get_feed_queue_status()
num_index_outdated = self._get_num_outdated_search_index()
avg_podcast_update_duration = self._get_avg_podcast_update_duration()
return self.render_to_response(
{
"git_commit": commit,
"git_msg": msg,
"base_dir": base_dir,
"hostname": hostname,
"django_version": django_version,
"num_celery_tasks": self._get_waiting_celery_tasks(),
"avg_podcast_update_duration": avg_podcast_update_duration,
"feed_queue_status": feed_queue_status,
"num_index_outdated": num_index_outdated,
}
)
def _get_waiting_celery_tasks(self):
con = celery.broker_connection()
args = {"host": con.hostname}
if con.port:
args["port"] = con.port
r = redis.StrictRedis(**args)
return r.llen("celery")
def _get_avg_podcast_update_duration(self):
queryset = PodcastUpdateResult.objects.filter(successful=True)
return queryset.aggregate(avg_duration=Avg("duration"))["avg_duration"]
def _get_feed_queue_status(self):
now = datetime.utcnow()
next_podcast = Podcast.objects.all().order_by_next_update().first()
delta = next_podcast.next_update - now
delta_mins = delta.total_seconds() / 60
return delta_mins
def _get_num_outdated_search_index(self):
return Podcast.objects.filter(search_index_uptodate=False).count()
class MergeSelect(AdminView):
template_name = "admin/merge-select.html"
def get(self, request):
num = int(request.GET.get("podcasts", 2))
urls = [""] * num
return self.render_to_response({"urls": urls})
class MergeBase(AdminView):
def _get_podcasts(self, request):
podcasts = []
for n in count():
podcast_url = request.POST.get("feed%d" % n, None)
if podcast_url is None:
break
if not podcast_url:
continue
p = Podcast.objects.get(urls__url=podcast_url)
podcasts.append(p)
return podcasts
class MergeVerify(MergeBase):
template_name = "admin/merge-grouping.html"
def post(self, request):
try:
podcasts = self._get_podcasts(request)
grouper = PodcastGrouper(podcasts)
def get_features(id_id):
e = Episode.objects.get(pk=id_id[0])
return ((e.url, e.title), id_id[0])
num_groups = grouper.group(get_features)
except InvalidPodcast as ip:
messages.error(request, _("No podcast with URL {url}").format(url=str(ip)))
podcasts = []
num_groups = []
return self.render_to_response({"podcasts": podcasts, "groups": num_groups})
class MergeProcess(MergeBase):
RE_EPISODE = re.compile(r"episode_([0-9a-fA-F]{32})")
def post(self, request):
try:
podcasts = self._get_podcasts(request)
except InvalidPodcast as ip:
messages.error(request, _("No podcast with URL {url}").format(url=str(ip)))
grouper = PodcastGrouper(podcasts)
features = {}
for key, feature in request.POST.items():
m = self.RE_EPISODE.match(key)
if m:
episode_id = m.group(1)
features[episode_id] = feature
get_features = lambda id_e: (features.get(id_e[0], id_e[0]), id_e[0])
num_groups = grouper.group(get_features)
if "renew" in request.POST:
return render(
request,
"admin/merge-grouping.html",
{"podcasts": podcasts, "groups": num_groups},
)
elif "merge" in request.POST:
podcast_ids = [p.get_id() for p in podcasts]
num_groups = list(num_groups)
res = merge_podcasts.delay(podcast_ids, num_groups)
return HttpResponseRedirect(
reverse("admin-merge-status", args=[res.task_id])
)
class MergeStatus(AdminView):
""" Displays the status of the merge operation """
template_name = "admin/task-status.html"
def get(self, request, task_id):
result = merge_podcasts.AsyncResult(task_id)
if not result.ready():
return self.render_to_response({"ready": False})
# clear cache to make merge result visible
# TODO: what to do with multiple frontends?
cache.clear()
try:
actions, podcast = result.get()
except IncorrectMergeException as ime:
messages.error(request, str(ime))
return HttpResponseRedirect(reverse("admin-merge"))
return self.render_to_response(
{"ready": True, "actions": actions.items(), "podcast": podcast}
)
class UserAgentStatsView(AdminView):
template_name = "admin/useragents.html"
def get(self, request):
uas = UserAgentStats()
useragents = uas.get_entries()
return self.render_to_response(
{
"useragents": useragents.most_common(),
"max_users": uas.max_users,
"total": uas.total_users,
}
)
class ClientStatsView(AdminView):
template_name = "admin/clients.html"
def get(self, request):
cs = ClientStats()
clients = cs.get_entries()
return self.render_to_response(
{
"clients": clients.most_common(),
"max_users": cs.max_users,
"total": cs.total_users,
}
)
class ClientStatsJsonView(AdminView):
def get(self, request):
cs = ClientStats()
clients = cs.get_entries()
return JsonResponse(map(self.to_dict, clients.most_common()))
def to_dict(self, res):
obj, count = res
if not isinstance(obj, tuple):
return obj, count
return obj._asdict(), count
class StatsView(AdminView):
""" shows general stats as HTML page """
template_name = "admin/stats.html"
def _get_stats(self):
return {
"podcasts": Podcast.objects.count_fast(),
"episodes": Episode.objects.count_fast(),
"users": UserProxy.objects.count_fast(),
}
def get(self, request):
stats = self._get_stats()
return self.render_to_response({"stats": stats})
class StatsJsonView(StatsView):
""" provides general stats as JSON """
def get(self, request):
stats = self._get_stats()
return JsonResponse(stats)
class ActivateUserView(AdminView):
""" Lets admins manually activate users """
template_name = "admin/activate-user.html"
def get(self, request):
return self.render_to_response({})
def post(self, request):
username = request.POST.get("username")
email = request.POST.get("email")
if not (username or email):
messages.error(request, _("Provide either username or email address"))
return HttpResponseRedirect(reverse("admin-activate-user"))
try:
user = UserProxy.objects.all().by_username_or_email(username, email)
except UserProxy.DoesNotExist:
messages.error(request, _("No user found"))
return HttpResponseRedirect(reverse("admin-activate-user"))
user.activate()
messages.success(
request,
_(
"User {username} ({email}) activated".format(
username=user.username, email=user.email
)
),
)
return HttpResponseRedirect(reverse("admin-activate-user"))
class ResendActivationEmail(AdminView):
""" Resends the users activation email """
template_name = "admin/resend-acivation.html"
def get(self, request):
return self.render_to_response({})
def post(self, request):
username = request.POST.get("username")
email = request.POST.get("email")
if not (username or email):
messages.error(request, _("Provide either username or email address"))
return HttpResponseRedirect(reverse("admin-resend-activation"))
try:
user = UserProxy.objects.all().by_username_or_email(username, email)
except UserProxy.DoesNotExist:
messages.error(request, _("No user found"))
return HttpResponseRedirect(reverse("admin-resend-activation"))
if user.is_active:
messages.success(request, "User {username} is already activated")
else:
send_activation_email(user, request)
messages.success(
request,
_(
"Email for {username} ({email}) resent".format(
username=user.username, email=user.email
)
),
)
return HttpResponseRedirect(reverse("admin-resend-activation"))
class MakePublisherInput(AdminView):
""" Get all information necessary for making someone publisher """
template_name = "admin/make-publisher-input.html"
class MakePublisher(AdminView):
""" Assign publisher permissions """
template_name = "admin/make-publisher-result.html"
def post(self, request):
User = get_user_model()
username = request.POST.get("username")
try:
user = User.objects.get(username__iexact=username)
except User.DoesNotExist:
messages.error(
request, 'User "{username}" not found'.format(username=username)
)
return HttpResponseRedirect(reverse("admin-make-publisher-input"))
feeds = request.POST.get("feeds")
feeds = feeds.split()
podcasts = set()
for feed in feeds:
try:
podcast = Podcast.objects.get(urls__url=feed)
except Podcast.DoesNotExist:
messages.warning(
request, "Podcast with URL {feed} not found".format(feed=feed)
)
continue
podcasts.add(podcast)
created, existed = self.set_publisher(request, user, podcasts)
if (created + existed) > 0:
self.send_mail(request, user, podcasts)
return HttpResponseRedirect(reverse("admin-make-publisher-result"))
def set_publisher(self, request, user, podcasts):
created, existed = PublishedPodcast.objects.publish_podcasts(user, podcasts)
messages.success(
request,
"Set publisher permissions for {created} podcasts; "
"{existed} already existed".format(created=created, existed=existed),
)
return created, existed
def send_mail(self, request, user, podcasts):
site = RequestSite(request)
msg = render_to_string(
"admin/make-publisher-mail.txt",
{
"user": user,
"podcasts": podcasts,
"support_url": settings.SUPPORT_URL,
"site": site,
},
request=request,
)
subj = get_email_subject(site, _("Publisher Permissions"))
user.email_user(subj, msg)
messages.success(
request, 'Sent email to user "{username}"'.format(username=user.username)
)
class MakePublisherResult(AdminView):
template_name = "make-publisher-result.html"
def get_email_subject(site, txt):
return "[{domain}] {txt}".format(domain=site.domain, txt=txt)
|
agpl-3.0
| -3,548,724,407,814,230,000 | 28.816017 | 87 | 0.607477 | false |
rjawor/concordia-server
|
tests/addAlignedLemmatizedTM.py
|
1
|
3161
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import json
import urllib2
import sys
import host
import time
BUFFER_SIZE = 500
address = 'http://'+host.concordia_host
if len(host.concordia_port) > 0:
address += ':'+host.concordia_port
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
def add_examples(examplesData):
req = urllib2.Request(address)
req.add_header('Content-Type', 'application/json')
response = json.loads(urllib2.urlopen(req, json.dumps(examplesData)).read())
if response['status'] == 'error':
raise Exception(response['message'])
if len(sys.argv) != 7:
raise Exception("wrong number of arguments")
name = sys.argv[1]
sourceFile = sys.argv[2]
sourceLangId = int(sys.argv[3])
targetFile = sys.argv[4]
targetLangId = int(sys.argv[5])
alignmentsFile = sys.argv[6]
if (file_len(sourceFile) != file_len(targetFile)):
raise Exception("source and target files are not of the same length!")
if (file_len(alignmentsFile) != 3*file_len(sourceFile)):
raise Exception("alignments file is not exactly 3 times longer than source and target")
totalExamples = file_len(sourceFile)
data = {
'operation': 'addTm',
'sourceLangId':sourceLangId,
'targetLangId':targetLangId,
'name':name,
'tmLemmatized':True
}
req = urllib2.Request(address)
req.add_header('Content-Type', 'application/json')
response = json.loads(urllib2.urlopen(req, json.dumps(data)).read())
tmId = int(response['newTmId'])
print "Added new tm: %d" % tmId
data = {
'operation': 'addAlignedLemmatizedSentences',
'tmId':tmId
}
examples = []
start = time.time()
with open(sourceFile) as sf, open(targetFile) as tf, open(alignmentsFile) as af:
for lineNumber in range(totalExamples):
sourceSentence = sf.readline().strip()
targetSentence = tf.readline().strip()
        # skip two lines of the alignments file; these are lemmatized and we need the raw sentences from the source and target files.
af.readline()
af.readline()
alignmentString = af.readline().strip()
examples.append([sourceSentence, targetSentence, alignmentString])
if len(examples) >= BUFFER_SIZE:
data['examples'] = examples
add_examples(data)
mark = time.time()
print "Added %d of %d lemmatized examples. Time elapsed: %.4f s, current speed: %.4f examples/second" % ( (lineNumber+1), totalExamples, mark-start, (lineNumber+1)/(mark-start))
examples = []
if len(examples) > 0:
data['examples'] = examples
add_examples(data)
end = time.time()
print "Added all %d lemmatized sentences. Time elapsed: %.4f s, overall speed: %.4f sentences/second" % ((lineNumber+1), end-start, (lineNumber+1)/(end-start))
print "Generating index..."
start = time.time()
data = {
'operation': 'refreshIndex',
'tmId' : tmId
}
req = urllib2.Request(address)
req.add_header('Content-Type', 'application/json')
urllib2.urlopen(req, json.dumps(data)).read()
end = time.time()
print "Index regeneration complete. The operation took %.4f s" % (end - start)
|
lgpl-3.0
| 7,864,143,049,183,775,000 | 27.736364 | 189 | 0.669092 | false |
derekjchow/models
|
research/object_detection/legacy/trainer.py
|
1
|
17662
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Detection model trainer.
This file provides a generic training method that can be used to train a
DetectionModel.
"""
import functools
import tensorflow as tf
from object_detection.builders import optimizer_builder
from object_detection.builders import preprocessor_builder
from object_detection.core import batcher
from object_detection.core import preprocessor
from object_detection.core import standard_fields as fields
from object_detection.utils import ops as util_ops
from object_detection.utils import variables_helper
from deployment import model_deploy
slim = tf.contrib.slim
def create_input_queue(batch_size_per_clone, create_tensor_dict_fn,
batch_queue_capacity, num_batch_queue_threads,
prefetch_queue_capacity, data_augmentation_options):
"""Sets up reader, prefetcher and returns input queue.
Args:
batch_size_per_clone: batch size to use per clone.
create_tensor_dict_fn: function to create tensor dictionary.
batch_queue_capacity: maximum number of elements to store within a queue.
num_batch_queue_threads: number of threads to use for batching.
prefetch_queue_capacity: maximum capacity of the queue used to prefetch
assembled batches.
data_augmentation_options: a list of tuples, where each tuple contains a
data augmentation function and a dictionary containing arguments and their
values (see preprocessor.py).
Returns:
input queue: a batcher.BatchQueue object holding enqueued tensor_dicts
(which hold images, boxes and targets). To get a batch of tensor_dicts,
call input_queue.Dequeue().
"""
tensor_dict = create_tensor_dict_fn()
tensor_dict[fields.InputDataFields.image] = tf.expand_dims(
tensor_dict[fields.InputDataFields.image], 0)
images = tensor_dict[fields.InputDataFields.image]
float_images = tf.to_float(images)
tensor_dict[fields.InputDataFields.image] = float_images
include_instance_masks = (fields.InputDataFields.groundtruth_instance_masks
in tensor_dict)
include_keypoints = (fields.InputDataFields.groundtruth_keypoints
in tensor_dict)
include_multiclass_scores = (fields.InputDataFields.multiclass_scores
in tensor_dict)
if data_augmentation_options:
tensor_dict = preprocessor.preprocess(
tensor_dict, data_augmentation_options,
func_arg_map=preprocessor.get_default_func_arg_map(
include_label_weights=True,
include_multiclass_scores=include_multiclass_scores,
include_instance_masks=include_instance_masks,
include_keypoints=include_keypoints))
input_queue = batcher.BatchQueue(
tensor_dict,
batch_size=batch_size_per_clone,
batch_queue_capacity=batch_queue_capacity,
num_batch_queue_threads=num_batch_queue_threads,
prefetch_queue_capacity=prefetch_queue_capacity)
return input_queue
def get_inputs(input_queue,
num_classes,
merge_multiple_label_boxes=False,
use_multiclass_scores=False):
"""Dequeues batch and constructs inputs to object detection model.
Args:
input_queue: BatchQueue object holding enqueued tensor_dicts.
num_classes: Number of classes.
merge_multiple_label_boxes: Whether to merge boxes with multiple labels
or not. Defaults to false. Merged boxes are represented with a single
box and a k-hot encoding of the multiple labels associated with the
boxes.
use_multiclass_scores: Whether to use multiclass scores instead of
groundtruth_classes.
Returns:
images: a list of 3-D float tensor of images.
image_keys: a list of string keys for the images.
locations_list: a list of tensors of shape [num_boxes, 4]
containing the corners of the groundtruth boxes.
classes_list: a list of padded one-hot (or K-hot) float32 tensors containing
target classes.
masks_list: a list of 3-D float tensors of shape [num_boxes, image_height,
image_width] containing instance masks for objects if present in the
input_queue. Else returns None.
keypoints_list: a list of 3-D float tensors of shape [num_boxes,
num_keypoints, 2] containing keypoints for objects if present in the
input queue. Else returns None.
weights_lists: a list of 1-D float32 tensors of shape [num_boxes]
containing groundtruth weight for each box.
"""
read_data_list = input_queue.dequeue()
label_id_offset = 1
def extract_images_and_targets(read_data):
"""Extract images and targets from the input dict."""
image = read_data[fields.InputDataFields.image]
key = ''
if fields.InputDataFields.source_id in read_data:
key = read_data[fields.InputDataFields.source_id]
location_gt = read_data[fields.InputDataFields.groundtruth_boxes]
classes_gt = tf.cast(read_data[fields.InputDataFields.groundtruth_classes],
tf.int32)
classes_gt -= label_id_offset
if merge_multiple_label_boxes and use_multiclass_scores:
raise ValueError(
'Using both merge_multiple_label_boxes and use_multiclass_scores is'
'not supported'
)
if merge_multiple_label_boxes:
location_gt, classes_gt, _ = util_ops.merge_boxes_with_multiple_labels(
location_gt, classes_gt, num_classes)
classes_gt = tf.cast(classes_gt, tf.float32)
elif use_multiclass_scores:
classes_gt = tf.cast(read_data[fields.InputDataFields.multiclass_scores],
tf.float32)
else:
classes_gt = util_ops.padded_one_hot_encoding(
indices=classes_gt, depth=num_classes, left_pad=0)
masks_gt = read_data.get(fields.InputDataFields.groundtruth_instance_masks)
keypoints_gt = read_data.get(fields.InputDataFields.groundtruth_keypoints)
if (merge_multiple_label_boxes and (
masks_gt is not None or keypoints_gt is not None)):
raise NotImplementedError('Multi-label support is only for boxes.')
weights_gt = read_data.get(
fields.InputDataFields.groundtruth_weights)
return (image, key, location_gt, classes_gt, masks_gt, keypoints_gt,
weights_gt)
return zip(*map(extract_images_and_targets, read_data_list))
def _create_losses(input_queue, create_model_fn, train_config):
"""Creates loss function for a DetectionModel.
Args:
input_queue: BatchQueue object holding enqueued tensor_dicts.
create_model_fn: A function to create the DetectionModel.
train_config: a train_pb2.TrainConfig protobuf.
"""
detection_model = create_model_fn()
(images, _, groundtruth_boxes_list, groundtruth_classes_list,
groundtruth_masks_list, groundtruth_keypoints_list,
groundtruth_weights_list) = get_inputs(
input_queue,
detection_model.num_classes,
train_config.merge_multiple_label_boxes,
train_config.use_multiclass_scores)
preprocessed_images = []
true_image_shapes = []
for image in images:
resized_image, true_image_shape = detection_model.preprocess(image)
preprocessed_images.append(resized_image)
true_image_shapes.append(true_image_shape)
images = tf.concat(preprocessed_images, 0)
true_image_shapes = tf.concat(true_image_shapes, 0)
if any(mask is None for mask in groundtruth_masks_list):
groundtruth_masks_list = None
if any(keypoints is None for keypoints in groundtruth_keypoints_list):
groundtruth_keypoints_list = None
detection_model.provide_groundtruth(
groundtruth_boxes_list,
groundtruth_classes_list,
groundtruth_masks_list,
groundtruth_keypoints_list,
groundtruth_weights_list=groundtruth_weights_list)
prediction_dict = detection_model.predict(images, true_image_shapes)
losses_dict = detection_model.loss(prediction_dict, true_image_shapes)
for loss_tensor in losses_dict.values():
tf.losses.add_loss(loss_tensor)
def train(create_tensor_dict_fn,
create_model_fn,
train_config,
master,
task,
num_clones,
worker_replicas,
clone_on_cpu,
ps_tasks,
worker_job_name,
is_chief,
train_dir,
graph_hook_fn=None):
"""Training function for detection models.
Args:
create_tensor_dict_fn: a function to create a tensor input dictionary.
create_model_fn: a function that creates a DetectionModel and generates
losses.
train_config: a train_pb2.TrainConfig protobuf.
master: BNS name of the TensorFlow master to use.
task: The task id of this training instance.
num_clones: The number of clones to run per machine.
worker_replicas: The number of work replicas to train with.
clone_on_cpu: True if clones should be forced to run on CPU.
ps_tasks: Number of parameter server tasks.
worker_job_name: Name of the worker job.
is_chief: Whether this replica is the chief replica.
train_dir: Directory to write checkpoints and training summaries to.
graph_hook_fn: Optional function that is called after the inference graph is
built (before optimization). This is helpful to perform additional changes
to the training graph such as adding FakeQuant ops. The function should
modify the default graph.
Raises:
ValueError: If both num_clones > 1 and train_config.sync_replicas is true.
"""
detection_model = create_model_fn()
data_augmentation_options = [
preprocessor_builder.build(step)
for step in train_config.data_augmentation_options]
with tf.Graph().as_default():
# Build a configuration specifying multi-GPU and multi-replicas.
deploy_config = model_deploy.DeploymentConfig(
num_clones=num_clones,
clone_on_cpu=clone_on_cpu,
replica_id=task,
num_replicas=worker_replicas,
num_ps_tasks=ps_tasks,
worker_job_name=worker_job_name)
# Place the global step on the device storing the variables.
with tf.device(deploy_config.variables_device()):
global_step = slim.create_global_step()
if num_clones != 1 and train_config.sync_replicas:
raise ValueError('In Synchronous SGD mode num_clones must ',
'be 1. Found num_clones: {}'.format(num_clones))
batch_size = train_config.batch_size // num_clones
if train_config.sync_replicas:
batch_size //= train_config.replicas_to_aggregate
with tf.device(deploy_config.inputs_device()):
input_queue = create_input_queue(
batch_size, create_tensor_dict_fn,
train_config.batch_queue_capacity,
train_config.num_batch_queue_threads,
train_config.prefetch_queue_capacity, data_augmentation_options)
# Gather initial summaries.
# TODO(rathodv): See if summaries can be added/extracted from global tf
# collections so that they don't have to be passed around.
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
global_summaries = set([])
model_fn = functools.partial(_create_losses,
create_model_fn=create_model_fn,
train_config=train_config)
clones = model_deploy.create_clones(deploy_config, model_fn, [input_queue])
first_clone_scope = clones[0].scope
if graph_hook_fn:
with tf.device(deploy_config.variables_device()):
graph_hook_fn()
# Gather update_ops from the first clone. These contain, for example,
# the updates for the batch_norm variables created by model_fn.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)
with tf.device(deploy_config.optimizer_device()):
training_optimizer, optimizer_summary_vars = optimizer_builder.build(
train_config.optimizer)
for var in optimizer_summary_vars:
tf.summary.scalar(var.op.name, var, family='LearningRate')
sync_optimizer = None
if train_config.sync_replicas:
training_optimizer = tf.train.SyncReplicasOptimizer(
training_optimizer,
replicas_to_aggregate=train_config.replicas_to_aggregate,
total_num_replicas=worker_replicas)
sync_optimizer = training_optimizer
with tf.device(deploy_config.optimizer_device()):
regularization_losses = (None if train_config.add_regularization_loss
else [])
total_loss, grads_and_vars = model_deploy.optimize_clones(
clones, training_optimizer,
regularization_losses=regularization_losses)
total_loss = tf.check_numerics(total_loss, 'LossTensor is inf or nan.')
# Optionally multiply bias gradients by train_config.bias_grad_multiplier.
if train_config.bias_grad_multiplier:
biases_regex_list = ['.*/biases']
grads_and_vars = variables_helper.multiply_gradients_matching_regex(
grads_and_vars,
biases_regex_list,
multiplier=train_config.bias_grad_multiplier)
# Optionally freeze some layers by setting their gradients to be zero.
if train_config.freeze_variables:
grads_and_vars = variables_helper.freeze_gradients_matching_regex(
grads_and_vars, train_config.freeze_variables)
# Optionally clip gradients
if train_config.gradient_clipping_by_norm > 0:
with tf.name_scope('clip_grads'):
grads_and_vars = slim.learning.clip_gradient_norms(
grads_and_vars, train_config.gradient_clipping_by_norm)
# Create gradient updates.
grad_updates = training_optimizer.apply_gradients(grads_and_vars,
global_step=global_step)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops, name='update_barrier')
with tf.control_dependencies([update_op]):
train_tensor = tf.identity(total_loss, name='train_op')
# Add summaries.
for model_var in slim.get_model_variables():
global_summaries.add(tf.summary.histogram('ModelVars/' +
model_var.op.name, model_var))
for loss_tensor in tf.losses.get_losses():
global_summaries.add(tf.summary.scalar('Losses/' + loss_tensor.op.name,
loss_tensor))
global_summaries.add(
tf.summary.scalar('Losses/TotalLoss', tf.losses.get_total_loss()))
# Add the summaries from the first clone. These contain the summaries
# created by model_fn and either optimize_clones() or _gather_clone_loss().
summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES,
first_clone_scope))
summaries |= global_summaries
# Merge all summaries together.
summary_op = tf.summary.merge(list(summaries), name='summary_op')
# Soft placement allows placing on CPU ops without GPU implementation.
session_config = tf.ConfigProto(allow_soft_placement=True,
log_device_placement=False)
# Save checkpoints regularly.
keep_checkpoint_every_n_hours = train_config.keep_checkpoint_every_n_hours
saver = tf.train.Saver(
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
# Create ops required to initialize the model from a given checkpoint.
init_fn = None
if train_config.fine_tune_checkpoint:
if not train_config.fine_tune_checkpoint_type:
# train_config.from_detection_checkpoint field is deprecated. For
# backward compatibility, fine_tune_checkpoint_type is set based on
# from_detection_checkpoint.
if train_config.from_detection_checkpoint:
train_config.fine_tune_checkpoint_type = 'detection'
else:
train_config.fine_tune_checkpoint_type = 'classification'
var_map = detection_model.restore_map(
fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type,
load_all_detection_checkpoint_vars=(
train_config.load_all_detection_checkpoint_vars))
available_var_map = (variables_helper.
get_variables_available_in_checkpoint(
var_map, train_config.fine_tune_checkpoint,
include_global_step=False))
init_saver = tf.train.Saver(available_var_map)
def initializer_fn(sess):
init_saver.restore(sess, train_config.fine_tune_checkpoint)
init_fn = initializer_fn
slim.learning.train(
train_tensor,
logdir=train_dir,
master=master,
is_chief=is_chief,
session_config=session_config,
startup_delay_steps=train_config.startup_delay_steps,
init_fn=init_fn,
summary_op=summary_op,
number_of_steps=(
train_config.num_steps if train_config.num_steps else None),
save_summaries_secs=120,
sync_optimizer=sync_optimizer,
saver=saver)
|
apache-2.0
| -5,056,624,637,753,159,000 | 41.456731 | 80 | 0.678915 | false |
satyrius/cmsplugin-articles
|
cmsplugin_articles/south_migrations/0001_initial.py
|
1
|
3016
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ArticlesPlugin'
db.create_table(u'cmsplugin_articles_articlesplugin', (
(u'cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
('limit', self.gf('django.db.models.fields.PositiveIntegerField')()),
))
db.send_create_signal(u'cmsplugin_articles', ['ArticlesPlugin'])
def backwards(self, orm):
# Deleting model 'ArticlesPlugin'
db.delete_table(u'cmsplugin_articles_articlesplugin')
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
u'cmsplugin_articles.articlesplugin': {
'Meta': {'object_name': 'ArticlesPlugin', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'limit': ('django.db.models.fields.PositiveIntegerField', [], {})
}
}
complete_apps = ['cmsplugin_articles']
|
mit
| -4,347,444,621,616,893,000 | 55.924528 | 157 | 0.589191 | false |
alejolp/argentum-py-server
|
argentumserver/util.py
|
1
|
2663
|
# -*- coding: utf-8 -*-
"""
AONX Server - A small Argentum Online server.
Copyright (C) 2011 Alejandro Santos <alejolp@alejolp.com.ar>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Everything that doesn't fit into other modules goes here.
"""
from ConfigParser import SafeConfigParser
def debug_print(*args):
print "[debug] ",
for a in args:
print a,
print
def between(x, a, b):
return x >= a and x <= b
def espiral(pos, lim=100):
"""Genera un patron de busqueda en forma de espiral."""
x, y = pos
yield (x, y)
d = 1
s = 1
while lim > 0:
for a in xrange(d):
x = x + s
lim = lim - 1
yield (x, y)
if lim == 0: return
for a in xrange(d):
y = y + s
lim = lim - 1
yield (x, y)
if lim == 0: return
s = s * -1
d = d + 1
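# Example (illustrative): list(espiral((0, 0), lim=8)) yields the center plus eight
# surrounding cells in spiral order:
# (0, 0), (1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1), (0, -1), (1, -1)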
class MyConfigParser(SafeConfigParser):
def read(self, *args, **kwargs):
ret = SafeConfigParser.read(self, *args, **kwargs)
secs = list(self.sections())
for s in secs:
items = self.items(s)
self.remove_section(s)
s = s.lower()
self.add_section(s)
for i in items:
self.set(s, i[0].lower(), i[1])
return ret
def get(self, section, option, *args, **kwargs):
return SafeConfigParser.get(self, section.lower(), option.lower(), *args, **kwargs)
def has_section(self, section):
return SafeConfigParser.has_section(self, section.lower())
def getint(self, section, option):
val = self.get(section, option)
if "'" in val:
val = val.split("'", 1)[0]
return int(val)
class positiverolist(object):
__slots__ = ('data',)
def __init__(self, data):
self.data = data
def __getitem__(self, i):
if i < 0:
raise IndexError('negative')
return self.data[i]
def __setitem__(self, i, v):
raise TypeError('read only list')
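# Example (illustrative): positiverolist wraps a sequence so that negative indices
# are rejected instead of wrapping around, and writes are refused:
#
#     view = positiverolist([10, 20, 30])
#     view[1]      # -> 20
#     view[-1]     # raises IndexError('negative')
#     view[0] = 5  # raises TypeError('read only list')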
|
gpl-3.0
| -5,969,421,147,287,314,000 | 25.356436 | 91 | 0.574756 | false |
SYSTRAN/nlp-api-python-client
|
tests/test_ner.py
|
1
|
2741
|
#!/usr/bin/env python
# coding: utf-8
import os
import unittest
import systran_nlp_api
import systran_nlp_api.configuration
class NerApiTests(unittest.TestCase):
def setUp(self):
api_key_file = os.path.join(os.path.dirname(__file__), "../", "api_key.txt")
systran_nlp_api.configuration.load_api_key(api_key_file)
self.api_client = systran_nlp_api.ApiClient()
self.ner_api = systran_nlp_api.NerApi(self.api_client)
def test_ner_extract_entity(self):
lang = "en"
input = "Bodies from the MH17 crash are being kept on this train, as Natalia Antelava reports\n" \
"Pro-Russian rebels have allowed Dutch investigators to examine bodies from the crashed Malaysia Airlines plane at a railway station in eastern Ukraine.\n" \
"The three Dutch experts said the train might leave the town of Torez later.\n" \
"All 298 people on board flight MH17 died when it crashed over the rebel-held area on 17 July. The US and other nations say there is growing evidence of Russian complicity in the crash."
result = self.ner_api.nlp_ner_extract_entities_get(lang=lang, input=input)
self.assertIsNotNone(result)
print result.__repr__()
def test_lid_extract_entity_with_file(self):
lang = "en"
input_file = os.path.join(os.path.dirname(__file__), "", "ner_extraction.txt")
result = self.ner_api.nlp_ner_extract_entities_get(lang=lang, input_file=input_file)
self.assertIsNotNone(result)
print result.__repr__()
def test_ner_extract_annotation(self):
lang = "en"
input = "Bodies from the MH17 crash are being kept on this train, as Natalia Antelava reports\n" \
"Pro-Russian rebels have allowed Dutch investigators to examine bodies from the crashed Malaysia Airlines plane at a railway station in eastern Ukraine.\n" \
"The three Dutch experts said the train might leave the town of Torez later.\n" \
"All 298 people on board flight MH17 died when it crashed over the rebel-held area on 17 July. The US and other nations say there is growing evidence of Russian complicity in the crash."
result = self.ner_api.nlp_ner_extract_annotations_get(lang=lang, input=input)
self.assertIsNotNone(result)
print result.__repr__()
def test_lid_extract_annotation_with_file(self):
lang = "en"
input_file = os.path.join(os.path.dirname(__file__), "", "ner_extraction.txt")
result = self.ner_api.nlp_ner_extract_annotations_get(lang=lang, input_file=input_file)
self.assertIsNotNone(result)
print result.__repr__()
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| -1,255,574,166,305,409,500 | 51.711538 | 202 | 0.669464 | false |
magooos/KITT
|
interface.py
|
1
|
2476
|
__author__ = 'laurentmeyer'
# Set the command type to auto
import sys, telnetlib, time
MOCK_SERVER = "192.168.2.217"
MOCK_SERVER_PORT = 35000
def beginCommunication(telnetaddr = MOCK_SERVER, telnetport = MOCK_SERVER_PORT):
telnet = telnetlib.Telnet(telnetaddr, port=telnetport)
return telnet
def startInfiniteLoop(telnetConnection):
if telnetConnection is not None:
getRPM(telnetConnection)
fuelGals(telnetConnection)
getTheFirstTemperature(telnetConnection)
getSpeedOfCar(telnetConnection)
getFuelPressure(telnetConnection)
def getRPM(telnet):
telnet.write(b"010C")
time.sleep(0.2)
output = telnet.read_eager()
output = output.splitlines()
oBytes = output[1].split(b' ')
dataString = oBytes[4]+oBytes[5]
# The rpm is given multiplied by 4.
value = int(dataString, 16)/4
print("RPM: "+str(value))
def getSpeedOfCar(telnet):
telnet.write(b"010d")
time.sleep(0.2)
output = telnet.read_eager()
output = output.splitlines()
oBytes = output[1].split(b' ')
dataString = oBytes[len(oBytes)-1]
value = int(dataString, 16)
print ("Speed: "+ str(value)+" km/h")
def getTheFirstTemperature(telnet):
telnet.write(b"0105")
time.sleep(0.2)
output = telnet.read_eager()
output = output.splitlines()
oBytes = output[1].split(b' ')
dataString = oBytes[len(oBytes)-1]
value = int(dataString, 16)-40
print ("Engine coolant temp: "+str(value))
# Seems quite complicated, has to be tested on real hardware
def getEgt(telnet):
pass
def fuelGals(telnet):
telnet.write(b"0107")
time.sleep(0.2)
output = telnet.read_eager()
output = output.splitlines()
oBytes = output[1].split(b' ')
dataString = oBytes[len(oBytes)-1]
value = ((int(dataString, 16)-128)*100/128)
print ("Fuel: "+ str(value)+" %")
def getFuelPressure(telnet):
telnet.write(b"010a")
time.sleep(0.2)
output = telnet.read_eager()
output = output.splitlines()
oBytes = output[1].split(b' ')
dataString = oBytes[len(oBytes)-1]
value = int(dataString, 16)*3
print ("Fuel pressure: "+str(value) + " kPa")
if __name__ == '__main__':
    if len(sys.argv) == 3:
        telnetaddr = sys.argv[1]
        telnetport = int(sys.argv[2])
telnetConnection = beginCommunication(telnetaddr = telnetaddr, telnetport = telnetport)
else:
telnetConnection = beginCommunication()
startInfiniteLoop(telnetConnection)
|
apache-2.0
| 6,595,756,557,333,647,000 | 29.207317 | 95 | 0.656704 | false |
SociedadMartinez/ToolShare
|
registration/views.py
|
1
|
31482
|
"""
@authors Tina Howard, Grant Gadomski, Nick Mattis, Laura Silva, Nils Sohn
"""
from django.shortcuts import render, get_object_or_404, redirect
from django.utils.datastructures import MultiValueDictKeyError
from django.http import Http404, HttpResponseRedirect, HttpResponse #TODO Take off HttpResponse after figuring out form error problem.
from django.core.urlresolvers import reverse
from django.core.exceptions import ValidationError
from django.db import IntegrityError
from django.contrib.auth import authenticate, login
from django.contrib.auth import login as django_login, authenticate, logout as django_logout
from datetime import datetime, timedelta, date
from registration.models import ShareZone
from registration.models import TYPES_OF_TOOLS, CONDITIONS, AVAILABILITY
from registration.models import SECURITY_QUESTIONS_1, SECURITY_QUESTIONS_2
from registration.models import User, Tool, Reservation
from registration.forms import UserRegistrationForm, ToolRegistrationForm, ToolEditForm, UserEditForm,LoginForm
from notification import signals
from notification.models import Notification
from notification.views import rate_user
from sheds.models import Sheds, ShedMember
from messaging.models import Message
import sys
def user_login(request):
"""
    Takes in a login attempt and logs the user in if correct, or redirects back to the index if not.
"""
email = request.POST['email']
password = request.POST['password']
try:
email_check = User.objects.get(email=email)
except User.DoesNotExist:
return render(request, 'home/index.html', {'password': True, 'user': False})
user = authenticate(email=email, password=password)
if (user is not None):
django_login(request, user)
return HttpResponseRedirect(reverse('home:welcome', args=()))
else:
return render(request, 'home/index.html', {'password': False, 'user': True})
def user_profile(request, user_id):
'''
Shows the user's profile and the tools that owner owns
'''
user = User.objects.get(id=user_id)
tools = Tool.objects.filter(owner_of_tool=user)
if user == request.user:
is_user = True
else:
is_user = False
return render(request, 'registration/user_profile.html',{'tools':tools, 'the_user':user, 'is_user': is_user, 'tool_types': TYPES_OF_TOOLS, 'conditions': CONDITIONS})
def show_user_messages(request, user_id):
'''
    Part of the messaging app; shows the recent messages exchanged between the logged-in user and the given user
'''
user = User.objects.get(id=user_id)
#Get all messages either with the to_user being the request.user and from_user being the var user, or the to_user
#being the var user and the from_user being the request.user.
to_messages = list(Message.objects.filter(receiving_user=user, sending_user=request.user))
from_messages = list(Message.objects.filter(receiving_user=request.user, sending_user=user))
all_messages_unsorted = (to_messages + from_messages)
all_messages = sorted(all_messages_unsorted, key=lambda x: x.date_sent, reverse=True)
if len(all_messages) == 0:
messages = None
else:
if len(all_messages) < 10:
messages = all_messages
else:
messages = all_messages[:10]
messages.reverse()
return render(request, 'registration/show_user_messages.html', {'messages': messages, 'profile_user': user})
def user_logout(request):
"""
Logs the currently logged-in user out.
"""
django_logout(request)
request.session.flush()
return HttpResponseRedirect(reverse('home:index', args=()))
def user_registration(request):
"""
Renders the form for the new user to create an account.
@param request: A HTTP request object.
@returns: A render of user_registration.html.
"""
form = UserRegistrationForm()
return render(request, 'registration/user_registration.html', {'form': form, 'email_not_unique': False, 'password': False}) #Renders registration page.
def register_user(request):
"""
Takes in the user registration data from the template, checks to make sure it's all valid, saves to model.
@param request: A HTTP request object.
@returns: A redirect to the new user's dashboard if successful, a re-render of the form if not.
"""
if request.method == 'POST':
form = UserRegistrationForm(request.POST)
if form.is_valid():
full_name = form.cleaned_data['full_name']
email = form.cleaned_data['email']
zipcode = form.cleaned_data['zipcode']
password = form.cleaned_data['password']
password_confirmation = form.cleaned_data['password_confirmation']
pick_up = form.cleaned_data['pick_up']
if full_name.isspace():
return render(request, 'registration/user_registration.html', {'form': form, 'email_not_unique': False, 'password': False, 'whitespace':True})
if (password == password_confirmation):
share_zone = ShareZone(request.POST['zipcode'])
share_zone.save()
user = User(full_name=full_name, email=email, password=password,
zipcode=zipcode, my_zone=share_zone, pick_up=pick_up)
user.set_password(request.POST['password'])
else:
return render(request, 'registration/user_registration.html', {'form': form, 'email_not_unique': False, 'password': True, 'whitespace':False})
try:
user.save()
except IntegrityError:
return (render(request, 'registration/user_registration.html', {'form': form, 'email_not_unique': True, 'password': False,"whitespace": False}))
"""
            creates a community shed if one does not exist and sets up the relationship between the new user and the shed;
            if the community shed for that zipcode already exists then just add a relationship for the new user
"""
if not Sheds.objects.filter(shed_zipcode=zipcode, shed_type=1):
community_shed = Sheds.objects.create(shed_name='Community'+ str(zipcode), shed_type=1, shed_zipcode=zipcode)
community_shed.save()
member = ShedMember.objects.create(user=user, shed=community_shed, member_type=4)
member.save()
else:
community_shed = Sheds.objects.filter(shed_zipcode=zipcode, shed_type=1)[0]
member = ShedMember.objects.create(user=user, shed=community_shed, member_type=5)
member.save()
#create a home shed and relationship for the new user
home_shed = Sheds.objects.create(shed_zipcode=zipcode, shed_type=3, shed_name=user.email, shed_address=user.address)
home_shed.save()
membership = ShedMember.objects.create(user=user, shed=home_shed, member_type=4)
membership.save()
new_user = authenticate(email=email, password=password)
login(request, new_user)
return HttpResponseRedirect(reverse('home:get_started', args=()))
else:
#return HttpResponse(user_registration_form.errors) #Use this one to see where the form is failing validation.
return(render(request, 'registration/user_registration.html', {'form': form, 'email_not_unique': False}))
else:
        return render(request, 'home/index.html')
def tool_registration(request):
"""
Renders the form for the user to add a tool to their account.
@param request: A HTTP request object.
@returns: A rendering of tool_registration.html.
"""
form = ToolRegistrationForm()
return render(request, 'registration/tool_registration.html', {'form': form, 'tool_types' : TYPES_OF_TOOLS, 'conditions' : CONDITIONS, 'current_user': request.user}) #Render tool registration class.
def register_tool(request):
"""
Takes in the tool registration data from the template, checks to make sure it's all valid, saves to model.
@param request: A HTTP request object.
@returns: A redirect to the user's dashboard if successful, a re-render fo the form if not.
"""
if request.method == 'POST':
form = ToolRegistrationForm(request.POST, request.FILES)
current_user = request.user
tool_owner = current_user
tool_zipcode = current_user.zipcode
home_shed = Sheds.objects.filter(user=current_user, shed_type=3)[0]
if form.is_valid():
tool_name = form.cleaned_data['tool_name']
type_of_tool = form.cleaned_data['type_of_tool']
description = form.cleaned_data['description']
condition = form.cleaned_data['condition']
pick_up_tool = form.cleaned_data['pick_up_tool']
image = form.cleaned_data['image']
print(image)
if image == None:
image = "toolimg/defaults/" + str(type_of_tool) + ".jpg"
print(image)
tool = Tool(tool_name=tool_name, type_of_tool=type_of_tool, owner_of_tool=tool_owner, image=image, current_shed=home_shed,
description=description, condition=condition, pick_up_tool=pick_up_tool, tool_zipcode=tool_zipcode)
if tool_name.isspace():
return render(request, 'registration/tool_registration.html', {'form': form, 'tool_types' : TYPES_OF_TOOLS, 'conditions' : CONDITIONS, 'current_user': request.user, 'whitespace':True})
tool.save()
user = request.user
user_tools = Tool.objects.filter(owner_of_tool=user)
if not user_tools:
user_tools = None
alert_type = "success"
alert_message = "Your tool was made successfully!"
return render(request, 'registration/my_tools.html', {'user': user, 'tools': user_tools, 'alert_type': alert_type, 'alert_message': alert_message}) #TODO Change to Render user's dashboard.
else:
return render(request, 'registration/tool_registration.html', {'form': form, 'tool_types': TYPES_OF_TOOLS, 'conditions': CONDITIONS})
else:
return HttpResponse("Not POST Data.") #Debug
def remove_tool(request, tool_id):
"""
Allows the user to deregister a tool they own.
"""
tool = Tool.objects.get(id=tool_id)
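    # Cancel every outstanding reservation and notify each borrower before the tool is deleted.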
reservations = Reservation.objects.filter(tool=tool)
for reservation in reservations:
borrower = reservation.borrower
signals.tool_deregistered.send(sender=None, tool=tool, owner=tool.owner_of_tool, requester=borrower)
reservation.delete()
tool.delete()
alert_type = "success"
alert_message = "Tool has been removed!"
user = request.user
user_tools = Tool.objects.filter(owner_of_tool=user)
if not user_tools:
user_tools = None
return render(request, 'registration/my_tools.html', {'user': user, 'tools': user_tools, 'alert_type': alert_type, 'alert_message': alert_message})
def return_tool(request, reservation_id):
"""
    Sends a notification to the owner of the tool asking them to confirm that the tool has been returned in person.
"""
reservation = Reservation.objects.get(id=reservation_id)
tool_to_return = reservation.tool
    tool_returned_notification = signals.tool_returned_check.send(sender=None, owner=tool_to_return.owner_of_tool, borrower=reservation.borrower, tool_id=tool_to_return.id, reservation_id=reservation.id)
return HttpResponseRedirect(reverse('home:welcome', args=()))
def tool_returned(request, notification_id):
"""
    Called once the tool has been confirmed as returned; clears the tool's current user and deletes the reservation.
"""
the_notification = Notification.objects.get(id=notification_id)
tool = the_notification.tool
reservation = the_notification.reservation
tool.current_user_of_tool = None
tool.save()
reservation.delete()
return rate_user(request, the_notification) #TODO BREAKING HERE Notification id doesn't exist.
def change_tool_availability(request, tool_id):
"""
Allows the user to change the availability of a tool they own.
"""
tool = Tool.objects.get(id=tool_id)
if (tool.availability == 5):
tool.availability = 4
else:
tool.availability = 5
tool.save()
return HttpResponseRedirect(reverse('registration:my_tools', args=()))
def add_address(request, tool_id):
'''
    Adds an address to the user's home shed so that the shed can be located for tool pick-up.
'''
user = request.user
address = request.POST['address']
tool = Tool.objects.get(id=tool_id)
shed = Sheds.objects.filter(shed_type=3).get(user=user)
if (address != ''):
user.address = address
shed.shed_address = address
tool.availability = 4
user.save()
shed.save()
tool.save()
return my_tools(request)
def borrow_tool(request, tool_id):
"""
Changes current user to the borrower and sends a notification to the owner
"""
tool = Tool.objects.get(id=tool_id)
owner = tool.owner_of_tool
borrower = request.user
tool.current_user_of_tool = borrower
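    # Notify the owner unless they are borrowing their own tool, then update usage statistics
    # for the tool, the owner and the borrower.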
if owner != borrower:
tool_borrowed = signals.tool_borrowed.send(sender=None, owner=owner, borrower=request.user, tool_id=tool_id)
tool.uses += 1
tool.date_last_used = datetime.now()
owner.times_lended += 1
owner.save()
request.user.times_borrowed += 1
borrower.save()
tool.save()
return HttpResponseRedirect(reverse('home:welcome', args=()))
def cancel_reservation(request, reservation_id):
"""
Deletes reservation for the tool
"""
reservation = Reservation.objects.get(id=reservation_id)
reservation.delete()
tools = Tool.objects.filter(owner_of_tool=request.user)
alert_type = "success"
alert_message = "Reservation has been canceled!"
user = request.user
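    # Everything below rebuilds the welcome/dashboard context (pickup notifications, reservation list,
    # latest message and neighbourhood statistics) so the page can be re-rendered with the cancellation alert.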
current_date = date.today()
users_reservations = Reservation.objects.filter(borrower=user)
for reservation in users_reservations:
tool = reservation.tool
if (reservation.start_date >= current_date and reservation.end_date <= current_date):
if (tool.current_user_of_tool != user):
#Send notification.
tool = reservation.tool
pickup_available = signals.tool_ready_for_borrow.send(sender=None, owner=tool.owner_of_tool, borrower=user, tool=tool)
notifications = Notification.objects.filter(to_user=user, hasBeenViewed=False)
if not notifications:
notifications = None
user_tools = Tool.objects.filter(owner_of_tool=user)
if not user_tools:
user_tools = None
reservation_list = Reservation.objects.filter(borrower=user).order_by('start_date')
if (not reservation_list) and (not type(reservation_list) is Reservation):
reservation_list = None
user_messages = Message.objects.filter(receiving_user=user).order_by('-date_sent')
if len(user_messages) > 0:
user_message = user_messages[0]
else:
user_message = None
zipcode = user.zipcode
user_stats = User.objects.filter(zipcode=zipcode)
times_lended = list(user_stats.order_by('-times_lended'))
if len(times_lended) == 0:
        time_lended = None
else:
time_lended = times_lended[0]
times_borrowed = list(user_stats.order_by('-times_borrowed'))
if len(times_borrowed) == 0:
time_borrowed = None
else:
time_borrowed = times_borrowed[0]
tool_stats = Tool.objects.filter(tool_zipcode=zipcode)
times_used = list(tool_stats.order_by('-uses'))
if len(times_used) == 0:
time_used = None
else:
time_used = times_used[0]
dates_used = list(tool_stats.order_by('date_last_used'))
if len(dates_used) == 0:
date_used = None
else:
date_used = dates_used[0]
ratings_stats = User.objects.filter(zipcode=zipcode)
thumbs_up = list(ratings_stats.order_by('-thumbs_up'))
if len(thumbs_up) == 0:
thumb_up = None
else:
thumb_up = thumbs_up[0]
community_shed = Sheds.objects.get(shed_zipcode=user.zipcode, shed_type=1)
    if community_shed.shed_address is None:
c_address = True #user does need to add address
else:
c_address = False #shed already has an address
return render(request, 'home/welcome.html', {'user': user, 'community_address': c_address, 'notifications': notifications, 'tools': user_tools, 'reservation_list': reservation_list, 'user_message': user_message, 'today':date.today(),
'time_lended': time_lended, 'time_borrowed': time_borrowed, 'time_used': time_used, 'date_used': date_used, 'thumb_up': thumb_up, 'alert_type': alert_type, 'alert_message': alert_message})
return render(request, 'home/welcome.html', {'tools': tools, 'alert_type': alert_type, 'alert_message': alert_message})
def tool_request_accept(request, notification_id):
"""
If the owner of a tool accepted a request to borrow it from their home shed, it changes the current user of the tool
to be the requester and sends a message to the requester notifying that they're now borrowing the tool.
"""
notification = Notification.objects.get(id=notification_id)
tool = Tool.objects.get(id=notification.tool_id)
borrower = notification.from_user
owner = notification.to_user
message = ("You have reserved " + tool.owner_of_tool.email + "'s " + tool.tool_name + ". It can be picked up at " + tool.owner_of_tool.address + ". The owner wants you to " + tool.owner_of_tool.pick_up)
request_result = signals.request_result.send(sender=None, result=message, owner=tool.owner_of_tool, borrower=borrower)
notification.hasBeenViewed = True
notification.save()
reservation = Reservation.objects.create(tool=tool, borrower=borrower, start_date=notification.start_date, end_date=notification.end_date)
return HttpResponseRedirect(reverse('home:welcome', args=()))
def tool_request_decline(request, notification_id):
"""
    This is only used in the home-shed area. When a request is declined, a window pops up with
    the reason the owner declined. This is also used for reserved times.
"""
notification = Notification.objects.get(id=notification_id)
tool_owner = request.user
message = "I need the tool for the given time."
message = (tool_owner.email + " has declined your request to borrow their " + notification.tool.tool_name + " because '" + message + "'.")
request_result = signals.request_result.send(sender=None, result=message, owner=tool_owner, borrower=notification.from_user)
notification.hasBeenViewed = True
notification.save()
return HttpResponseRedirect(reverse('home:welcome', args=()))
def display_tool(request, tool_id):
"""
    Displays the detail page for a single tool.
"""
tool = Tool.objects.get(id=tool_id)
user = request.user
return render(request, 'registration/tool.html', {'tool': tool, 'user': user, 'tool_types': TYPES_OF_TOOLS, 'conditions': CONDITIONS})
def my_tools(request):
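    """
    Lists the tools owned by the currently logged-in user.
    """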
user = request.user
user_tools = Tool.objects.filter(owner_of_tool=user)
if not user_tools:
user_tools = None
return render(request, 'registration/my_tools.html', {'user': user, 'tools': user_tools})
def inately_is_not_a_gun(not_a_gun):
"""
    Ensures that the tool coming in is not a gun for legality reasons. (Note: This method is a joke.)
@param not_a_gun: A tool that is not a gun.
@returns: True.
"""
if (not_a_gun):
return True
def profile_editing(request):
"""
Allows a user to edit their profile.
"""
user = request.user
return render(request, 'registration/profile.html', {'user': user, 'questions1': SECURITY_QUESTIONS_1, 'questions2': SECURITY_QUESTIONS_2, 'password' : True}) #Render profile class.
def edit_profile(request):
"""
Edits the user info in the database.
"""
user = request.user
if user == request.user:
is_user = True
else:
is_user = False
tools = Tool.objects.filter(owner_of_tool=user)
alert_type = None
alert_message = None
if request.method == 'POST':
form = UserEditForm(request.POST)
if form.is_valid():
#user.profile_image = form.cleaned_data['profile_image']
user.full_name = form.cleaned_data['full_name']
user.pick_up = form.cleaned_data['pick_up']
user.question1 = form.cleaned_data['question1']
user.answer1 = form.cleaned_data['answer1']
user.question2 = form.cleaned_data['question2']
user.answer2 = form.cleaned_data['answer2']
user.address = form.cleaned_data['address']
shed = Sheds.objects.filter(shed_type=3).get(user=user)
shed.shed_address = form.cleaned_data['address']
shed.save()
if user.answer1 == "" or user.answer1 == "None":
user.answer1 = None
if user.answer2 == "" or user.answer2 == "None":
user.answer2 = None
new_password = form.cleaned_data['new_password']
new_password_confirm = form.cleaned_data['new_password_confirm']
if new_password == new_password_confirm:
user.set_password(new_password)
else:
return render(request, 'registration/profile.html', {'user': user, 'questions1': SECURITY_QUESTIONS_1, 'questions2': SECURITY_QUESTIONS_2, 'password' : False})
user.save()
home_shed = Sheds.objects.get(user=user, shed_type=3)
            home_shed.shed_address = user.address
home_shed.save()
alert_type = "success"
alert_message = "Profile changes have been saved!"
user = request.user
current_date = date.today()
users_reservations = Reservation.objects.filter(borrower=user)
for reservation in users_reservations:
tool = reservation.tool
if (reservation.start_date >= current_date and reservation.end_date <= current_date):
if (tool.current_user_of_tool != user):
#Send notification.
tool = reservation.tool
pickup_available = signals.tool_ready_for_borrow.send(sender=None, owner=tool.owner_of_tool, borrower=user, tool=tool)
notifications = Notification.objects.filter(to_user=user, hasBeenViewed=False)
if not notifications:
notifications = None
user_tools = Tool.objects.filter(owner_of_tool=user)
if not user_tools:
user_tools = None
reservation_list = Reservation.objects.filter(borrower=user).order_by('start_date')
if (not reservation_list) and (not type(reservation_list) is Reservation):
reservation_list = None
user_messages = Message.objects.filter(receiving_user=user).order_by('-date_sent')
if len(user_messages) > 0:
user_message = user_messages[0]
else:
user_message = None
zipcode = user.zipcode
user_stats = User.objects.filter(zipcode=zipcode)
times_lended = list(user_stats.order_by('-times_lended'))
if len(times_lended) == 0:
                time_lended = None
else:
time_lended = times_lended[0]
times_borrowed = list(user_stats.order_by('-times_borrowed'))
if len(times_borrowed) == 0:
time_borrowed = None
else:
time_borrowed = times_borrowed[0]
tool_stats = Tool.objects.filter(tool_zipcode=zipcode)
times_used = list(tool_stats.order_by('-uses'))
if len(times_used) == 0:
time_used = None
else:
time_used = times_used[0]
dates_used = list(tool_stats.order_by('date_last_used'))
if len(dates_used) == 0:
date_used = None
else:
date_used = dates_used[0]
ratings_stats = User.objects.filter(zipcode=zipcode)
thumbs_up = list(ratings_stats.order_by('-thumbs_up'))
if len(thumbs_up) == 0:
thumb_up = None
else:
thumb_up = thumbs_up[0]
community_shed = Sheds.objects.get(shed_zipcode=user.zipcode, shed_type=1)
            if community_shed.shed_address is None:
c_address = True #user does need to add address
else:
c_address = False #shed already has an address
return render(request, 'home/welcome.html', {'user': user, 'community_address': c_address, 'notifications': notifications, 'tools': user_tools, 'reservation_list': reservation_list, 'user_message': user_message, 'today':date.today(),
'time_lended': time_lended, 'time_borrowed': time_borrowed, 'time_used': time_used, 'date_used': date_used, 'thumb_up': thumb_up, 'alert_type': alert_type, 'alert_message': alert_message})
else:
alert_type = "failure"
alert_message = "The fields were not filled out properly!"
return render(request, 'registration/profile.html', {'user': user, 'questions1': SECURITY_QUESTIONS_1, 'questions2': SECURITY_QUESTIONS_2, 'alert_type': alert_type, 'alert_message': alert_message})
else:
return HttpResponse("Not POST Data.") #Debug
def tool_editing(request, tool_id):
"""
Allows a user to edit the tool they own.
"""
tool = Tool.objects.get(id=tool_id)
user_sheds = Sheds.objects.filter(user=request.user)
user = request.user
return render(request, 'registration/tool_editing.html', {'sheds': user_sheds, 'user': user, 'tool': tool, 'tool_types': TYPES_OF_TOOLS, 'conditions': CONDITIONS, 'availability': AVAILABILITY})
def edit_tool(request, tool_id):
"""
Edits the tool in the database.
"""
tool = Tool.objects.get(id=tool_id)
# blackout = Reservation.objects.filter(tool=tool, borrower=request.user)
if request.method == 'POST':
form = ToolEditForm(request.POST)
if form.is_valid():
tool.tool_name = form.cleaned_data['tool_name']
tool.type_of_tool = form.cleaned_data['type_of_tool']
tool.description = form.cleaned_data['description']
tool.condition = form.cleaned_data['condition']
tool.pick_up_tool = form.cleaned_data['pick_up_tool']
tool.availability = form.cleaned_data['availability']
image = form.cleaned_data['image']
shed_id = form.cleaned_data['new_shed']
tool.current_shed = Sheds.objects.get(pk=shed_id)
if tool.tool_name.isspace():
return render(request, 'registration/tool_editing.html', {'tool': tool, 'tool_types': TYPES_OF_TOOLS, 'conditions': CONDITIONS, 'availability': AVAILABILITY, 'whitespace':True})
tool.save()
reservation_list = Reservation.objects.filter(tool=tool)
dates = request.POST['dates'].split(', ')
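            # Collapse the posted blackout dates into contiguous ranges, store each range as a reservation
            # held by the owner, and cancel (with notification) any borrower reservation that overlaps it.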
if dates:
day = timedelta(days=1)
last_date = None
first_date = None
blackout = None
for date in dates:
try:
date_element = datetime.strptime(date, '%m/%d/%y').date()
if date_element < datetime.today().date():
continue
except ValueError:
break
                    if last_date is None:
last_date = date_element
first_date = last_date
elif date_element != last_date + day:
blackout = Reservation(start_date=first_date, end_date=last_date, borrower=tool.owner_of_tool, tool=tool)
for reservation in reservation_list:
if reservation.start_date < blackout.start_date:
if reservation.end_date > blackout.start_date:
signals.cancel_reservation.send(sender=None, shed_name=tool.current_shed.shed_name, user_to_cancel=reservation.borrower, tool=tool, user_who_canceled=request.user)
reservation.delete()
elif reservation.start_date > blackout.start_date:
if reservation.start_date < blackout.end_date:
signals.cancel_reservation.send(sender=None, shed_name=tool.current_shed.shed_name, user_to_cancel=reservation.borrower, tool=tool, user_who_canceled=request.user)
reservation.delete()
else:
signals.cancel_reservation.send(sender=None, shed_name=tool.current_shed.shed_name, user_to_cancel=reservation.borrower, tool=tool, user_who_canceled=request.user)
reservation.delete()
blackout.save()
first_date = date_element
last_date = first_date
elif date_element == last_date + day:
last_date = date_element
if last_date and first_date:
blackout = Reservation(start_date=first_date, end_date=last_date, borrower=tool.owner_of_tool, tool=tool)
for reservation in reservation_list:
if reservation.start_date < blackout.start_date:
if reservation.end_date > blackout.start_date:
reservation.delete()
elif reservation.start_date > blackout.start_date:
if reservation.start_date < blackout.end_date:
reservation.delete()
else:
reservation.delete()
blackout.save()
user = request.user
user_tools = Tool.objects.filter(owner_of_tool=user)
alert_type = "success"
alert_message = "Tool has been edited!"
if not user_tools:
user_tools = None
return render(request, 'registration/my_tools.html', {'user': user, 'tools': user_tools, 'alert_type': alert_type, 'alert_message': alert_message})
else:
user_sheds = Sheds.objects.filter(user=request.user)
return render(request, 'registration/tool_editing.html', {'sheds': user_sheds, 'tool': tool, 'tool_types': TYPES_OF_TOOLS, 'conditions': CONDITIONS, 'availability': AVAILABILITY})
else:
return HttpResponseRedirect(reverse('registration:edit_tool', args=()))
|
gpl-3.0
| -6,212,815,428,847,660,000 | 42.909344 | 245 | 0.61794 | false |
elimence/edx-platform
|
common/lib/xmodule/xmodule/contentstore/mongo.py
|
1
|
4555
|
from bson.son import SON
from pymongo import Connection
import gridfs
from gridfs.errors import NoFile
from xmodule.modulestore.mongo import location_to_query, Location
from xmodule.contentstore.content import XASSET_LOCATION_TAG
import logging
from .content import StaticContent, ContentStore
from xmodule.exceptions import NotFoundError
from fs.osfs import OSFS
import os
class MongoContentStore(ContentStore):
def __init__(self, host, db, port=27017, user=None, password=None, **kwargs):
logging.debug('Using MongoDB for static content serving at host={0} db={1}'.format(host, db))
_db = Connection(host=host, port=port, **kwargs)[db]
if user is not None and password is not None:
_db.authenticate(user, password)
self.fs = gridfs.GridFS(_db)
self.fs_files = _db["fs.files"] # the underlying collection GridFS uses
def save(self, content):
id = content.get_id()
        # It seems that with GridFS we can't update existing IDs, so we have to do a delete/add pair
self.delete(id)
with self.fs.new_file(_id=id, filename=content.get_url_path(), content_type=content.content_type,
displayname=content.name, thumbnail_location=content.thumbnail_location,
import_path=content.import_path) as fp:
fp.write(content.data)
return content
def delete(self, id):
if self.fs.exists({"_id": id}):
self.fs.delete(id)
def find(self, location):
id = StaticContent.get_id_from_location(location)
try:
with self.fs.get(id) as fp:
return StaticContent(location, fp.displayname, fp.content_type, fp.read(),
fp.uploadDate,
thumbnail_location=fp.thumbnail_location if hasattr(fp, 'thumbnail_location') else None,
import_path=fp.import_path if hasattr(fp, 'import_path') else None)
except NoFile:
raise NotFoundError()
def export(self, location, output_directory):
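        # Write the raw asset bytes out to the local filesystem, recreating the asset's original
        # import path layout when one was recorded.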
content = self.find(location)
if content.import_path is not None:
output_directory = output_directory + '/' + os.path.dirname(content.import_path)
if not os.path.exists(output_directory):
os.makedirs(output_directory)
disk_fs = OSFS(output_directory)
with disk_fs.open(content.name, 'wb') as asset_file:
asset_file.write(content.data)
def export_all_for_course(self, course_location, output_directory):
assets = self.get_all_content_for_course(course_location)
for asset in assets:
asset_location = Location(asset['_id'])
self.export(asset_location, output_directory)
def get_all_content_thumbnails_for_course(self, location):
return self._get_all_content_for_course(location, get_thumbnails=True)
def get_all_content_for_course(self, location):
return self._get_all_content_for_course(location, get_thumbnails=False)
def _get_all_content_for_course(self, location, get_thumbnails=False):
'''
Returns a list of all static assets for a course. The return format is a list of dictionary elements. Example:
[
{u'displayname': u'profile.jpg', u'chunkSize': 262144, u'length': 85374,
u'uploadDate': datetime.datetime(2012, 10, 3, 5, 41, 54, 183000), u'contentType': u'image/jpeg',
u'_id': {u'category': u'asset', u'name': u'profile.jpg', u'course': u'6.002x', u'tag': u'c4x',
u'org': u'MITx', u'revision': None}, u'md5': u'36dc53519d4b735eb6beba51cd686a0e'},
{u'displayname': u'profile.thumbnail.jpg', u'chunkSize': 262144, u'length': 4073,
u'uploadDate': datetime.datetime(2012, 10, 3, 5, 41, 54, 196000), u'contentType': u'image/jpeg',
u'_id': {u'category': u'asset', u'name': u'profile.thumbnail.jpg', u'course': u'6.002x', u'tag': u'c4x',
u'org': u'MITx', u'revision': None}, u'md5': u'ff1532598830e3feac91c2449eaa60d6'},
....
]
'''
course_filter = Location(XASSET_LOCATION_TAG, category="asset" if not get_thumbnails else "thumbnail",
course=location.course, org=location.org)
# 'borrow' the function 'location_to_query' from the Mongo modulestore implementation
items = self.fs_files.find(location_to_query(course_filter))
return list(items)
|
agpl-3.0
| 2,441,600,805,920,882,700 | 41.175926 | 125 | 0.626125 | false |
tyb0807/angr
|
angr/procedures/libc/fseek.py
|
1
|
1124
|
import angr
from . import io_file_data_for_arch
from ...errors import SimSolverError
######################################
# fseek
######################################
class fseek(angr.SimProcedure):
#pylint:disable=arguments-differ
def run(self, file_ptr, offset, whence):
# TODO: Support symbolic file_ptr, offset, and whence
# Make sure whence can only be one of the three values: SEEK_SET(0), SEEK_CUR(1), and SEEK_END(2)
try:
whence = self.state.solver.eval_one(whence)
except SimSolverError:
raise angr.SimProcedureError('multi-valued "whence" is not supported in fseek.')
try:
whence = {0: 'start', 1: 'current', 2: 'end'}[whence]
except KeyError:
return -1 # EINVAL
fd_offset = io_file_data_for_arch(self.state.arch)['fd']
fd = self.state.mem[file_ptr + fd_offset].int.resolved
simfd = self.state.posix.get_fd(fd)
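        # No SimFile for this fd: the FILE* does not refer to an open descriptor, so fail like libc with -1.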
if simfd is None:
return -1
return self.state.solver.If(simfd.seek(offset, whence), self.state.solver.BVV(0, self.state.arch.bits), -1)
|
bsd-2-clause
| -4,569,895,961,317,021,700 | 34.125 | 115 | 0.580961 | false |
musicbrainz/picard
|
picard/webservice/api_helpers.py
|
1
|
11716
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2017 Sambhav Kothari
# Copyright (C) 2018 Laurent Monin
# Copyright (C) 2018-2019 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import re
from PyQt5.QtCore import QUrl
from picard import (
PICARD_VERSION_STR,
config,
)
from picard.const import (
ACOUSTID_HOST,
ACOUSTID_KEY,
ACOUSTID_PORT,
CAA_HOST,
CAA_PORT,
)
from picard.webservice import (
CLIENT_STRING,
DEFAULT_RESPONSE_PARSER_TYPE,
ratecontrol,
)
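# Throttle AcoustID requests to one every 333 ms (roughly three per second); the Cover Art Archive host is
# left unthrottled.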
ratecontrol.set_minimum_delay((ACOUSTID_HOST, ACOUSTID_PORT), 333)
ratecontrol.set_minimum_delay((CAA_HOST, CAA_PORT), 0)
def escape_lucene_query(text):
return re.sub(r'([+\-&|!(){}\[\]\^"~*?:\\/])', r'\\\1', text)
def _wrap_xml_metadata(data):
return ('<?xml version="1.0" encoding="UTF-8"?>'
'<metadata xmlns="http://musicbrainz.org/ns/mmd-2.0#">%s</metadata>'
% data)
class APIHelper(object):
def __init__(self, host, port, api_path, webservice):
self._host = host
self._port = port
self.api_path = api_path
self._webservice = webservice
@property
def webservice(self):
return self._webservice
@property
def host(self):
return self._host
@property
def port(self):
return self._port
def get(self, path_list, handler, priority=False, important=False, mblogin=False,
cacheloadcontrol=None, refresh=False, queryargs=None, parse_response_type=DEFAULT_RESPONSE_PARSER_TYPE):
path = self.api_path + "/".join(path_list)
return self._webservice.get(self.host, self.port, path, handler,
priority=priority, important=important, mblogin=mblogin,
refresh=refresh, queryargs=queryargs, parse_response_type=parse_response_type)
def post(self, path_list, data, handler, priority=False, important=False,
mblogin=True, queryargs=None, parse_response_type=DEFAULT_RESPONSE_PARSER_TYPE,
request_mimetype=None):
path = self.api_path + "/".join(path_list)
return self._webservice.post(self.host, self.port, path, data, handler,
priority=priority, important=important, mblogin=mblogin,
queryargs=queryargs, parse_response_type=parse_response_type,
request_mimetype=request_mimetype)
def put(self, path_list, data, handler, priority=True, important=False,
mblogin=True, queryargs=None, request_mimetype=None):
path = self.api_path + "/".join(path_list)
return self._webservice.put(self.host, self.port, path, data, handler,
priority=priority, important=important, mblogin=mblogin,
queryargs=queryargs, request_mimetype=request_mimetype)
def delete(self, path_list, handler, priority=True, important=False,
mblogin=True, queryargs=None):
path = self.api_path + "/".join(path_list)
return self._webservice.delete(self.host, self.port, path, handler,
priority=priority, important=important, mblogin=mblogin,
queryargs=queryargs)
class MBAPIHelper(APIHelper):
def __init__(self, webservice):
super().__init__(None, None, "/ws/2/", webservice)
@property
def host(self):
return config.setting['server_host']
@property
def port(self):
return config.setting['server_port']
def _get_by_id(self, entitytype, entityid, handler, inc=None, queryargs=None,
priority=False, important=False, mblogin=False, refresh=False):
path_list = [entitytype, entityid]
if queryargs is None:
queryargs = {}
if inc:
queryargs["inc"] = "+".join(inc)
return self.get(path_list, handler,
priority=priority, important=important, mblogin=mblogin,
refresh=refresh, queryargs=queryargs)
def get_release_by_id(self, releaseid, handler, inc=None,
priority=False, important=False, mblogin=False, refresh=False):
if inc is None:
inc = []
return self._get_by_id('release', releaseid, handler, inc,
priority=priority, important=important, mblogin=mblogin, refresh=refresh)
def get_track_by_id(self, trackid, handler, inc=None,
priority=False, important=False, mblogin=False, refresh=False):
if inc is None:
inc = []
return self._get_by_id('recording', trackid, handler, inc,
priority=priority, important=important, mblogin=mblogin, refresh=refresh)
def lookup_discid(self, discid, handler, priority=True, important=True, refresh=False):
inc = ['artist-credits', 'labels']
return self._get_by_id('discid', discid, handler, inc, queryargs={"cdstubs": "no"},
priority=priority, important=important, refresh=refresh)
def _find(self, entitytype, handler, **kwargs):
filters = []
limit = kwargs.pop("limit")
if limit:
filters.append(("limit", limit))
is_search = kwargs.pop("search", False)
if is_search:
use_advanced_search = kwargs.pop("advanced_search", config.setting["use_adv_search_syntax"])
if use_advanced_search:
query = kwargs["query"]
else:
query = escape_lucene_query(kwargs["query"]).strip().lower()
filters.append(("dismax", 'true'))
else:
query = []
for name, value in kwargs.items():
value = escape_lucene_query(value).strip().lower()
if value:
query.append('%s:(%s)' % (name, value))
query = ' '.join(query)
if query:
filters.append(("query", query))
queryargs = {}
for name, value in filters:
queryargs[name] = bytes(QUrl.toPercentEncoding(str(value))).decode()
path_list = [entitytype]
return self.get(path_list, handler, queryargs=queryargs,
priority=True, important=True, mblogin=False,
refresh=False)
def find_releases(self, handler, **kwargs):
return self._find('release', handler, **kwargs)
def find_tracks(self, handler, **kwargs):
return self._find('recording', handler, **kwargs)
def find_artists(self, handler, **kwargs):
return self._find('artist', handler, **kwargs)
def _browse(self, entitytype, handler, inc=None, **kwargs):
path_list = [entitytype]
queryargs = kwargs
if inc:
queryargs["inc"] = "+".join(inc)
return self.get(path_list, handler, queryargs=queryargs,
priority=True, important=True, mblogin=False,
refresh=False)
def browse_releases(self, handler, **kwargs):
inc = ["media", "labels"]
return self._browse("release", handler, inc, **kwargs)
def submit_ratings(self, ratings, handler):
path_list = ['rating']
params = {"client": CLIENT_STRING}
recordings = (''.join(['<recording id="%s"><user-rating>%s</user-rating></recording>' %
(i[1], j*20) for i, j in ratings.items() if i[0] == 'recording']))
data = _wrap_xml_metadata('<recording-list>%s</recording-list>' % recordings)
return self.post(path_list, data, handler, priority=True,
queryargs=params, parse_response_type="xml",
request_mimetype="application/xml; charset=utf-8")
def get_collection(self, collection_id, handler, limit=100, offset=0):
path_list = ["collection"]
queryargs = None
if collection_id is not None:
inc = ["releases", "artist-credits", "media"]
path_list.extend([collection_id, "releases"])
queryargs = {}
queryargs["inc"] = "+".join(inc)
queryargs["limit"] = limit
queryargs["offset"] = offset
return self.get(path_list, handler, priority=True, important=True,
mblogin=True, queryargs=queryargs)
def get_collection_list(self, handler):
return self.get_collection(None, handler)
@staticmethod
def _collection_request(collection_id, releases):
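        # Yield the release ids in chunks of at most 400 per request so a single collection URL stays
        # a manageable size.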
while releases:
ids = ";".join(releases if len(releases) <= 400 else releases[:400])
releases = releases[400:]
yield ["collection", collection_id, "releases", ids]
@staticmethod
def _get_client_queryarg():
return {"client": CLIENT_STRING}
def put_to_collection(self, collection_id, releases, handler):
for path_list in self._collection_request(collection_id, releases):
self.put(path_list, "", handler,
queryargs=self._get_client_queryarg())
def delete_from_collection(self, collection_id, releases, handler):
for path_list in self._collection_request(collection_id, releases):
self.delete(path_list, handler,
queryargs=self._get_client_queryarg())
class AcoustIdAPIHelper(APIHelper):
def __init__(self, webservice):
super().__init__(ACOUSTID_HOST, ACOUSTID_PORT,
'/v2/', webservice)
@staticmethod
def _encode_acoustid_args(args, format_='json'):
filters = []
args['client'] = ACOUSTID_KEY
args['clientversion'] = PICARD_VERSION_STR
args['format'] = format_
for name, value in args.items():
value = bytes(QUrl.toPercentEncoding(value)).decode()
filters.append('%s=%s' % (name, value))
return '&'.join(filters)
def query_acoustid(self, handler, **args):
path_list = ['lookup']
body = self._encode_acoustid_args(args)
return self.post(path_list, body, handler, priority=False, important=False,
mblogin=False, request_mimetype="application/x-www-form-urlencoded")
def submit_acoustid_fingerprints(self, submissions, handler):
path_list = ['submit']
args = {'user': config.setting["acoustid_apikey"]}
for i, submission in enumerate(submissions):
args['fingerprint.%d' % i] = submission.fingerprint
args['duration.%d' % i] = str(submission.duration)
args['mbid.%d' % i] = submission.recordingid
if submission.puid:
args['puid.%d' % i] = submission.puid
body = self._encode_acoustid_args(args, format_='json')
return self.post(path_list, body, handler, priority=True, important=False,
mblogin=False, request_mimetype="application/x-www-form-urlencoded")
|
gpl-2.0
| 2,615,057,318,125,829,600 | 38.986348 | 116 | 0.59662 | false |
sundhaug92/binwalk
|
src/binwalk/modules/entropy.py
|
1
|
11569
|
# Calculates and optionally plots the entropy of input files.
import os
import math
import zlib
import binwalk.core.common
from binwalk.core.compat import *
from binwalk.core.module import Module, Option, Kwarg
class Entropy(Module):
XLABEL = 'Offset'
YLABEL = 'Entropy'
XUNITS = 'B'
YUNITS = 'E'
FILE_WIDTH = 1024
FILE_FORMAT = 'png'
COLORS = ['r', 'g', 'c', 'b', 'm']
DEFAULT_BLOCK_SIZE = 1024
DEFAULT_DATA_POINTS = 2048
DEFAULT_TRIGGER_HIGH = .95
DEFAULT_TRIGGER_LOW = .85
TITLE = "Entropy Analysis"
ORDER = 8
# TODO: Add --dpoints option to set the number of data points?
CLI = [
Option(short='E',
long='entropy',
kwargs={'enabled': True},
description='Calculate file entropy'),
Option(short='F',
long='fast',
kwargs={'use_zlib': True},
description='Use faster, but less detailed, entropy analysis'),
Option(short='J',
long='save',
kwargs={'save_plot': True},
description='Save plot as a PNG'),
Option(short='Q',
long='nlegend',
kwargs={'show_legend': False},
description='Omit the legend from the entropy plot graph'),
Option(short='N',
long='nplot',
kwargs={'do_plot': False},
description='Do not generate an entropy plot graph'),
Option(short='H',
long='high',
type=float,
kwargs={'trigger_high': DEFAULT_TRIGGER_HIGH},
description='Set the rising edge entropy trigger threshold (default: %.2f)' % DEFAULT_TRIGGER_HIGH),
Option(short='L',
long='low',
type=float,
kwargs={'trigger_low': DEFAULT_TRIGGER_LOW},
description='Set the falling edge entropy trigger threshold (default: %.2f)' % DEFAULT_TRIGGER_LOW),
]
KWARGS = [
Kwarg(name='enabled', default=False),
Kwarg(name='save_plot', default=False),
Kwarg(name='trigger_high', default=DEFAULT_TRIGGER_HIGH),
Kwarg(name='trigger_low', default=DEFAULT_TRIGGER_LOW),
Kwarg(name='use_zlib', default=False),
Kwarg(name='display_results', default=True),
Kwarg(name='do_plot', default=True),
Kwarg(name='show_legend', default=True),
Kwarg(name='block_size', default=0),
]
# Run this module last so that it can process all other module's results
# and overlay them on the entropy graph
PRIORITY = 0
def init(self):
self.HEADER[-1] = "ENTROPY"
self.max_description_length = 0
self.file_markers = {}
if self.use_zlib:
self.algorithm = self.gzip
else:
self.algorithm = self.shannon
# Get a list of all other module's results to mark on the entropy graph
for (module, obj) in iterator(self.modules):
for result in obj.results:
if result.plot and result.file and result.description:
description = result.description.split(',')[0]
if not has_key(self.file_markers, result.file.name):
self.file_markers[result.file.name] = []
if len(description) > self.max_description_length:
self.max_description_length = len(description)
self.file_markers[result.file.name].append(
(result.offset, description))
# If other modules have been run and they produced results, don't spam
# the terminal with entropy results
if self.file_markers:
self.display_results = False
if not self.block_size:
if self.config.block:
self.block_size = self.config.block
else:
self.block_size = None
def _entropy_sigterm_handler(self, *args):
print("FUck it all.")
def run(self):
# If generating a graphical plot, this function will never return, as it invokes
# pg.exit. Calling pg.exit is pretty much required, but pg.exit calls os._exit in
# order to work around QT cleanup issues.
self._run()
def _run(self):
# Sanity check and warning if pyqtgraph isn't found
if self.do_plot:
try:
import pyqtgraph as pg
except ImportError as e:
binwalk.core.common.warning(
"Failed to import pyqtgraph module, visual entropy graphing will be disabled")
self.do_plot = False
for fp in iter(self.next_file, None):
if self.display_results:
self.header()
self.calculate_file_entropy(fp)
if self.display_results:
self.footer()
if self.do_plot:
if not self.save_plot:
from pyqtgraph.Qt import QtGui
QtGui.QApplication.instance().exec_()
pg.exit()
def calculate_file_entropy(self, fp):
# Tracks the last displayed rising/falling edge (0 for falling, 1 for
# rising, None if nothing has been printed yet)
last_edge = None
# Auto-reset the trigger; if True, an entropy above/below
# self.trigger_high/self.trigger_low will be printed
trigger_reset = True
# Clear results from any previously analyzed files
self.clear(results=True)
# If -K was not specified, calculate the block size to create
# DEFAULT_DATA_POINTS data points
if self.block_size is None:
block_size = fp.size / self.DEFAULT_DATA_POINTS
# Round up to the nearest DEFAULT_BLOCK_SIZE (1024)
block_size = int(
block_size + ((self.DEFAULT_BLOCK_SIZE - block_size) % self.DEFAULT_BLOCK_SIZE))
else:
block_size = self.block_size
# Make sure block size is greater than 0
if block_size <= 0:
block_size = self.DEFAULT_BLOCK_SIZE
binwalk.core.common.debug("Entropy block size (%d data points): %d" %
(self.DEFAULT_DATA_POINTS, block_size))
while True:
file_offset = fp.tell()
(data, dlen) = fp.read_block()
if not data:
break
i = 0
while i < dlen:
entropy = self.algorithm(data[i:i + block_size])
display = self.display_results
description = "%f" % entropy
if not self.config.verbose:
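                    # Edge-triggered reporting: only emit a result when entropy crosses the high threshold
                    # on a rising edge or the low threshold on a falling edge, so long runs of similar
                    # entropy do not flood the output.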
if last_edge in [None, 0] and entropy > self.trigger_low:
trigger_reset = True
elif last_edge in [None, 1] and entropy < self.trigger_high:
trigger_reset = True
if trigger_reset and entropy >= self.trigger_high:
description = "Rising entropy edge (%f)" % entropy
display = self.display_results
last_edge = 1
trigger_reset = False
elif trigger_reset and entropy <= self.trigger_low:
description = "Falling entropy edge (%f)" % entropy
display = self.display_results
last_edge = 0
trigger_reset = False
else:
display = False
description = "%f" % entropy
r = self.result(offset=(file_offset + i),
file=fp,
entropy=entropy,
description=description,
display=display)
i += block_size
if self.do_plot:
self.plot_entropy(fp.name)
def shannon(self, data):
'''
Performs a Shannon entropy analysis on a given block of data.
'''
entropy = 0
if data:
length = len(data)
seen = dict(((chr(x), 0) for x in range(0, 256)))
for byte in data:
seen[byte] += 1
for x in range(0, 256):
p_x = float(seen[chr(x)]) / length
if p_x > 0:
entropy -= p_x * math.log(p_x, 2)
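        # Normalise to the 0..1 range; 8 bits per byte is the maximum possible entropy.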
return (entropy / 8)
def gzip(self, data, truncate=True):
'''
Performs an entropy analysis based on zlib compression ratio.
This is faster than the shannon entropy analysis, but not as accurate.
'''
# Entropy is a simple ratio of: <zlib compressed size> / <original
# size>
e = float(
float(len(zlib.compress(str2bytes(data), 9))) / float(len(data)))
if truncate and e > 1.0:
e = 1.0
return e
def plot_entropy(self, fname):
try:
import numpy as np
import pyqtgraph as pg
import pyqtgraph.exporters as exporters
except ImportError as e:
return
i = 0
x = []
y = []
plotted_colors = {}
for r in self.results:
x.append(r.offset)
y.append(r.entropy)
plt = pg.plot(title=fname, clear=True)
# Disable auto-ranging of the Y (entropy) axis, as it
# can cause some very un-intuitive graphs, particularly
# for files with only high-entropy data.
plt.setYRange(0, 1)
if self.show_legend and has_key(self.file_markers, fname):
plt.addLegend(size=(self.max_description_length * 10, 0))
for (offset, description) in self.file_markers[fname]:
# If this description has already been plotted at a different offset, we need to
# use the same color for the marker, but set the description to None to prevent
# duplicate entries in the graph legend.
#
# Else, get the next color and use it to mark descriptions of
# this type.
if has_key(plotted_colors, description):
color = plotted_colors[description]
description = None
else:
color = self.COLORS[i]
plotted_colors[description] = color
i += 1
if i >= len(self.COLORS):
i = 0
plt.plot(x=[offset, offset], y=[0, 1.1],
name=description, pen=pg.mkPen(color, width=2.5))
# Plot data points
plt.plot(x, y, pen='y')
# TODO: legend is not displayed properly when saving plots to disk
if self.save_plot:
# Save graph to CWD
out_file = os.path.join(os.getcwd(), os.path.basename(fname))
# exporters.ImageExporter is different in different versions of
# pyqtgraph
try:
exporter = exporters.ImageExporter(plt.plotItem)
except TypeError:
exporter = exporters.ImageExporter.ImageExporter(plt.plotItem)
exporter.parameters()['width'] = self.FILE_WIDTH
exporter.export(
binwalk.core.common.unique_file_name(out_file, self.FILE_FORMAT))
else:
plt.setLabel('left', self.YLABEL, units=self.YUNITS)
plt.setLabel('bottom', self.XLABEL, units=self.XUNITS)
|
mit
| 1,616,502,433,162,220,500 | 34.271341 | 115 | 0.532112 | false |
flypy/flypy
|
flypy/runtime/obj/stringobject.py
|
1
|
2309
|
# -*- coding: utf-8 -*-
"""
String implementation.
"""
from __future__ import print_function, division, absolute_import
import flypy
from flypy import sjit, jit, typeof
from .bufferobject import Buffer, newbuffer, copyto
from .pointerobject import Pointer
@sjit
class String(object):
layout = [('buf', 'Buffer[char]')]
@jit('a -> a -> bool')
def __eq__(self, other):
return self.buf == other.buf
@jit('a -> b -> bool')
def __eq__(self, other):
return False
# TODO: Fix the below
#@jit('a -> int64 -> a')
#def __getitem__(self, idx):
# #c = self.buf[idx]
# p = self.buf.p + idx
# # TODO: Keep original string alive!
# return String(Buffer(p, 1)) # <- this is not \0 terminated
@jit('a -> a')
def __str__(self):
return self
@jit('a -> int64')
def __len__(self):
return len(self.buf) - 1
@jit('a -> a -> a')
def __add__(self, other):
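        # Allocate a buffer large enough for both strings plus the trailing NUL, then copy each operand in.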
n = len(self) + len(other) + 1
buf = newbuffer(flypy.char, n)
copyto(self.buf, buf, 0)
copyto(other.buf, buf, len(self))
return String(buf)
@jit('a -> bool')
def __nonzero__(self):
return bool(len(self))
# __________________________________________________________________
@staticmethod
def fromobject(strobj, type):
assert isinstance(strobj, str)
p = flypy.runtime.lib.librt.asstring(strobj)
buf = Buffer(Pointer(p), len(strobj) + 1)
return String(buf)
@staticmethod
def toobject(obj, type):
buf = obj.buf
return flypy.runtime.lib.librt.fromstring(buf.p, len(obj))
# __________________________________________________________________
#===------------------------------------------------------------------===
# String <-> char *
#===------------------------------------------------------------------===
@jit('Pointer[char] -> String[]')
def from_cstring(p):
return String(Buffer(p, flypy.runtime.lib.strlen(p)))
@jit('String[] -> Pointer[char]')
def as_cstring(s):
return s.buf.pointer()
#===------------------------------------------------------------------===
# typeof
#===------------------------------------------------------------------===
@typeof.case(str)
def typeof(pyval):
return String[()]
|
bsd-2-clause
| 564,705,248,364,372,860 | 24.384615 | 73 | 0.459939 | false |
mohclips/k5-ansible-modules
|
k5_inter_project_link.py
|
1
|
9324
|
#!/usr/bin/python
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: k5_inter_project_link
short_description: Create inter-project link on K5 in particular AZ
version_added: "1.0"
description:
- K5 call to inter-project network link in an AZ - the inter-project link is custom to K5 therefore there is no Openstack module.
options:
router_name:
description:
- Name of the router network.
required: true
default: None
state:
description:
- State of the network. Can be 'present' or 'absent'.
required: true
default: None
k5_port:
description:
- dict of k5_port module output.
required: true
default: None
k5_auth:
description:
- dict of k5_auth module output.
required: true
default: None
requirements:
- "python >= 2.6"
'''
EXAMPLES = '''
# Create an inter-project link in an AZ
- k5_create_inter_project_link:
state: present
k5_port: "{{ k5_port_reg.k5_port_facts }}"
router_name: "nx-test-net-1a"
k5_auth: "{{ k5_auth_reg.k5_auth_facts }}"
'''
RETURN = '''
-
'''
import requests
import os
import json
from ansible.module_utils.basic import *
############## Common debug ###############
k5_debug = False
k5_debug_out = []
def k5_debug_get():
"""Return our debug list"""
return k5_debug_out
def k5_debug_clear():
"""Clear our debug list"""
k5_debug_out = []
def k5_debug_add(s):
"""Add string to debug list if env K5_DEBUG is defined"""
if k5_debug:
k5_debug_out.append(s)
############## inter-project link functions #############
def k5_get_endpoint(e,name):
"""Pull particular endpoint name from dict"""
return e['endpoints'][name]
def k5_get_router_id_from_name(module, k5_facts):
"""Get an id from a router_name"""
endpoint = k5_facts['endpoints']['networking']
auth_token = k5_facts['auth_token']
router_name = module.params['router_name']
session = requests.Session()
headers = {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': auth_token }
url = endpoint + '/v2.0/routers'
k5_debug_add('endpoint: {0}'.format(endpoint))
k5_debug_add('REQ: {0}'.format(url))
k5_debug_add('headers: {0}'.format(headers))
try:
response = session.request('GET', url, headers=headers)
except requests.exceptions.RequestException as e:
module.fail_json(msg=e)
# we failed to get data
if response.status_code not in (200,):
module.fail_json(msg="RESP: HTTP Code:" + str(response.status_code) + " " + str(response.content), debug=k5_debug_out)
#k5_debug_add("RESP: " + str(response.json()))
for n in response.json()['routers']:
#k5_debug_add("Found router name: " + str(n['name']))
if str(n['name']) == router_name:
#k5_debug_add("Found it!")
return n['id']
return ''
def k5_create_inter_project_link(module):
"""Create an inter-project link in an AZ on K5"""
global k5_debug
k5_debug_clear()
if 'K5_DEBUG' in os.environ:
k5_debug = True
if 'auth_spec' in module.params['k5_auth']:
k5_facts = module.params['k5_auth']
else:
module.fail_json(msg="k5_auth_facts not found, have you run k5_auth?")
if 'id' in module.params['k5_port']:
k5_port = module.params['k5_port']
else:
module.fail_json(msg="k5_port_id not found, have you run k5_create_port?")
endpoint = k5_facts['endpoints']['networking-ex']
auth_token = k5_facts['auth_token']
port_id = k5_port['id']
router_name = module.params['router_name']
# we need the router_id not router_name, so grab it
router_id = k5_get_router_id_from_name(module, k5_facts)
if router_id == '':
if k5_debug:
module.exit_json(changed=False, msg="Router " + router_name + " not found", debug=k5_debug_out)
else:
module.exit_json(changed=False, msg="Router " + router_name + " not found")
if router_id == k5_port['device_id']:
module.exit_json(changed=False, msg="Port already connected to the correct router")
elif k5_port['device_id'] != '':
if k5_debug:
module.fail_json(changed=False, msg="Port already attached to " + k5_port['device_id'], debug=k5_debug_out)
else:
module.fail_json(changed=False, msg="Port already attached to " + k5_port['device_id'])
# actually the project_id, but stated as tenant_id in the API
tenant_id = k5_facts['auth_spec']['os_project_id']
k5_debug_add('router_name: {0}'.format(router_name))
k5_debug_add('port_id: {0}'.format(port_id))
session = requests.Session()
headers = {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': auth_token }
url = endpoint + '/v2.0/routers/' + router_id + '/add_cross_project_router_interface'
query_json = { "port_id": port_id }
k5_debug_add('REQ: {0}'.format(url))
k5_debug_add('headers: {0}'.format(headers))
k5_debug_add('json: {0}'.format(query_json))
try:
response = session.request('PUT', url, headers=headers, json=query_json)
except requests.exceptions.RequestException as e:
module.fail_json(msg=e)
# we failed to make a change
if response.status_code not in (200,):
module.fail_json(msg="RESP: HTTP Code:" + str(response.status_code) + " " + str(response.content), debug=k5_debug_out)
if k5_debug:
module.exit_json(changed=True, msg="Inter-porject Link Creation Successful", debug=k5_debug_out )
module.exit_json(changed=True, msg="Inter-porject Link Creation Successful")
def k5_delete_inter_project_link(module):
"""Delete an inter-project link in an AZ on K5"""
global k5_debug
k5_debug_clear()
if 'K5_DEBUG' in os.environ:
k5_debug = True
if 'auth_spec' in module.params['k5_auth']:
k5_facts = module.params['k5_auth']
else:
module.fail_json(msg="k5_auth_facts not found, have you run k5_auth?")
if 'id' in module.params['k5_port']:
k5_port = module.params['k5_port']
port_id = k5_port['id']
elif 'id' in module.params:
port_id = module.params['port_id']
else:
module.fail_json(msg="port_id or k5_port not supplied")
endpoint = k5_facts['endpoints']['networking-ex']
auth_token = k5_facts['auth_token']
router_name = module.params['router_name']
# we need the router_id not router_name, so grab it
router_id = k5_get_router_id_from_name(module, k5_facts)
if router_id == '':
if k5_debug:
module.exit_json(changed=False, msg="Router " + router_name + " not found", debug=k5_debug_out)
else:
module.exit_json(changed=False, msg="Router " + router_name + " not found")
# actually the project_id, but stated as tenant_id in the API
tenant_id = k5_facts['auth_spec']['os_project_id']
k5_debug_add('router_name: {0}'.format(router_name))
k5_debug_add('port_id: {0}'.format(port_id))
session = requests.Session()
headers = {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': auth_token }
url = endpoint + '/v2.0/routers/' + router_id + '/remove_cross_project_router_interface'
query_json = { "port_id": port_id }
k5_debug_add('REQ: {0}'.format(url))
k5_debug_add('headers: {0}'.format(headers))
k5_debug_add('json: {0}'.format(query_json))
try:
response = session.request('PUT', url, headers=headers, json=query_json)
except requests.exceptions.RequestException as e:
module.fail_json(msg=e)
# we failed to make a change
if response.status_code not in (200,):
if "does not have an interface with id" in response.content:
if k5_debug:
module.exit_json(changed=False, msg="Inter-project Link did not exist", debug=k5_debug_out)
module.exit_json(changed=False, msg="Inter-project Link did not exist")
module.fail_json(msg="RESP: HTTP Code:" + str(response.status_code) + " " + str(response.content), debug=k5_debug_out)
if k5_debug:
module.exit_json(changed=True, msg="Inter-porject Link Deleted Successful", debug=k5_debug_out )
module.exit_json(changed=True, msg="Inter-porject Link Deleted Successful" )
######################################################################################
def main():
module = AnsibleModule( argument_spec=dict(
router_name = dict(required=True, default=None, type='str'),
state = dict(required=True, type='str'), # should be a choice
k5_port = dict(required=True, default=None, type='dict'),
k5_auth = dict(required=True, default=None, type='dict')
) )
if module.params['state'] == 'present':
k5_create_inter_project_link(module)
elif module.params['state'] == 'absent':
k5_delete_inter_project_link(module)
else:
module.fail_json(msg="Unknown state")
######################################################################################
if __name__ == '__main__':
main()
|
gpl-3.0
| 5,519,801,665,012,305,000 | 30.5 | 134 | 0.604569 | false |
mdzhang/goodreads-api-client-python
|
goodreads_api_client/resources/review.py
|
1
|
1247
|
# -*- coding: utf-8 -*-
"""Module containing review resource class."""
from goodreads_api_client.exceptions import OauthEndpointNotImplemented
from goodreads_api_client.resources.base import Resource
class Review(Resource):
resource_name = 'review'
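    # The write endpoints (create/destroy/edit/list) require OAuth, which this client does not implement,
    # so they raise immediately.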
def create(self):
raise OauthEndpointNotImplemented('review.create')
def destroy(self):
raise OauthEndpointNotImplemented('review.destroy')
def edit(self):
raise OauthEndpointNotImplemented('review.edit')
def list(self):
raise OauthEndpointNotImplemented('review.list')
def recent_reviews(self):
endpoint = 'review/recent_reviews'
res = self._transport.req(endpoint=endpoint)
return res['reviews']
def show(self, id_: str):
return self._show_single_resource(id_)
def show_by_user_and_book(self, user_id: str, book_id: str,
include_review_on_work: bool=False):
endpoint = 'review/show_by_user_and_book'
params = {
'book_id': book_id,
'include_review_on_work': include_review_on_work,
'user_id': user_id,
}
res = self._transport.req(endpoint=endpoint, params=params)
return res['review']
|
mit
| -8,722,092,569,328,240,000 | 30.175 | 71 | 0.635926 | false |
piensa/geonode
|
geonode/people/models.py
|
1
|
7186
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.db import models
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from django.contrib.auth.models import AbstractUser, UserManager
from django.contrib import auth
from django.db.models import signals
from django.conf import settings
from taggit.managers import TaggableManager
from geonode.base.enumerations import COUNTRIES
from geonode.groups.models import GroupProfile
from geonode.notifications_helper import has_notifications, send_notification
from account.models import EmailAddress
from .utils import format_address
class ProfileUserManager(UserManager):
def get_by_natural_key(self, username):
return self.get(username__iexact=username)
class Profile(AbstractUser):
"""Fully featured Geonode user"""
organization = models.CharField(
_('Organization Name'),
max_length=255,
blank=True,
null=True,
help_text=_('name of the responsible organization'))
profile = models.TextField(_('Profile'), null=True, blank=True, help_text=_('introduce yourself'))
position = models.CharField(
_('Position Name'),
max_length=255,
blank=True,
null=True,
help_text=_('role or position of the responsible person'))
voice = models.CharField(_('Voice'), max_length=255, blank=True, null=True, help_text=_(
'telephone number by which individuals can speak to the responsible organization or individual'))
fax = models.CharField(_('Facsimile'), max_length=255, blank=True, null=True, help_text=_(
'telephone number of a facsimile machine for the responsible organization or individual'))
delivery = models.CharField(
_('Delivery Point'),
max_length=255,
blank=True,
null=True,
help_text=_('physical and email address at which the organization or individual may be contacted'))
city = models.CharField(
_('City'),
max_length=255,
blank=True,
null=True,
help_text=_('city of the location'))
area = models.CharField(
_('Administrative Area'),
max_length=255,
blank=True,
null=True,
help_text=_('state, province of the location'))
zipcode = models.CharField(
_('Postal Code'),
max_length=255,
blank=True,
null=True,
help_text=_('ZIP or other postal code'))
country = models.CharField(
choices=COUNTRIES,
max_length=3,
blank=True,
null=True,
help_text=_('country of the physical address'))
keywords = TaggableManager(_('keywords'), blank=True, help_text=_(
'commonly used word(s) or formalised word(s) or phrase(s) used to describe the subject \
(space or comma-separated'))
is_certifier = models.BooleanField(_('Allowed to certify maps & layers'),
blank=False,
null=False,
default=False)
def get_absolute_url(self):
return reverse('profile_detail', args=[self.username, ])
def __unicode__(self):
return u"%s" % (self.username)
def class_name(value):
return value.__class__.__name__
objects = ProfileUserManager()
USERNAME_FIELD = 'username'
def group_list_public(self):
return GroupProfile.objects.exclude(access="private").filter(groupmember__user=self)
def group_list_all(self):
return GroupProfile.objects.filter(groupmember__user=self)
def keyword_list(self):
"""
Returns a list of the Profile's keywords.
"""
return [kw.name for kw in self.keywords.all()]
@property
def name_long(self):
if self.first_name and self.last_name:
return '%s %s (%s)' % (self.first_name, self.last_name, self.username)
elif (not self.first_name) and self.last_name:
return '%s (%s)' % (self.last_name, self.username)
elif self.first_name and (not self.last_name):
return '%s (%s)' % (self.first_name, self.username)
else:
return self.username
@property
def location(self):
return format_address(self.delivery, self.zipcode, self.city, self.area, self.country)
def get_anonymous_user_instance(Profile):
return Profile(pk=-1, username='AnonymousUser')
def profile_post_save(instance, sender, **kwargs):
"""
Make sure the user belongs by default to the anonymous group.
This will make sure that anonymous permissions will be granted to the new users.
"""
from django.contrib.auth.models import Group
anon_group, created = Group.objects.get_or_create(name='anonymous')
instance.groups.add(anon_group)
# do not create email, when user-account signup code is in use
if getattr(instance, '_disable_account_creation', False):
return
# keep in sync Profile email address with Account email address
if instance.email not in [u'', '', None] and not kwargs.get('raw', False):
address, created = EmailAddress.objects.get_or_create(
user=instance, primary=True,
defaults={'email': instance.email, 'verified': False})
if not created:
EmailAddress.objects.filter(user=instance, primary=True).update(email=instance.email)
def email_post_save(instance, sender, **kw):
if instance.primary:
Profile.objects.filter(id=instance.user.pk).update(email=instance.email)
def profile_pre_save(instance, sender, **kw):
matching_profiles = Profile.objects.filter(id=instance.id)
if matching_profiles.count() == 0:
return
if instance.is_active and not matching_profiles.get().is_active:
send_notification((instance,), "account_active")
def profile_signed_up(user, form, **kwargs):
staff = auth.get_user_model().objects.filter(is_staff=True)
send_notification(staff, "account_approve", {"from_user": user})
signals.pre_save.connect(profile_pre_save, sender=Profile)
signals.post_save.connect(profile_post_save, sender=Profile)
signals.post_save.connect(email_post_save, sender=EmailAddress)
if has_notifications and 'account' in settings.INSTALLED_APPS and getattr(settings, 'ACCOUNT_APPROVAL_REQUIRED', False):
from account import signals as s
from account.forms import SignupForm
s.user_signed_up.connect(profile_signed_up, sender=SignupForm)
|
gpl-3.0
| -6,361,352,256,820,345,000 | 36.233161 | 120 | 0.662399 | false |
yeleman/snisi
|
snisi_reprohealth/migrations/0003_auto_20141022_1400.py
|
1
|
1402
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('snisi_reprohealth', '0002_auto_20141008_1709'),
]
operations = [
migrations.RenameField(
model_name='aggpfactivitiesr',
old_name='emergency_controls_removal_price',
new_name='emergency_controls_price',
),
migrations.RenameField(
model_name='aggpfactivitiesr',
old_name='emergency_controls_removal_qty',
new_name='emergency_controls_qty',
),
migrations.RenameField(
model_name='aggpfactivitiesr',
old_name='emergency_controls_removal_revenue',
new_name='emergency_controls_revenue',
),
migrations.RenameField(
model_name='pfactivitiesr',
old_name='emergency_controls_removal_price',
new_name='emergency_controls_price',
),
migrations.RenameField(
model_name='pfactivitiesr',
old_name='emergency_controls_removal_qty',
new_name='emergency_controls_qty',
),
migrations.RenameField(
model_name='pfactivitiesr',
old_name='emergency_controls_removal_revenue',
new_name='emergency_controls_revenue',
),
]
|
mit
| -234,128,467,318,790,200 | 30.863636 | 58 | 0.587019 | false |
tensorflow/tfx
|
tfx/components/example_gen/utils_test.py
|
1
|
29481
|
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.example_gen.utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
from typing import Text
# Standard Imports
import tensorflow as tf
from tfx.components.example_gen import utils
from tfx.dsl.io import fileio
from tfx.orchestration import data_types
from tfx.proto import example_gen_pb2
from tfx.proto import range_config_pb2
from tfx.utils import io_utils
from tfx.utils import json_utils
class UtilsTest(tf.test.TestCase):
def setUp(self):
super(UtilsTest, self).setUp()
# Create input splits.
test_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
self._input_base_path = os.path.join(test_dir, 'input_base')
fileio.makedirs(self._input_base_path)
def testDictToExample(self):
instance_dict = {
'int': 10,
'float': 5.0,
'str': 'abc',
'int_list': [1, 2],
'float_list': [3.0],
'str_list': ['ab', 'cd'],
'none': None,
'empty_list': [],
}
example = utils.dict_to_example(instance_dict)
self.assertProtoEquals(
"""
features {
feature {
key: "empty_list"
value {
}
}
feature {
key: "float"
value {
float_list {
value: 5.0
}
}
}
feature {
key: "float_list"
value {
float_list {
value: 3.0
}
}
}
feature {
key: "int"
value {
int64_list {
value: 10
}
}
}
feature {
key: "int_list"
value {
int64_list {
value: 1
value: 2
}
}
}
feature {
key: "none"
value {
}
}
feature {
key: "str"
value {
bytes_list {
value: "abc"
}
}
}
feature {
key: "str_list"
value {
bytes_list {
value: "ab"
value: "cd"
}
}
}
}
""", example)
def testMakeOutputSplitNames(self):
split_names = utils.generate_output_split_names(
input_config=example_gen_pb2.Input(splits=[
example_gen_pb2.Input.Split(name='train', pattern='train/*'),
example_gen_pb2.Input.Split(name='eval', pattern='eval/*')
]),
output_config=example_gen_pb2.Output())
self.assertListEqual(['train', 'eval'], split_names)
split_names = utils.generate_output_split_names(
input_config=example_gen_pb2.Input(splits=[
example_gen_pb2.Input.Split(name='single', pattern='single/*')
]),
output_config=example_gen_pb2.Output(
split_config=example_gen_pb2.SplitConfig(splits=[
example_gen_pb2.SplitConfig.Split(name='train', hash_buckets=2),
example_gen_pb2.SplitConfig.Split(name='eval', hash_buckets=1)
])))
self.assertListEqual(['train', 'eval'], split_names)
def testMakeDefaultOutputConfig(self):
output_config = utils.make_default_output_config(
utils.make_default_input_config())
self.assertEqual(2, len(output_config.split_config.splits))
output_config = utils.make_default_output_config(
example_gen_pb2.Input(splits=[
example_gen_pb2.Input.Split(name='train', pattern='train/*'),
example_gen_pb2.Input.Split(name='eval', pattern='eval/*')
]))
self.assertEqual(0, len(output_config.split_config.splits))
def testMakeOutputSplitNamesWithParameter(self):
split_name_param = data_types.RuntimeParameter(
name='split-name', ptype=Text, default=u'train')
split_names = utils.generate_output_split_names(
input_config={
'splits': [{
'name': split_name_param,
'pattern': 'train/*'
}, {
'name': 'eval',
'pattern': 'eval/*'
}]
},
output_config=example_gen_pb2.Output())
# Assert the json serialized version because RuntimeParameters only get
# serialized after that.
self.assertEqual(
json_utils.dumps([split_name_param, 'eval']),
json_utils.dumps(split_names))
split_names = utils.generate_output_split_names(
input_config=example_gen_pb2.Input(splits=[
example_gen_pb2.Input.Split(name='single', pattern='single/*')
]),
output_config={
'split_config': {
'splits': [{
'name': split_name_param,
'hash_buckets': 2
}, {
'name': 'eval',
'hash_buckets': 1
}]
}
})
# Assert the json serialized version because RuntimeParameters only get
# serialized after that.
self.assertEqual(
json_utils.dumps([split_name_param, 'eval']),
json_utils.dumps(split_names))
def testMakeDefaultOutputConfigWithParameter(self):
split_name_param = data_types.RuntimeParameter(
name='split-name', ptype=Text, default=u'train')
output_config = utils.make_default_output_config({
'splits': [{
'name': split_name_param,
'pattern': 'train/*'
}, {
'name': 'eval',
'pattern': 'eval/*'
}]
})
self.assertEqual(0, len(output_config.split_config.splits))
def testGlobToRegex(self):
glob_pattern = 'a(b)c'
self.assertEqual(1, re.compile(glob_pattern).groups)
regex_pattern = utils._glob_to_regex(glob_pattern) # pylint: disable=protected-access
self.assertEqual(0, re.compile(regex_pattern).groups)
self.assertEqual(glob_pattern,
re.match(regex_pattern, glob_pattern).group())
def testCalculateSplitsFingerprint(self):
split1 = os.path.join(self._input_base_path, 'split1', 'data')
io_utils.write_string_file(split1, 'testing')
os.utime(split1, (0, 1))
split2 = os.path.join(self._input_base_path, 'split2', 'data')
io_utils.write_string_file(split2, 'testing2')
os.utime(split2, (0, 3))
splits = [
example_gen_pb2.Input.Split(name='s1', pattern='split1/*'),
example_gen_pb2.Input.Split(name='s2', pattern='split2/*')
]
fingerprint, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
self.assertEqual(
fingerprint,
'split:s1,num_files:1,total_bytes:7,xor_checksum:1,sum_checksum:1\n'
'split:s2,num_files:1,total_bytes:8,xor_checksum:3,sum_checksum:3')
self.assertEqual(span, 0)
self.assertIsNone(version)
def testSpanNoMatching(self):
splits = [
example_gen_pb2.Input.Split(name='s1', pattern='span{SPAN}/split1/*'),
example_gen_pb2.Input.Split(name='s2', pattern='span{SPAN}/split2/*')
]
with self.assertRaisesRegexp(ValueError, 'Cannot find matching for split'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
def testVersionNoMatching(self):
span_dir = os.path.join(self._input_base_path, 'span01', 'wrong', 'data')
io_utils.write_string_file(span_dir, 'testing_version_no_matching')
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='span{SPAN}/version{VERSION}/split1/*')
]
with self.assertRaisesRegexp(ValueError, 'Cannot find matching for split'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
def testSpanWrongFormat(self):
wrong_span = os.path.join(self._input_base_path, 'spanx', 'split1', 'data')
io_utils.write_string_file(wrong_span, 'testing_wrong_span')
splits = [
example_gen_pb2.Input.Split(name='s1', pattern='span{SPAN}/split1/*')
]
with self.assertRaisesRegexp(ValueError, 'Cannot find span number'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
def testVersionWrongFormat(self):
wrong_version = os.path.join(self._input_base_path, 'span01', 'versionx',
'split1', 'data')
io_utils.write_string_file(wrong_version, 'testing_wrong_version')
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='span{SPAN}/version{VERSION}/split1/*')
]
with self.assertRaisesRegexp(ValueError, 'Cannot find version number'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
def testMultipleSpecs(self):
splits1 = [
example_gen_pb2.Input.Split(
name='s1', pattern='span1{SPAN}/span2{SPAN}/split1/*')
]
with self.assertRaisesRegexp(ValueError, 'Only one {SPAN} is allowed'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits1)
splits2 = [
example_gen_pb2.Input.Split(
name='s1',
pattern='span{SPAN}/ver1{VERSION}/ver2{VERSION}/split1/*')
]
with self.assertRaisesRegexp(ValueError, 'Only one {VERSION} is allowed'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits2)
splits3 = [
example_gen_pb2.Input.Split(
name='s1', pattern='{YYYY}-{MM}-{DD}-{MM}/split1/*')
]
with self.assertRaisesRegexp(ValueError, 'Exactly one of each date spec'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits3)
def testHaveSpanNoVersion(self):
# Test specific behavior when Span spec is present but Version is not.
split1 = os.path.join(self._input_base_path, 'span1', 'split1', 'data')
io_utils.write_string_file(split1, 'testing')
splits = [
example_gen_pb2.Input.Split(name='s1', pattern='span{SPAN}/split1/*')
]
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
self.assertEqual(span, 1)
self.assertIsNone(version)
def testHaveSpanAndVersion(self):
# Test specific behavior when both Span and Version are present.
split1 = os.path.join(self._input_base_path, 'span1', 'version1', 'split1',
'data')
io_utils.write_string_file(split1, 'testing')
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='span{SPAN}/version{VERSION}/split1/*')
]
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
self.assertEqual(span, 1)
self.assertEqual(version, 1)
def testHaveVersionNoSpan(self):
# Test specific behavior when Version spec is present but Span is not.
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='version{VERSION}/split1/*')
]
with self.assertRaisesRegexp(
ValueError,
'Version spec provided, but Span or Date spec is not present'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
def testNoSpanOrVersion(self):
# Test specific behavior when neither Span nor Version spec is present.
split1 = os.path.join(self._input_base_path, 'split1', 'data')
io_utils.write_string_file(split1, 'testing')
splits = [example_gen_pb2.Input.Split(name='s1', pattern='split1/*')]
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
self.assertEqual(span, 0)
self.assertIsNone(version)
def testNewSpanWithOlderVersionAlign(self):
# Test specific behavior when a newer Span has older Version.
span1_ver2 = os.path.join(self._input_base_path, 'span1', 'ver2', 'split1',
'data')
io_utils.write_string_file(span1_ver2, 'testing')
span2_ver1 = os.path.join(self._input_base_path, 'span2', 'ver1', 'split1',
'data')
io_utils.write_string_file(span2_ver1, 'testing')
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='span{SPAN}/ver{VERSION}/split1/*')
]
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
self.assertEqual(span, 2)
self.assertEqual(version, 1)
def testDateSpecPartiallyMissing(self):
splits1 = [
example_gen_pb2.Input.Split(name='s1', pattern='{YYYY}-{MM}/split1/*')
]
with self.assertRaisesRegexp(ValueError, 'Exactly one of each date spec'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits1)
def testBothSpanAndDate(self):
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='{YYYY}-{MM}-{DD}/{SPAN}/split1/*')
]
with self.assertRaisesRegexp(
ValueError,
'Either span spec or date specs must be specified exclusively'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
def testDateBadFormat(self):
# Test improperly formed date.
split1 = os.path.join(self._input_base_path, 'yyyymmdd', 'split1', 'data')
io_utils.write_string_file(split1, 'testing')
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='{YYYY}{MM}{DD}/split1/*')
]
with self.assertRaisesRegexp(ValueError,
'Cannot find span number using date'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
def testInvalidDate(self):
split1 = os.path.join(self._input_base_path, '20201301', 'split1', 'data')
io_utils.write_string_file(split1, 'testing')
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='{YYYY}{MM}{DD}/split1/*')
]
with self.assertRaisesRegexp(ValueError, 'Retrieved date is invalid'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
def testHaveDateNoVersion(self):
# Test specific behavior when Date spec is present but Version is not.
split1 = os.path.join(self._input_base_path, '19700102', 'split1', 'data')
io_utils.write_string_file(split1, 'testing')
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='{YYYY}{MM}{DD}/split1/*')
]
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
self.assertEqual(span, 1)
self.assertIsNone(version)
def testHaveDateAndVersion(self):
# Test specific behavior when both Date and Version are present.
split1 = os.path.join(self._input_base_path, '19700102', 'ver1', 'split1',
'data')
io_utils.write_string_file(split1, 'testing')
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='{YYYY}{MM}{DD}/ver{VERSION}/split1/*')
]
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
self.assertEqual(span, 1)
self.assertEqual(version, 1)
def testSpanInvalidWidth(self):
splits = [
example_gen_pb2.Input.Split(name='s1', pattern='{SPAN:x}/split1/*')
]
with self.assertRaisesRegexp(
ValueError, 'Width modifier in span spec is not a positive integer'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
def testVersionInvalidWidth(self):
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='{SPAN}/{VERSION:x}/split1/*')
]
with self.assertRaisesRegexp(
ValueError, 'Width modifier in version spec is not a positive integer'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
def testSpanWidth(self):
split1 = os.path.join(self._input_base_path, 'span1', 'split1', 'data')
io_utils.write_string_file(split1, 'testing')
splits = [
example_gen_pb2.Input.Split(name='s1', pattern='span{SPAN:2}/split1/*')
]
# TODO(jjma): find a better way of describing this error to user.
with self.assertRaisesRegexp(ValueError,
'Glob pattern does not match regex pattern'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
splits = [
example_gen_pb2.Input.Split(name='s1', pattern='span{SPAN:1}/split1/*')
]
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
self.assertEqual(span, 1)
self.assertIsNone(version)
def testVersionWidth(self):
split1 = os.path.join(self._input_base_path, 'span1', 'ver1', 'split1',
'data')
io_utils.write_string_file(split1, 'testing')
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='span{SPAN}/ver{VERSION:2}/split1/*')
]
# TODO(jjma): find a better way of describing this error to user.
with self.assertRaisesRegexp(ValueError,
'Glob pattern does not match regex pattern'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='span{SPAN}/ver{VERSION:1}/split1/*')
]
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
self.assertEqual(span, 1)
self.assertEqual(version, 1)
def testSpanVersionWidthNoSeperator(self):
split1 = os.path.join(self._input_base_path, '1234', 'split1', 'data')
io_utils.write_string_file(split1, 'testing')
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='{SPAN:2}{VERSION:2}/split1/*')
]
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
self.assertEqual(span, 12)
self.assertEqual(version, 34)
def testCalculateSplitsFingerprintSpanAndVersionWithSpan(self):
# Test align of span and version numbers.
span1_v1_split1 = os.path.join(self._input_base_path, 'span01', 'ver01',
'split1', 'data')
io_utils.write_string_file(span1_v1_split1, 'testing11')
span1_v1_split2 = os.path.join(self._input_base_path, 'span01', 'ver01',
'split2', 'data')
io_utils.write_string_file(span1_v1_split2, 'testing12')
span2_v1_split1 = os.path.join(self._input_base_path, 'span02', 'ver01',
'split1', 'data')
io_utils.write_string_file(span2_v1_split1, 'testing21')
# Test if error raised when span does not align.
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='span{SPAN}/ver{VERSION}/split1/*'),
example_gen_pb2.Input.Split(
name='s2', pattern='span{SPAN}/ver{VERSION}/split2/*')
]
with self.assertRaisesRegexp(
ValueError, 'Latest span should be the same for each split'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
span2_v1_split2 = os.path.join(self._input_base_path, 'span02', 'ver01',
'split2', 'data')
io_utils.write_string_file(span2_v1_split2, 'testing22')
span2_v2_split1 = os.path.join(self._input_base_path, 'span02', 'ver02',
'split1', 'data')
io_utils.write_string_file(span2_v2_split1, 'testing21')
# Test if error raised when span aligns but version does not.
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='span{SPAN}/ver{VERSION}/split1/*'),
example_gen_pb2.Input.Split(
name='s2', pattern='span{SPAN}/ver{VERSION}/split2/*')
]
with self.assertRaisesRegexp(
ValueError, 'Latest version should be the same for each split'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
span2_v2_split2 = os.path.join(self._input_base_path, 'span02', 'ver02',
'split2', 'data')
io_utils.write_string_file(span2_v2_split2, 'testing22')
# Test if latest span and version is selected when aligned for each split.
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='span{SPAN}/ver{VERSION}/split1/*'),
example_gen_pb2.Input.Split(
name='s2', pattern='span{SPAN}/ver{VERSION}/split2/*')
]
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
self.assertEqual(span, 2)
self.assertEqual(version, 2)
self.assertEqual(splits[0].pattern, 'span02/ver02/split1/*')
self.assertEqual(splits[1].pattern, 'span02/ver02/split2/*')
def testCalculateSplitsFingerprintSpanAndVersionWithDate(self):
# Test align of span and version numbers.
span1_v1_split1 = os.path.join(self._input_base_path, '19700102', 'ver01',
'split1', 'data')
io_utils.write_string_file(span1_v1_split1, 'testing11')
span1_v1_split2 = os.path.join(self._input_base_path, '19700102', 'ver01',
'split2', 'data')
io_utils.write_string_file(span1_v1_split2, 'testing12')
span2_v1_split1 = os.path.join(self._input_base_path, '19700103', 'ver01',
'split1', 'data')
io_utils.write_string_file(span2_v1_split1, 'testing21')
# Test if error raised when date does not align.
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='{YYYY}{MM}{DD}/ver{VERSION}/split1/*'),
example_gen_pb2.Input.Split(
name='s2', pattern='{YYYY}{MM}{DD}/ver{VERSION}/split2/*')
]
with self.assertRaisesRegexp(
ValueError, 'Latest span should be the same for each split'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
span2_v1_split2 = os.path.join(self._input_base_path, '19700103', 'ver01',
'split2', 'data')
io_utils.write_string_file(span2_v1_split2, 'testing22')
span2_v2_split1 = os.path.join(self._input_base_path, '19700103', 'ver02',
'split1', 'data')
io_utils.write_string_file(span2_v2_split1, 'testing21')
# Test if error raised when date aligns but version does not.
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='{YYYY}{MM}{DD}/ver{VERSION}/split1/*'),
example_gen_pb2.Input.Split(
name='s2', pattern='{YYYY}{MM}{DD}/ver{VERSION}/split2/*')
]
with self.assertRaisesRegexp(
ValueError, 'Latest version should be the same for each split'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
span2_v2_split2 = os.path.join(self._input_base_path, '19700103', 'ver02',
'split2', 'data')
io_utils.write_string_file(span2_v2_split2, 'testing22')
# Test if latest span and version is selected when aligned for each split.
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='{YYYY}{MM}{DD}/ver{VERSION}/split1/*'),
example_gen_pb2.Input.Split(
name='s2', pattern='{YYYY}{MM}{DD}/ver{VERSION}/split2/*')
]
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits)
self.assertEqual(span, 2)
self.assertEqual(version, 2)
self.assertEqual(splits[0].pattern, '19700103/ver02/split1/*')
self.assertEqual(splits[1].pattern, '19700103/ver02/split2/*')
def testRangeConfigWithNonexistentSpan(self):
# Test behavior when specified span in RangeConfig does not exist.
span1_split1 = os.path.join(self._input_base_path, 'span01', 'split1',
'data')
io_utils.write_string_file(span1_split1, 'testing11')
range_config = range_config_pb2.RangeConfig(
static_range=range_config_pb2.StaticRange(
start_span_number=2, end_span_number=2))
splits = [
example_gen_pb2.Input.Split(name='s1', pattern='span{SPAN:2}/split1/*')
]
with self.assertRaisesRegexp(ValueError, 'Cannot find matching for split'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits, range_config=range_config)
def testSpanAlignWithRangeConfig(self):
span1_split1 = os.path.join(self._input_base_path, 'span01', 'split1',
'data')
io_utils.write_string_file(span1_split1, 'testing11')
span2_split1 = os.path.join(self._input_base_path, 'span02', 'split1',
'data')
io_utils.write_string_file(span2_split1, 'testing21')
# Test static range in RangeConfig.
range_config = range_config_pb2.RangeConfig(
static_range=range_config_pb2.StaticRange(
start_span_number=1, end_span_number=1))
splits = [
example_gen_pb2.Input.Split(name='s1', pattern='span{SPAN:2}/split1/*')
]
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits, range_config)
self.assertEqual(span, 1)
self.assertIsNone(version)
self.assertEqual(splits[0].pattern, 'span01/split1/*')
def testRangeConfigSpanWidthPresence(self):
# Test RangeConfig.static_range behavior when span width is not given.
span1_split1 = os.path.join(self._input_base_path, 'span01', 'split1',
'data')
io_utils.write_string_file(span1_split1, 'testing11')
range_config = range_config_pb2.RangeConfig(
static_range=range_config_pb2.StaticRange(
start_span_number=1, end_span_number=1))
splits1 = [
example_gen_pb2.Input.Split(name='s1', pattern='span{SPAN}/split1/*')
]
# RangeConfig cannot find zero padding span without width modifier.
with self.assertRaisesRegexp(ValueError, 'Cannot find matching for split'):
utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits1, range_config=range_config)
splits2 = [
example_gen_pb2.Input.Split(name='s1', pattern='span{SPAN:2}/split1/*')
]
# With width modifier in span spec, RangeConfig.static_range makes
# correct zero-padded substitution.
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits2, range_config=range_config)
self.assertEqual(span, 1)
self.assertIsNone(version)
self.assertEqual(splits2[0].pattern, 'span01/split1/*')
def testRangeConfigWithDateSpec(self):
span1_split1 = os.path.join(self._input_base_path, '19700102', 'split1',
'data')
io_utils.write_string_file(span1_split1, 'testing11')
start_span = utils.date_to_span_number(1970, 1, 2)
end_span = utils.date_to_span_number(1970, 1, 2)
range_config = range_config_pb2.RangeConfig(
static_range=range_config_pb2.StaticRange(
start_span_number=start_span, end_span_number=end_span))
splits = [
example_gen_pb2.Input.Split(
name='s1', pattern='{YYYY}{MM}{DD}/split1/*')
]
_, span, version = utils.calculate_splits_fingerprint_span_and_version(
self._input_base_path, splits, range_config=range_config)
self.assertEqual(span, 1)
self.assertIsNone(version)
self.assertEqual(splits[0].pattern, '19700102/split1/*')
def testGetQueryForSpan(self):
query = 'select * from table'
self.assertEqual(utils.get_query_for_span(query, 1), 'select * from table')
query = 'select * from table where date=@span_yyyymmdd_utc'
self.assertEqual(
utils.get_query_for_span(query, 1),
"select * from table where date='19700102'")
query = ('select * from table where '
'ts>=TIMESTAMP_SECONDS(@span_begin_timestamp) and '
'ts<TIMESTAMP_SECONDS(@span_end_timestamp)')
self.assertEqual(
utils.get_query_for_span(query, 2),
'select * from table where ts>=TIMESTAMP_SECONDS(172800) and ts<TIMESTAMP_SECONDS(259200)'
)
if __name__ == '__main__':
tf.test.main()
|
apache-2.0
| -5,650,820,876,118,375,000 | 36.893316 | 98 | 0.614599 | false |
jburel/openmicroscopy
|
components/tools/OmeroPy/src/omero/plugins/prefs.py
|
1
|
21186
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
prefs plugin
Plugin read by omero.cli.Cli during initialization. The method(s)
defined here will be added to the Cli class for later use.
The pref plugin makes use of prefs.class from the common component.
Copyright 2007-2013 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import sys
import traceback
from path import path
from omero.cli import CLI
from omero.cli import BaseControl
from omero.cli import ExistingFile
from omero.cli import NonZeroReturnCode
from omero.config import ConfigXml
from omero.util import edit_path, get_omero_userdir
from omero.util.decorators import wraps
from omero.util.upgrade_check import UpgradeCheck
from omero_ext import portalocker
from omero_ext.argparse import SUPPRESS
import omero.java
HELP = """Commands for server configuration
A config.xml file will be modified under your etc/grid directory. If you do
not have one, "upgrade" will create a new 4.2 configuration file.
The configuration values are used by bin/omero admin {start,deploy} to set
properties on launch. See etc/grid/(win)default.xml. The "Profile" block
contains a reference to "__ACTIVE__" which is the current value in config.xml
By default, OMERO.grid will use the file in etc/grid/config.xml. If you would
like to configure your system to use $HOME/omero/config.xml, you will need to
modify the application descriptor.
Environment variables:
OMERO_CONFIG - Changes the active profile
"""
def getprefs(args, dir):
"""
Kept around temporarily for upgrading users from pre-4.2 configurations.
"""
if not isinstance(args, list):
raise Exception("Not a list")
cmd = ["prefs"] + list(args)
return omero.java.run(cmd, chdir=dir)
def _make_open_and_close_config(func, allow_readonly):
def open_and_close_config(*args, **kwargs):
args = list(args)
self = args[0]
argp = args[1]
config = None
if len(args) == 2:
config = self.open_config(argp)
if not allow_readonly:
self.die_on_ro(config)
args.append(config)
try:
return func(*args, **kwargs)
finally:
if config:
config.close()
return open_and_close_config
def with_config(func):
"""
opens a config and passes it as the second argument.
"""
return wraps(func)(_make_open_and_close_config(func, True))
def with_rw_config(func):
"""
opens a config and passes it as the second argument.
Requires that the returned config be writeable
"""
return wraps(func)(_make_open_and_close_config(func, False))
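# Minimal sketch of how the two decorators above are used by the control
# methods further down (the method name and key here are hypothetical): the
# wrapper opens the config file, appends it as the trailing config argument
# and closes it afterwards; the _rw_ variant additionally dies when the file
# is read-only.
#
#   @with_rw_config
#   def example(self, args, config):
#       config["omero.example.key"] = args.VALUE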
class WriteableConfigControl(BaseControl):
"""
Base class for controls which need write access to the OMERO configuration
using the @with_rw_config decorator
Note BaseControl should be used for read-only access using @with_config
"""
def die_on_ro(self, config):
if not config.save_on_close:
self.ctx.die(333, "Cannot modify %s" % config.filename)
def run_upgrade_check(self, config, agent):
properties = config.as_map()
upgrade_url = properties.get("omero.upgrades.url", None)
if upgrade_url:
uc = UpgradeCheck(agent, url=upgrade_url)
else:
uc = UpgradeCheck(agent)
uc.run()
return uc
class PrefsControl(WriteableConfigControl):
def _configure(self, parser):
parser.add_argument(
"--source", help="Configuration file to be used. Default:"
" etc/grid/config.xml")
sub = parser.sub()
parser.add(
sub, self.all,
"List all profiles in the current config.xml file.")
default = sub.add_parser(
"def", help="List (or set) the current active profile.",
description="List (or set) the current active profile.")
default.set_defaults(func=self.default)
default.add_argument(
"NAME", nargs="?",
help="Name of the profile which should be made the new active"
" profile.")
list = parser.add(
sub, self.list,
"List all key-value pairs from the current profile (deprecated)")
list.set_defaults(func=self.list)
get = parser.add(
sub, self.get,
"Get key-value pairs from the current profile. All by default")
get.set_defaults(func=self.get)
get.add_argument(
"KEY", nargs="*", help="Names of keys in the current profile")
for x in [get, list]:
secrets = x.add_mutually_exclusive_group()
secrets.add_argument(
"--show-password", action="store_true",
help="Show values of sensitive keys (passwords, "
"tokens, etc.) in the current profile")
secrets.add_argument(
"--hide-password", action="store_false",
dest="show_password", help=SUPPRESS)
set = parser.add(
sub, self.set,
"Set key-value pair in the current profile. Omit the"
" value to remove the key.")
append = parser.add(
sub, self.append, "Append value to a key in the current profile.")
remove = parser.add(
sub, self.remove,
"Remove value from a key in the current profile.")
for x in [set, append, remove]:
x.add_argument(
"KEY", help="Name of the key in the current profile")
# report is intended for use with cfg mgmt tools
x.add_argument("--report", action="store_true",
help="Report if changes are made")
set.add_argument(
"-f", "--file", type=ExistingFile('r'),
help="Load value from file")
set.add_argument(
"VALUE", nargs="?",
help="Value to be set. If it is missing, the key will be removed")
append.add_argument(
"--set", action="store_true",
help="Append only if not already in the list")
append.add_argument("VALUE", help="Value to be appended")
remove.add_argument("VALUE", help="Value to be removed")
drop = parser.add(
sub, self.drop, "Remove the profile from the configuration file")
drop.add_argument("NAME", help="Name of the profile to remove")
parser.add(sub, self.keys, "List all keys for the current profile")
load = parser.add(
sub, self.load,
"Read into current profile from a file or standard input")
load.add_argument(
"-q", action="store_true", help="No error on conflict")
load.add_argument(
"file", nargs="*", type=ExistingFile('r'), default="-",
help="Files to read from. Default to standard input if not"
" specified")
parse = parser.add(
sub, self.parse,
"Parse the configuration properties from the etc/omero.properties"
" file and Web properties for readability.")
parse.add_argument(
"-f", "--file", type=ExistingFile('r'),
help="Alternative location for a Java properties file")
parse_group = parse.add_mutually_exclusive_group()
parse_group.add_argument(
"--defaults", action="store_true",
help="Show key/value configuration defaults")
parse_group.add_argument(
"--rst", action="store_true",
help="Generate reStructuredText from omero.properties")
parse_group.add_argument(
"--keys", action="store_true",
help="Print just the keys from omero.properties")
parse_group.add_argument(
"--headers", action="store_true",
help="Print all headers from omero.properties")
parse.add_argument(
"--no-web", action="store_true",
help="Do not parse Web properties")
parser.add(sub, self.edit, "Present the properties for the current"
" profile in your editor. Saving them will update your"
" profile.")
parser.add(sub, self.version, "Print the configuration version for"
" the current profile.")
        parser.add(sub, self.path, "Print the file that is used for"
                   " configuration")
parser.add(sub, self.lock, "Acquire the config file lock and hold"
" it")
parser.add(sub, self.upgrade, "Create a 4.2 config.xml file based on"
" your current Java Preferences")
def open_config(self, args):
if args.source:
cfg_xml = path(args.source)
if not cfg_xml.exists():
self.ctx.die(124, "File not found: %s" % args.source)
else:
grid_dir = self.ctx.dir / "etc" / "grid"
if grid_dir.exists():
cfg_xml = grid_dir / "config.xml"
else:
usr_xml = get_omero_userdir() / "config.xml"
self.ctx.err("%s not found; using %s" % (grid_dir, usr_xml))
cfg_xml = usr_xml
try:
return ConfigXml(str(cfg_xml))
except portalocker.LockException:
self.ctx.die(112, "Could not acquire lock on %s" % cfg_xml)
except Exception, e:
self.ctx.die(113, str(e))
def assert_valid_property_name(self, key):
from re import search
if search(r'[^A-Za-z0-9._-]', key):
self.ctx.die(506, 'Illegal property name: {0}'.format(key))
@with_config
def all(self, args, config):
for k, v in config.properties(None, True):
self.ctx.out(k)
@with_config
def default(self, args, config):
if args.NAME is not None:
self.die_on_ro(config)
self.ctx.out(config.default(args.NAME))
@with_config
def drop(self, args, config):
try:
config.remove(args.NAME)
except KeyError:
self.ctx.err("Unknown configuration: %s" % args.NAME)
@with_config
def list(self, args, config):
self.ctx.err('WARNING: "config list" is deprecated, '
'use "config get" instead')
args.KEY = []
self.get(args, config)
@with_config
def get(self, args, config):
orig = sorted(list(config.keys()))
keys = sorted(list(args.KEY))
if not keys:
keys = orig
for k in config.IGNORE:
k in keys and keys.remove(k)
is_password = (lambda x: x.lower().endswith('pass') or
x.lower().endswith('password'))
for k in keys:
if k not in orig:
continue
if args.KEY and len(args.KEY) == 1:
self.ctx.out(config[k])
else:
if is_password(k) and not getattr(
args, "show_password", False):
self.ctx.out("%s=%s" % (k, '*' * 8 if config[k] else ''))
else:
self.ctx.out("%s=%s" % (k, config[k]))
@with_rw_config
def set(self, args, config):
if "=" in args.KEY:
k, v = args.KEY.split("=", 1)
msg = """ "=" in key name. Did you mean "...set %s %s"?"""
if args.VALUE is None:
k, v = args.KEY.split("=", 1)
self.ctx.err(msg % (k, v))
elif args.KEY.endswith("="):
self.ctx.err(msg % (k, args.VALUE))
elif args.file:
if args.file == "-":
# Read from standard input
import fileinput
f = fileinput.input(args.file)
else:
f = args.file
try:
self.assert_valid_property_name(args.KEY)
config[args.KEY] = (''.join(f)).rstrip()
finally:
f.close()
elif args.VALUE is None:
del config[args.KEY]
if args.report:
self.ctx.out('Changed: Removed %s' % args.KEY)
else:
self.assert_valid_property_name(args.KEY)
config[args.KEY] = args.VALUE
if args.report:
self.ctx.out('Changed: Set %s:%s' % (args.KEY, args.VALUE))
def get_list_value(self, args, config):
import json
try:
list_value = json.loads(config[args.KEY])
except ValueError:
self.ctx.die(510, "No JSON object could be decoded")
if not isinstance(list_value, list):
self.ctx.die(511, "Property %s is not a list" % args.KEY)
return list_value
def get_omeroweb_default(self, key):
try:
from omeroweb import settings
setting = settings.CUSTOM_SETTINGS_MAPPINGS.get(key)
default = setting[2](setting[1]) if setting else []
except Exception, e:
self.ctx.dbg(traceback.format_exc())
self.ctx.die(514,
"Cannot retrieve default value for property %s: %s" %
(key, e))
if not isinstance(default, list):
self.ctx.die(515, "Property %s is not a list" % key)
return default
@with_rw_config
def append(self, args, config):
import json
if args.KEY in config.keys():
list_value = self.get_list_value(args, config)
elif args.KEY.startswith('omero.web.'):
list_value = self.get_omeroweb_default(args.KEY)
else:
list_value = []
jv = json.loads(args.VALUE)
if not args.set or jv not in list_value:
list_value.append(json.loads(args.VALUE))
config[args.KEY] = json.dumps(list_value)
if args.report:
self.ctx.out(
'Changed: Appended %s:%s' % (args.KEY, args.VALUE))
@with_rw_config
def remove(self, args, config):
if args.KEY not in config.keys():
if args.KEY.startswith('omero.web.'):
list_value = self.get_omeroweb_default(args.KEY)
else:
self.ctx.die(512, "Property %s is not defined" % (args.KEY))
else:
list_value = self.get_list_value(args, config)
import json
if json.loads(args.VALUE) not in list_value:
self.ctx.die(513, "%s is not defined in %s"
% (args.VALUE, args.KEY))
list_value.remove(json.loads(args.VALUE))
config[args.KEY] = json.dumps(list_value)
if args.report:
self.ctx.out('Changed: Removed %s:%s' % (args.KEY, args.VALUE))
@with_config
def keys(self, args, config):
for k in config.keys():
if k not in config.IGNORE:
self.ctx.out(k)
def parse(self, args):
if args.file:
args.file.close()
cfg = path(args.file.name)
else:
cfg = self.dir / "etc" / "omero.properties"
from omero.install.config_parser import PropertyParser
pp = PropertyParser()
pp.parse_file(str(cfg.abspath()))
# Parse PSQL profile file
for p in pp:
if p.key == "omero.db.profile":
psql_file = self.dir / "etc" / "profiles" / p.val
pp.parse_file(str(psql_file.abspath()))
break
# Parse OMERO.web configuration properties
if not args.no_web:
pp.parse_module('omeroweb.settings')
# Display options
if args.headers:
pp.print_headers()
elif args.keys:
pp.print_keys()
elif args.rst:
pp.print_rst()
else:
pp.print_defaults()
@with_rw_config
def load(self, args, config):
keys = None
if not args.q:
keys = config.keys()
# Handle all lines before updating config in case of error.
new_config = dict(config)
try:
for f in args.file:
if f == "-":
# Read from standard input
import fileinput
f = fileinput.input(f)
try:
previous = None
for line in f:
if previous:
line = previous + line
previous = self.handle_line(line, new_config, keys)
finally:
if f != "-":
f.close()
except NonZeroReturnCode:
raise
except Exception, e:
self.ctx.die(968, "Cannot read %s: %s" % (args.file, e))
for key, value in new_config.items():
config[key] = value
@with_rw_config
def edit(self, args, config, edit_path=edit_path):
from omero.util.temp_files import create_path, remove_path
start_text = "# Edit your preferences below. Comments are ignored\n"
for k in sorted(config.keys()):
start_text += ("%s=%s\n" % (k, config[k]))
temp_file = create_path()
try:
edit_path(temp_file, start_text)
except RuntimeError, re:
self.ctx.dbg(traceback.format_exc())
self.ctx.die(954, "%s: Failed to edit %s"
% (getattr(re, "pid", "Unknown"), temp_file))
args.NAME = config.default()
old_config = dict(config)
self.drop(args, config)
args.file = [open(str(temp_file), "r")]
args.q = True
try:
self.load(args, config)
except Exception as e:
for key, value in old_config.items():
config[key] = value
raise e
finally:
remove_path(temp_file)
@with_config
def version(self, args, config):
self.ctx.out(config.version(config.default()))
@with_config
def path(self, args, config):
self.ctx.out(config.filename)
@with_rw_config
def lock(self, args, config):
self.ctx.input("Press enter to unlock")
@with_rw_config
def upgrade(self, args, config):
self.ctx.out("Importing pre-4.2 preferences")
txt = getprefs(["get"], str(self.ctx.dir / "lib"))
# Handle all lines before updating config in case of error.
new_config = dict(config)
for line in txt.split("\n"):
self.handle_line(line, new_config, None)
for key, value in new_config.items():
config[key] = value
# Upgrade procedure for 4.2
MSG = """Manually modify them via "omero config old set ..." and \
re-run"""
m = config.as_map()
for x in ("keyStore", "keyStorePassword", "trustStore",
"trustStorePassword"):
old = "omero.ldap." + x
new = "omero.security." + x
if old in m:
config[new] = config[old]
attributes, values = [], []
if "omero.ldap.attributes" in m:
attributes = config["omero.ldap.attributes"]
attributes = attributes.split(",")
if "omero.ldap.values" in m:
values = config["omero.ldap.values"]
values = values.split(",")
if len(attributes) != len(values):
raise ValueError("%s != %s\nLDAP properties in pre-4.2"
" configuration are invalid.\n%s"
% (attributes, values, MSG))
pairs = zip(attributes, values)
if pairs:
if len(pairs) == 1:
user_filter = "(%s=%s)" % (tuple(pairs[0]))
else:
                # Join the attribute/value pairs into a single conjunctive
                # LDAP filter string.
                user_filter = "(&%s)" % "".join(
                    "(%s=%s)" % tuple(pair) for pair in pairs)
config["omero.ldap.user_filter"] = user_filter
if "omero.ldap.groups" in m:
raise ValueError("Not currently handling omero.ldap.groups\n%s"
% MSG)
config["omero.config.upgraded"] = "4.2.0"
def handle_line(self, line, config, keys):
line = line.strip()
if not line or line.startswith("#"):
return None
if line.endswith("\\"):
return line[:-1]
parts = line.split("=", 1)
if len(parts[0]) == 0:
return
if len(parts) == 1:
parts.append("")
_key = parts[0]
_new = parts[1]
if _key in config.keys():
_old = config[_key]
else:
self.assert_valid_property_name(_key)
_old = None
if keys and _key in keys and _new != _old:
self.ctx.die(502, "Duplicate property: %s ('%s' => '%s')"
% (_key, _old, _new))
keys.append(_key)
config[_key] = _new
def old(self, args):
self.ctx.out(getprefs(args.target, str(self.ctx.dir / "lib")))
try:
register("config", PrefsControl, HELP)
except NameError:
if __name__ == "__main__":
cli = CLI()
cli.register("config", PrefsControl, HELP)
cli.invoke(sys.argv[1:])
|
gpl-2.0
| -7,798,762,105,862,879,000 | 33.960396 | 78 | 0.544841 | false |
jardiacaj/finem_imperii
|
turn/test/test_turn.py
|
1
|
7924
|
from django.test import TestCase
from django.urls.base import reverse
from battle.models import Battle
from organization.models.organization import Organization
from turn.barbarians import do_settlement_barbarian_generation
from turn.battle import organizations_with_battle_ready_units, \
battle_ready_units_in_tile, opponents_in_organization_list, \
get_largest_conflict_in_list, create_battle_from_conflict
from turn.conquest import worldwide_conquests
from turn.demography import do_settlement_population_changes
from turn.unit import do_unit_debt_increase
from unit.models import WorldUnit
from world.admin import pass_turn
from world.initialization import initialize_unit, initialize_settlement
from world.models.events import TileEvent
from world.models.geography import Tile, World, Settlement
class TestEmortuusTurn(TestCase):
fixtures = ['world1']
def test_pass_one_year_in_emortuus(self):
for i in range(12):
pass_turn(None, None, World.objects.all())
class TestTurn(TestCase):
fixtures = ['simple_world']
def test_pass_turn_admin_action(self):
pass_turn(None, None, World.objects.all())
def test_population_generation_in_empty_settlement(self):
settlement = Settlement.objects.get(name="Small Fynkah")
settlement.npc_set.all().delete()
settlement.update_population()
self.assertEqual(settlement.population, 0)
do_settlement_population_changes(settlement)
self.assertEqual(settlement.population, 5)
def test_organizations_with_battle_ready_units(self):
tile = Tile.objects.get(id=108)
result = organizations_with_battle_ready_units(tile)
self.assertIn(Organization.objects.get(id=105), result)
self.assertIn(Organization.objects.get(id=112), result)
self.assertEqual(len(result), 2)
def test_battle_ready_units_in_tile(self):
tile = Tile.objects.get(id=108)
result = battle_ready_units_in_tile(tile)
self.assertIn(WorldUnit.objects.get(id=1), result)
self.assertIn(WorldUnit.objects.get(id=2), result)
self.assertEqual(len(result), 3)
def test_opponents_in_organization_list(self):
tile = Tile.objects.get(id=108)
result = opponents_in_organization_list(organizations_with_battle_ready_units(tile), tile)
self.assertEqual(len(result), 1)
opponents = result[0]
self.assertIn(Organization.objects.get(id=105), opponents[0])
self.assertIn(Organization.objects.get(id=112), opponents[1])
self.assertEqual(len(opponents), 2)
def test_get_largest_conflict_in_list(self):
initialize_unit(WorldUnit.objects.get(id=1))
initialize_unit(WorldUnit.objects.get(id=2))
tile = Tile.objects.get(id=108)
conflicts = opponents_in_organization_list(organizations_with_battle_ready_units(tile), tile)
result = get_largest_conflict_in_list(conflicts, tile)
self.assertEqual(len(result), 2)
self.assertIn(Organization.objects.get(id=105), result[0])
self.assertIn(Organization.objects.get(id=112), result[1])
self.assertEqual(len(result), 2)
def test_create_battle_from_conflict(self):
initialize_unit(WorldUnit.objects.get(id=1))
initialize_unit(WorldUnit.objects.get(id=2))
tile = Tile.objects.get(id=108)
organization1 = Organization.objects.get(id=105)
organization2 = Organization.objects.get(id=112)
battle = create_battle_from_conflict(
[
[organization1],
[organization2]
],
tile
)
self.assertEqual(battle.tile, tile)
def test_create_only_one_conflict(self):
world = World.objects.get(id=2)
for unit in WorldUnit.objects.filter(world=world):
initialize_unit(unit)
pass_turn(None, None, World.objects.filter(id=2))
pass_turn(None, None, World.objects.filter(id=2))
self.assertEqual(Battle.objects.count(), 1)
Battle.objects.filter(tile__world=world).update(current=False)
pass_turn(None, None, World.objects.filter(id=2))
self.assertEqual(Battle.objects.count(), 2)
def test_world_blocking(self):
world = World.objects.get(id=2)
world.blocked_for_turn = True
world.save()
self.client.post(
reverse('account:login'),
{'username': 'alice', 'password': 'test'},
)
response = self.client.get(
reverse('character:activate', kwargs={'char_id': 3}),
follow=True
)
self.assertRedirects(response, reverse('account:home'))
def test_conquest(self):
tile = Tile.objects.get(name="More mountains")
conqueror = Organization.objects.get(id=105)
tile_event = TileEvent.objects.create(
tile=tile,
type=TileEvent.CONQUEST,
organization=conqueror,
counter=0,
start_turn=0,
active=True
)
worldwide_conquests(tile.world)
tile.refresh_from_db()
self.assertNotEqual(tile.controlled_by, conqueror)
tile_event.refresh_from_db()
self.assertEqual(tile_event.end_turn, None)
self.assertEqual(tile_event.counter, 0)
def test_conquest_end(self):
tile = Tile.objects.get(id=107)
conqueror = Organization.objects.get(id=105)
tile_event = TileEvent.objects.create(
tile=tile,
type=TileEvent.CONQUEST,
organization=conqueror,
counter=0,
start_turn=0,
active=True
)
worldwide_conquests(tile.world)
tile.refresh_from_db()
self.assertNotEqual(tile.controlled_by, conqueror)
tile_event.refresh_from_db()
self.assertEqual(tile_event.end_turn, 0)
self.assertNotEqual(tile_event.end_turn, None)
self.assertFalse(tile_event.active)
def test_conquest_success(self):
tile = Tile.objects.get(name="More mountains")
conqueror = Organization.objects.get(id=105)
tile_event = TileEvent.objects.create(
tile=tile,
type=TileEvent.CONQUEST,
organization=conqueror,
counter=100000,
start_turn=0,
active=True
)
worldwide_conquests(tile.world)
tile.refresh_from_db()
self.assertEqual(tile.controlled_by, conqueror)
tile_event.refresh_from_db()
self.assertEqual(tile_event.end_turn, 0)
self.assertNotEqual(tile_event.end_turn, None)
self.assertFalse(tile_event.active)
def test_barbarian_non_creation_in_occupied_settlement(self):
settlement = Settlement.objects.get(name="Small Fynkah")
initialize_settlement(settlement)
for unit in settlement.worldunit_set.all():
initialize_unit(unit)
do_settlement_barbarian_generation(settlement)
self.assertFalse(
settlement.worldunit_set.filter(owner_character__isnull=True).exists()
)
def test_barbarian_creation_in_barbaric_settlement(self):
settlement = Settlement.objects.get(name="Small Shaax")
initialize_settlement(settlement)
do_settlement_barbarian_generation(settlement)
self.assertTrue(
settlement.worldunit_set.filter(owner_character__isnull=True).exists()
)
def test_unit_debt_increase(self):
unit = WorldUnit.objects.get(id=4)
initialize_unit(unit)
do_unit_debt_increase(unit)
self.assertEqual(unit.get_owners_debt(), 100)
def test_unit_debt_for_barbarian_unit(self):
unit = WorldUnit.objects.get(id=4)
unit.owner_character = None
unit.save()
initialize_unit(unit)
do_unit_debt_increase(unit)
self.assertEqual(unit.get_owners_debt(), 0)
|
agpl-3.0
| 5,303,196,034,817,540,000 | 36.733333 | 101 | 0.652196 | false |
aakashrana1995/svnit-tnp
|
tnp/consent/migrations/0003_consentdeadline_userconsent_userdatafields.py
|
1
|
1971
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-07 08:16
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('company', '0004_auto_20170204_1238'),
('consent', '0002_educationdetail_hsc_passing_year'),
]
operations = [
migrations.CreateModel(
name='ConsentDeadline',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('deadline', models.DateTimeField()),
('strict', models.BooleanField(default=True)),
('slack_time', models.IntegerField(default=0)),
('job', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='consent_deadline', to='company.Job')),
],
),
migrations.CreateModel(
name='UserConsent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_valid', models.BooleanField(default=True)),
('job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_consent', to='company.Job')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_consent', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='UserDataFields',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('slug', models.CharField(max_length=255)),
],
),
]
|
mit
| 4,100,357,406,943,850,000 | 41.847826 | 147 | 0.595637 | false |
OmeGak/indico-plugins
|
vc_vidyo/indico_vc_vidyo/zodbimport.py
|
1
|
8102
|
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
from indico.core.config import Config
from indico.core.db import db
from indico.util.console import cformat
from indico.util.struct.iterables import committing_iterator
from indico.modules.vc.models.vc_rooms import VCRoom, VCRoomEventAssociation, VCRoomStatus, VCRoomLinkType
from indico_zodbimport import Importer, option_value, convert_principal_list, convert_to_unicode
from indico_vc_vidyo.models.vidyo_extensions import VidyoExtension
from indico_vc_vidyo.plugin import VidyoPlugin
LINKED_ID_RE = re.compile(r'.*[st](\w+)$')
MAP_LINK_TYPES = {
'event': 'event',
'contribution': 'contribution',
'session': 'block'
}
def extract_id(full_id):
return LINKED_ID_RE.match(full_id).group(1).replace('l', ':')
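# For illustration only: a legacy linked id ending in "s5l0" matches the regex
# above and is converted to "5:0" (session id and slot id); the concrete
# values are made up.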
def is_valid_link(event, link_type, link_id):
if link_type == VCRoomLinkType.block:
session_id, slot_id = link_id.split(':')
session = event.sessions.get(session_id)
if session is None:
return False
return slot_id in session.slots
elif link_type == VCRoomLinkType.contribution:
return str(link_id) in event.contributions
class VidyoImporter(Importer):
plugins = {'vc_vidyo'}
def pre_check(self):
return self.check_plugin_schema('vc_vidyo')
def has_data(self):
return VCRoom.find(type='vidyo').count() or VidyoExtension.find().count()
def migrate(self):
self.booking_root = self.zodb_root['catalog']['cs_bookingmanager_conference']._tree
self.migrate_settings()
self.migrate_event_bookings()
def migrate_event_bookings(self):
self.vc_rooms_by_extension = {}
with VidyoPlugin.instance.plugin_context():
for event_id, csbm in committing_iterator(self.booking_root.iteritems(), n=1000):
for bid, booking in csbm._bookings.iteritems():
if booking._type == 'Vidyo':
vc_room = self.vc_rooms_by_extension.get(int(booking._extension))
if not vc_room:
vc_room = self.migrate_vidyo_room(booking)
self.migrate_event_booking(vc_room, booking)
def migrate_settings(self):
print cformat('%{white!}migrating settings')
VidyoPlugin.settings.delete_all()
opts = self.zodb_root['plugins']['Collaboration']._PluginType__plugins['Vidyo']._PluginBase__options
VidyoPlugin.settings.set('managers', convert_principal_list(opts['admins']))
VidyoPlugin.settings.set('acl', convert_principal_list(opts['AuthorisedUsersGroups']))
settings_map = {
'adminAPIURL': 'admin_api_wsdl',
'userAPIURL': 'user_api_wsdl',
'prefix': 'indico_room_prefix',
'indicoGroup': 'room_group_name',
'phoneNumbers': 'vidyo_phone_link',
'maxDaysBeforeClean': 'num_days_old',
'indicoUsername': 'username',
'indicoPassword': 'password',
'contactSupport': 'support_email',
'cleanWarningAmount': 'max_rooms_warning',
'additionalEmails': 'notification_emails'
}
for old, new in settings_map.iteritems():
value = option_value(opts[old])
if old == 'prefix':
value = int(value)
elif old == 'phoneNumbers':
match = next((re.search(r'https?://[^"]+', convert_to_unicode(v)) for v in value), None)
if match is None:
continue
value = match.group(0)
elif old == 'additionalEmails':
value = list(set(value) | {x.email for x in option_value(opts['admins'])})
VidyoPlugin.settings.set(new, value)
db.session.commit()
def migrate_vidyo_room(self, booking):
booking_params = booking._bookingParams
vc_room = VCRoom(created_by_id=Config.getInstance().getJanitorUserId())
vc_room.type = 'vidyo'
vc_room.status = VCRoomStatus.created if booking._created else VCRoomStatus.deleted
vc_room.name = booking_params['roomName']
vc_room.data = {
'description': booking_params['roomDescription'],
'room_pin': booking._pin,
'moderation_pin': getattr(booking, '_moderatorPin', ''),
'vidyo_id': booking._roomId,
'url': booking._url,
'owner': ('User', int(booking._owner.id)),
'owner_identity': booking._ownerVidyoAccount,
'auto_mute': booking_params.get('autoMute', True)
}
vc_room.modified_dt = booking._modificationDate
vc_room.created_dt = booking._creationDate
db.session.add(vc_room)
vidyo_ext = VidyoExtension(vc_room=vc_room, extension=int(booking._extension),
owned_by_id=int(booking._owner.id))
db.session.add(vidyo_ext)
db.session.flush()
self.vc_rooms_by_extension[vidyo_ext.extension] = vc_room
print cformat('%{green}+++%{reset} %{cyan}{}%{reset} [%{yellow!}{}%{reset}]').format(
vc_room.name, booking._roomId)
return vc_room
def migrate_event_booking(self, vc_room, booking):
ch_idx = self.zodb_root['conferences']
booking_params = booking._bookingParams
old_link_type = getattr(booking, '_linkVideoType', None)
link_type = (VCRoomLinkType.get(MAP_LINK_TYPES[booking._linkVideoType]) if old_link_type
else VCRoomLinkType.event)
if booking._conf.id not in ch_idx:
print cformat(
"[%{red!}WARNING%{reset}] %{yellow!}{} is linked to event '{}' but the latter seems to have been"
" deleted. Removing link."
).format(vc_room, booking._conf.id)
return
if link_type == VCRoomLinkType.event:
extracted_id = None
elif not booking._linkVideoId:
print cformat(
"[%{red!}WARNING%{reset}] %{yellow!}{} is linked to a {} but no id given%{reset}. Linking to event."
).format(vc_room, link_type.name)
extracted_id = None
link_type = VCRoomLinkType.event
else:
extracted_id = extract_id(booking._linkVideoId)
if link_type != VCRoomLinkType.event and not is_valid_link(booking._conf, link_type, extracted_id):
print cformat(
"[%{red!}WARNING%{reset}] %{yellow!}{} is linked to a {} but it does not exist%{reset}. "
"Linking to event."
).format(vc_room, link_type.name)
link_type = VCRoomLinkType.event
extracted_id = None
event_vc_room = VCRoomEventAssociation(
event_id=booking._conf.id,
vc_room=vc_room,
link_type=link_type,
link_id=extracted_id,
show=not booking._hidden
)
event_vc_room.data = {
'show_pin': booking_params['displayPin'],
'show_phone_numbers': booking_params.get('displayPhoneNumbers', True),
'show_autojoin': booking_params['displayURL'],
}
db.session.add(event_vc_room)
print cformat('%{green}<->%{reset} %{cyan!}{}%{reset} %{red!}{}%{reset} [%{yellow}{}%{reset}]').format(
booking._conf.id, booking._roomId, old_link_type)
|
gpl-3.0
| 6,832,290,952,876,015,000 | 40.762887 | 116 | 0.608615 | false |
phraust1612/cifar100challenge
|
cnn_renet/init_param.py
|
1
|
1166
|
import numpy as np
import caffe
import os
def extract_caffe_model(model, weights, output_path):
"""extract caffe model's parameters to numpy array, and write them to files
Args:
model: path of '.prototxt'
weights: path of '.caffemodel'
output_path: output path of numpy params
Returns:
None
"""
net = caffe.Net(model, caffe.TEST)
net.copy_from(weights)
if not os.path.exists(output_path):
os.makedirs(output_path)
for item in net.params.items():
name, layer = item
print('convert layer: ' + name)
num = 0
for p in net.params[name]:
np.save(output_path + '/' + str(name) + '_' + str(num), p.data)
num += 1
def init_resnet (output):
d = 'cnn_resnet/param_resnet/'
  extract_caffe_model ("cnn_resnet/ResNet-152-deploy.prototxt",
      "cnn_resnet/ResNet-152-model.caffemodel",
      d)
  # list the extracted parameter files only after they have been written
  l = os.listdir(d)
for i in l:
x = np.load (d+i)
if x.ndim == 4:
x = x.transpose ([2,3,1,0])
np.save (d+i, x)
x = np.random.randn(2048,output)
x = x.astype ('float32')
np.save (d+"fc_0.npy", x)
x = np.random.randn(output)
x = x.astype ('float32')
np.save (d+"fc_1.npy", x)
|
mit
| 4,951,470,029,215,791,000 | 23.291667 | 77 | 0.614923 | false |
pignacio/python-nvd3
|
examples/MultiBarChart.py
|
1
|
1100
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Examples for Python-nvd3 is a Python wrapper for NVD3 graph library.
NVD3 is an attempt to build re-usable charts and chart components
for d3.js without taking away the power that d3.js gives you.
Project location : https://github.com/areski/python-nvd3
"""
from nvd3.multiBarChart import MultiBarChart
import random
#Open File for test
output_file = open('test_multiBarChart.html', 'w')
type = "multiBarChart"
chart = MultiBarChart(name=type, height=350)
chart.set_containerheader("\n\n<h2>" + type + "</h2>\n\n")
nb_element = 10
xdata = list(range(nb_element))
ydata = [random.randint(1, 10) for i in range(nb_element)]
ydata2 = [x * 2 for x in ydata]
extra_serie = {"tooltip": {"y_start": "", "y_end": " call"}}
chart.add_serie(name="Count", y=ydata, x=xdata, extra=extra_serie)
extra_serie = {"tooltip": {"y_start": "", "y_end": " min"}}
chart.add_serie(name="Duration", y=ydata2, x=xdata, extra=extra_serie)
chart.buildhtml()
output_file.write(chart.htmlcontent)
#---------------------------------------
#close Html file
output_file.close()
|
mit
| 5,690,117,570,469,445,000 | 29.555556 | 70 | 0.677273 | false |
bigswitch/nova
|
nova/tests/unit/objects/test_instance_pci_requests.py
|
1
|
7650
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_serialization import jsonutils
from nova import objects
from nova.tests.unit.objects import test_objects
from nova.tests import uuidsentinel as uuids
FAKE_UUID = '79a53d6b-0893-4838-a971-15f4f382e7c2'
FAKE_REQUEST_UUID = '69b53d6b-0793-4839-c981-f5c4f382e7d2'
# NOTE(danms): Yes, these are the same right now, but going forward,
# we have changes to make which will be reflected in the format
# in instance_extra, but not in system_metadata.
fake_pci_requests = [
{'count': 2,
'spec': [{'vendor_id': '8086',
'device_id': '1502'}],
'alias_name': 'alias_1',
'is_new': False,
'request_id': FAKE_REQUEST_UUID},
{'count': 2,
'spec': [{'vendor_id': '6502',
'device_id': '07B5'}],
'alias_name': 'alias_2',
'is_new': True,
'request_id': FAKE_REQUEST_UUID},
]
fake_legacy_pci_requests = [
{'count': 2,
'spec': [{'vendor_id': '8086',
'device_id': '1502'}],
'alias_name': 'alias_1'},
{'count': 1,
'spec': [{'vendor_id': '6502',
'device_id': '07B5'}],
'alias_name': 'alias_2'},
]
class _TestInstancePCIRequests(object):
@mock.patch('nova.db.instance_extra_get_by_instance_uuid')
def test_get_by_instance_uuid(self, mock_get):
mock_get.return_value = {
'instance_uuid': FAKE_UUID,
'pci_requests': jsonutils.dumps(fake_pci_requests),
}
requests = objects.InstancePCIRequests.get_by_instance_uuid(
self.context, FAKE_UUID)
self.assertEqual(2, len(requests.requests))
for index, request in enumerate(requests.requests):
self.assertEqual(fake_pci_requests[index]['alias_name'],
request.alias_name)
self.assertEqual(fake_pci_requests[index]['count'],
request.count)
self.assertEqual(fake_pci_requests[index]['spec'],
[dict(x.items()) for x in request.spec])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
def test_get_by_instance_uuid_and_newness(self, mock_get):
pcir = objects.InstancePCIRequests
mock_get.return_value = objects.InstancePCIRequests(
instance_uuid=uuids.instance,
requests=[objects.InstancePCIRequest(count=1, is_new=False),
objects.InstancePCIRequest(count=2, is_new=True)])
old_req = pcir.get_by_instance_uuid_and_newness(self.context,
uuids.instance,
False)
mock_get.return_value = objects.InstancePCIRequests(
instance_uuid=uuids.instance,
requests=[objects.InstancePCIRequest(count=1, is_new=False),
objects.InstancePCIRequest(count=2, is_new=True)])
new_req = pcir.get_by_instance_uuid_and_newness(self.context,
uuids.instance,
True)
self.assertEqual(1, old_req.requests[0].count)
self.assertEqual(2, new_req.requests[0].count)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
def test_get_by_instance_current(self, mock_get):
instance = objects.Instance(uuid=uuids.instance,
system_metadata={})
objects.InstancePCIRequests.get_by_instance(self.context,
instance)
mock_get.assert_called_once_with(self.context, uuids.instance)
def test_get_by_instance_legacy(self):
fakesysmeta = {
'pci_requests': jsonutils.dumps([fake_legacy_pci_requests[0]]),
'new_pci_requests': jsonutils.dumps([fake_legacy_pci_requests[1]]),
}
instance = objects.Instance(uuid=uuids.instance,
system_metadata=fakesysmeta)
requests = objects.InstancePCIRequests.get_by_instance(self.context,
instance)
self.assertEqual(2, len(requests.requests))
self.assertEqual('alias_1', requests.requests[0].alias_name)
self.assertFalse(requests.requests[0].is_new)
self.assertEqual('alias_2', requests.requests[1].alias_name)
self.assertTrue(requests.requests[1].is_new)
def test_new_compatibility(self):
request = objects.InstancePCIRequest(is_new=False)
self.assertFalse(request.new)
def test_backport_1_0(self):
requests = objects.InstancePCIRequests(
requests=[objects.InstancePCIRequest(count=1,
request_id=FAKE_UUID),
objects.InstancePCIRequest(count=2,
request_id=FAKE_UUID)])
primitive = requests.obj_to_primitive(target_version='1.0')
backported = objects.InstancePCIRequests.obj_from_primitive(
primitive)
self.assertEqual('1.0', backported.VERSION)
self.assertEqual(2, len(backported.requests))
self.assertFalse(backported.requests[0].obj_attr_is_set('request_id'))
self.assertFalse(backported.requests[1].obj_attr_is_set('request_id'))
def test_obj_from_db(self):
req = objects.InstancePCIRequests.obj_from_db(None, FAKE_UUID, None)
self.assertEqual(FAKE_UUID, req.instance_uuid)
self.assertEqual(0, len(req.requests))
db_req = jsonutils.dumps(fake_pci_requests)
req = objects.InstancePCIRequests.obj_from_db(None, FAKE_UUID, db_req)
self.assertEqual(FAKE_UUID, req.instance_uuid)
self.assertEqual(2, len(req.requests))
self.assertEqual('alias_1', req.requests[0].alias_name)
def test_from_request_spec_instance_props(self):
requests = objects.InstancePCIRequests(
requests=[objects.InstancePCIRequest(count=1,
request_id=FAKE_UUID,
spec=[{'vendor_id': '8086',
'device_id': '1502'}])
],
instance_uuid=FAKE_UUID)
result = jsonutils.to_primitive(requests)
result = objects.InstancePCIRequests.from_request_spec_instance_props(
result)
self.assertEqual(1, len(result.requests))
self.assertEqual(1, result.requests[0].count)
self.assertEqual(FAKE_UUID, result.requests[0].request_id)
self.assertEqual([{'vendor_id': '8086', 'device_id': '1502'}],
result.requests[0].spec)
class TestInstancePCIRequests(test_objects._LocalTest,
_TestInstancePCIRequests):
pass
class TestRemoteInstancePCIRequests(test_objects._RemoteTest,
_TestInstancePCIRequests):
pass
|
apache-2.0
| 5,847,374,360,693,659,000 | 44.535714 | 79 | 0.584314 | false |
wagnerand/olympia
|
src/olympia/addons/tests/test_decorators.py
|
2
|
5258
|
from django import http
import mock
from six.moves.urllib_parse import quote
from olympia.addons import decorators as dec
from olympia.addons.models import Addon
from olympia.amo.tests import TestCase, addon_factory
class TestAddonView(TestCase):
def setUp(self):
super(TestAddonView, self).setUp()
self.addon = addon_factory()
self.func = mock.Mock()
self.func.return_value = mock.sentinel.OK
self.func.__name__ = 'mock_function'
self.view = dec.addon_view(self.func)
self.request = mock.Mock()
self.slug_path = (
'http://testserver/addon/%s/reviews' %
quote(self.addon.slug.encode('utf-8')))
self.request.path = self.id_path = (
u'http://testserver/addon/%s/reviews' % self.addon.id)
self.request.GET = {}
def test_301_by_id(self):
res = self.view(self.request, str(self.addon.id))
self.assert3xx(res, self.slug_path, 301)
def test_slug_replace_no_conflict(self):
path = u'http://testserver/addon/{id}/reviews/{id}345/path'
self.request.path = path.format(id=self.addon.id)
res = self.view(self.request, str(self.addon.id))
redirection = (
u'http://testserver/addon/{slug}/reviews/{id}345/path'.format(
id=self.addon.id,
slug=quote(self.addon.slug.encode('utf8'))))
self.assert3xx(res, redirection, 301)
def test_301_with_querystring(self):
self.request.GET = mock.Mock()
self.request.GET.urlencode.return_value = 'q=1'
res = self.view(self.request, str(self.addon.id))
self.assert3xx(res, self.slug_path + '?q=1', 301)
def test_200_by_slug(self):
res = self.view(self.request, self.addon.slug)
assert res == mock.sentinel.OK
def test_404_by_id(self):
with self.assertRaises(http.Http404):
self.view(self.request, str(self.addon.id * 2))
def test_404_by_slug(self):
with self.assertRaises(http.Http404):
self.view(self.request, self.addon.slug + 'xx')
def test_alternate_qs_301_by_id(self):
def qs():
return Addon.objects.filter(type=1)
view = dec.addon_view_factory(qs=qs)(self.func)
res = view(self.request, str(self.addon.id))
self.assert3xx(res, self.slug_path, 301)
def test_alternate_qs_200_by_slug(self):
def qs():
return Addon.objects.filter(type=1)
view = dec.addon_view_factory(qs=qs)(self.func)
res = view(self.request, self.addon.slug)
assert res == mock.sentinel.OK
def test_alternate_qs_404_by_id(self):
def qs():
return Addon.objects.filter(type=2)
view = dec.addon_view_factory(qs=qs)(self.func)
with self.assertRaises(http.Http404):
view(self.request, str(self.addon.id))
def test_alternate_qs_404_by_slug(self):
def qs():
return Addon.objects.filter(type=2)
view = dec.addon_view_factory(qs=qs)(self.func)
with self.assertRaises(http.Http404):
view(self.request, self.addon.slug)
def test_addon_no_slug(self):
addon = addon_factory(slug=None)
res = self.view(self.request, addon.slug)
assert res == mock.sentinel.OK
def test_slug_isdigit(self):
addon = addon_factory()
addon.update(slug=str(addon.id))
r = self.view(self.request, addon.slug)
assert r == mock.sentinel.OK
request, addon_ = self.func.call_args[0]
assert addon_ == addon
class TestAddonViewWithUnlisted(TestAddonView):
def setUp(self):
super(TestAddonViewWithUnlisted, self).setUp()
self.view = dec.addon_view_factory(
qs=Addon.objects.all)(self.func)
@mock.patch('olympia.access.acl.check_unlisted_addons_reviewer',
lambda r: False)
@mock.patch('olympia.access.acl.check_addon_ownership',
lambda *args, **kwargs: False)
def test_unlisted_addon(self):
"""Return a 404 for non authorized access."""
self.make_addon_unlisted(self.addon)
with self.assertRaises(http.Http404):
self.view(self.request, self.addon.slug)
@mock.patch('olympia.access.acl.check_unlisted_addons_reviewer',
lambda r: False)
@mock.patch('olympia.access.acl.check_addon_ownership',
lambda *args, **kwargs: True)
def test_unlisted_addon_owner(self):
"""Addon owners have access."""
self.make_addon_unlisted(self.addon)
assert self.view(self.request, self.addon.slug) == mock.sentinel.OK
request, addon = self.func.call_args[0]
assert addon == self.addon
@mock.patch('olympia.access.acl.check_unlisted_addons_reviewer',
lambda r: True)
@mock.patch('olympia.access.acl.check_addon_ownership',
lambda *args, **kwargs: False)
def test_unlisted_addon_unlisted_admin(self):
"""Unlisted addon reviewers have access."""
self.make_addon_unlisted(self.addon)
assert self.view(self.request, self.addon.slug) == mock.sentinel.OK
request, addon = self.func.call_args[0]
assert addon == self.addon
|
bsd-3-clause
| -7,378,501,312,480,588,000 | 35.262069 | 75 | 0.618486 | false |
skosukhin/spack
|
var/spack/repos/builtin/packages/atompaw/package.py
|
1
|
2376
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Atompaw(Package):
"""A Projector Augmented Wave (PAW) code for generating
atom-centered functions.
Official website: http://pwpaw.wfu.edu
User's guide: ~/doc/atompaw-usersguide.pdf
"""
homepage = "http://users.wfu.edu/natalie/papers/pwpaw/man.html"
url = "http://users.wfu.edu/natalie/papers/pwpaw/atompaw-4.0.0.13.tar.gz"
version('4.0.0.13', 'af4a042380356f6780183c4b325aad1d')
version('3.1.0.3', 'c996a277e11707887177f47bbb229aa6')
depends_on("lapack")
depends_on("blas")
# pin libxc version
depends_on("libxc@2.2.1")
def install(self, spec, prefix):
options = ['--prefix=%s' % prefix]
linalg = spec['lapack'].libs + spec['blas'].libs
options.extend([
"--with-linalg-libs=%s" % linalg.ld_flags,
"--enable-libxc",
"--with-libxc-incs=-I%s" % spec["libxc"].prefix.include,
"--with-libxc-libs=-L%s -lxcf90 -lxc" % spec["libxc"].prefix.lib,
])
configure(*options)
make(parallel=False) # parallel build fails
make("check")
make("install")
|
lgpl-2.1
| 127,523,794,832,472,270 | 36.714286 | 78 | 0.638468 | false |
vivisect/synapse
|
synapse/lib/crypto/ecc.py
|
1
|
6535
|
import hashlib
import logging
import cryptography.hazmat.primitives.hashes as c_hashes
import cryptography.hazmat.primitives.kdf.hkdf as c_hkdf
import cryptography.hazmat.primitives.asymmetric.ec as c_ec
import cryptography.hazmat.primitives.serialization as c_ser
import cryptography.hazmat.primitives.asymmetric.utils as c_utils
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
import synapse.common as s_common
logger = logging.getLogger(__name__)
class PriKey:
'''
A helper class for using ECC private keys.
'''
def __init__(self, priv):
self.priv = priv # type: c_ec.EllipticCurvePrivateKey
self.publ = PubKey(self.priv.public_key())
def iden(self):
'''
Return a SHA256 hash for the public key (to be used as a GUID).
Returns:
str: The SHA256 hash of the public key bytes.
'''
return self.publ.iden()
def sign(self, byts):
'''
Compute the ECC signature for the given bytestream.
Args:
byts (bytes): The bytes to sign.
Returns:
            bytes: The ECC signature bytes.
'''
chosen_hash = c_hashes.SHA256()
hasher = c_hashes.Hash(chosen_hash, default_backend())
hasher.update(byts)
digest = hasher.finalize()
return self.priv.sign(digest,
c_ec.ECDSA(c_utils.Prehashed(chosen_hash))
)
def exchange(self, pubkey):
'''
Perform a ECDH key exchange with a public key.
Args:
pubkey (PubKey): A PubKey to perform the ECDH with.
Returns:
bytes: The ECDH bytes. This is deterministic for a given pubkey
and private key.
'''
try:
return self.priv.exchange(c_ec.ECDH(), pubkey.publ)
except ValueError as e:
raise s_common.BadEccExchange(mesg=str(e))
def public(self):
'''
Get the PubKey which corresponds to the ECC PriKey.
Returns:
PubKey: A new PubKey object whose key corresponds to the private key.
'''
return PubKey(self.priv.public_key())
@staticmethod
def generate():
'''
Generate a new ECC PriKey instance.
Returns:
PriKey: A new PriKey instance.
'''
return PriKey(c_ec.generate_private_key(
c_ec.SECP384R1(),
default_backend()
))
def dump(self):
'''
Get the private key bytes in DER/PKCS8 format.
Returns:
bytes: The DER/PKCS8 encoded private key.
'''
return self.priv.private_bytes(
encoding=c_ser.Encoding.DER,
format=c_ser.PrivateFormat.PKCS8,
encryption_algorithm=c_ser.NoEncryption())
@staticmethod
def load(byts):
'''
Create a PriKey instance from DER/PKCS8 encoded bytes.
Args:
byts (bytes): Bytes to load
Returns:
PriKey: A new PubKey instance.
'''
return PriKey(c_ser.load_der_private_key(
byts,
password=None,
backend=default_backend()))
class PubKey:
'''
A helper class for using ECC public keys.
'''
def __init__(self, publ):
self.publ = publ # type: c_ec.EllipticCurvePublicKey
def dump(self):
'''
Get the public key bytes in DER/SubjectPublicKeyInfo format.
Returns:
bytes: The DER/SubjectPublicKeyInfo encoded public key.
'''
return self.publ.public_bytes(
encoding=c_ser.Encoding.DER,
format=c_ser.PublicFormat.SubjectPublicKeyInfo)
def verify(self, byts, sign):
'''
Verify the signature for the given bytes using the ECC
public key.
Args:
byts (bytes): The data bytes.
sign (bytes): The signature bytes.
Returns:
bool: True if the data was verified, False otherwise.
'''
try:
chosen_hash = c_hashes.SHA256()
hasher = c_hashes.Hash(chosen_hash, default_backend())
hasher.update(byts)
digest = hasher.finalize()
self.publ.verify(sign,
digest,
c_ec.ECDSA(c_utils.Prehashed(chosen_hash))
)
return True
except InvalidSignature as e:
logger.exception('Error in publ.verify')
return False
def iden(self):
'''
Return a SHA256 hash for the public key (to be used as a GUID).
Returns:
str: The SHA256 hash of the public key bytes.
'''
return hashlib.sha256(self.dump()).hexdigest()
@staticmethod
def load(byts):
'''
        Create a PubKey instance from DER/SubjectPublicKeyInfo encoded bytes.
Args:
byts (bytes): Bytes to load
Returns:
PubKey: A new PubKey instance.
'''
return PubKey(c_ser.load_der_public_key(
byts,
backend=default_backend()))
def doECDHE(statprv_u, statpub_v, ephmprv_u, ephmpub_v,
length=64,
salt=None,
info=None):
'''
    Perform one side of an Elliptic Curve Diffie-Hellman Ephemeral key exchange.
Args:
statprv_u (PriKey): Static Private Key for U
        statpub_v (PubKey): Static Public Key for V
ephmprv_u (PriKey): Ephemeral Private Key for U
ephmpub_v (PubKey): Ephemeral Public Key for V
length (int): Number of bytes to return
salt (bytes): Salt to use when computing the key.
info (bytes): Additional information to use when computing the key.
Notes:
This makes no assumption about the reuse of the Ephemeral keys passed
to the function. It is the caller's responsibility to destroy the keys
after they are used for doing key generation. This implementation is
the dhHybrid1 scheme described in NIST 800-56A Revision 2.
Returns:
bytes: The derived key.
'''
zs = statprv_u.exchange(statpub_v)
ze = ephmprv_u.exchange(ephmpub_v)
z = ze + zs
kdf = c_hkdf.HKDF(c_hashes.SHA256(),
length=length,
salt=salt,
info=info,
backend=default_backend())
k = kdf.derive(z)
return k
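# --- Usage sketch (illustrative addition, not part of the original module) ---
# Shows the intended flow of the helpers above: generate static and ephemeral
# key pairs, sign and verify a message, and derive a shared key with doECDHE.
# The message bytes and the info value are arbitrary example inputs.
def _ecc_example():
    alice_stat, bob_stat = PriKey.generate(), PriKey.generate()
    alice_ephm, bob_ephm = PriKey.generate(), PriKey.generate()
    # sign with Alice's static private key, verify with her public key
    data = b'hello synapse'
    sign = alice_stat.sign(data)
    assert alice_stat.public().verify(data, sign)
    # both parties derive the same key from the other side's public keys
    k_alice = doECDHE(alice_stat, bob_stat.public(), alice_ephm, bob_ephm.public(), info=b'example')
    k_bob = doECDHE(bob_stat, alice_stat.public(), bob_ephm, alice_ephm.public(), info=b'example')
    assert k_alice == k_bob
if __name__ == '__main__':
    _ecc_example()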
|
apache-2.0
| 910,276,138,974,004,000 | 28.570136 | 81 | 0.57215 | false |
djsegal/ahab_legacy_
|
pequod/readInput.py
|
1
|
4463
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 4 17:53:55 2013
@author: dan
"""
from parseLine import parseLine
from findAndChar import findAndChar
from addScalarInput import addScalarInput
from addVectorInput import addVectorInput
def readInput( fileName ):
redInput = open( fileName , 'r' )
varList = [] # list of all the variables
valuesList = [] # list of lists of all variables' values
constsList = {} # dict that contains consts names (keys) and their values
vectorList = [] # list of tuples that contain : vec names , vec vals ,
# and mode information
isVector = False
vectorComplete = False
alreadyNamed = False
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# read in red.inp one line at a time
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
for aLine in redInput:
# =========================
# get aLine in a workable
# condition for reading
# =========================
curLine = parseLine( aLine )
if curLine == '' :
continue
# ============================
# find the name of a var AND
# if it's scalar , add it
# ============================
if not isVector:
# -------------------------------
# get variable names, if needed
# -------------------------------
if alreadyNamed :
alreadyNamed = False
else:
varList.append( curLine.pop(0) )
if len( curLine ) == 0 :
alreadyNamed = True
continue
# =============================
# check for new vector inputs
# =============================
if '[' in curLine[ 0 ] :
isVector = True
vectorLine = []
curName = varList.pop()
if '[' == curLine[ 0 ] :
curLine.pop( 0 )
else :
curLine[ 0 ] = curLine[ 0 ][ 1 : ]
# =================================
# if the variable is a scalar,
# add it and skip vector protocol
# =================================
if not isVector :
addScalarInput( curLine , varList , valuesList ,
constsList , vectorList )
continue
# =================================
# check for the end of the vector
# =================================
if curLine :
andFound = findAndChar( curLine )
# ----------------------------------
# now that andChar is gone,
# ']' has to be the very last char
# ----------------------------------
if ']' in curLine[-1] :
vectorComplete = True
if len(curLine[-1]) > 1:
curLine[-1] = curLine[-1][0:len(curLine[-1])-1]
else:
curLine.pop()
vectorLine.append(curLine)
# ==============================
# if complete , read in vector
# ==============================
if vectorComplete :
vectorComplete = False
isVector = addVectorInput( vectorLine , constsList ,
vectorList , curName ,
andFound , varList )
# -------------------------------
# close input and return values
# -------------------------------
redInput.close()
return ( varList , valuesList , vectorList )
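# --- Usage sketch (illustrative addition, not part of the original module) ---
# The parser would typically be driven like this; the input file name is an
# assumption taken from the "read in red.inp" comment above:
#
#   varList, valuesList, vectorList = readInput('red.inp')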
|
apache-2.0
| -4,641,275,776,112,193,000 | 29.575342 | 78 | 0.315259 | false |
Virako/Rocamgo-ng
|
rocamgo/detection/perspective.py
|
1
|
2062
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Rocamgo is a recognizer of go games by processing digital images with opencv.
# Copyright (C) 2012 Víctor Ramirez de la Corte <virako.9 at gmail dot com>
# Copyright (C) 2012 David Medina Velasco <cuidadoconeltecho at gmail dot com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from cv2 import CV_32FC1
from cv2 import getPerspectiveTransform
from cv2 import warpPerspective
from rocamgo.detection.functions import get_external_corners_prespective_correction
from rocamgo.detection.functions import get_max_edge
def perspective(img, corners):
"""Crea una imagen en modelo ideal del tablero dado en perspectiva.
:Param img: imagen con el tablero en perspectiva
:Todo: comprobar de que tipo es la imagen TODO
:Type img: IplImage or CvMat
:Param corners: lista de las esquinas del tablero
:Type corners: list
:Return: imagen en modelo ideal
:Rtype: IplImage
"""
corners = get_external_corners_prespective_correction(corners)
max_edge = 480
    # The goban has a 15/14 height/width ratio
    # relation = 14/15.0
    # In the sequence, the order of corners is ul, dl, dr, ur
corners_transf = np.float32([
[0,0],
[0,max_edge],
[max_edge,0],
[max_edge,max_edge]]
)
mat = getPerspectiveTransform(corners, corners_transf)
dst = warpPerspective(img, mat, (max_edge, max_edge))
return dst
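# --- Usage sketch (illustrative addition, not part of the original module) ---
# The function above would typically be fed a video frame and the four
# detected board corners; the file names and the corner-detection helper
# below are assumptions, not taken from this repository:
#
#   import cv2
#   img = cv2.imread('goban_frame.png')
#   corners = detect_corners(img)  # hypothetical helper returning 4 corners
#   ideal = perspective(img, corners)
#   cv2.imwrite('goban_ideal.png', ideal)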
|
gpl-3.0
| -8,436,673,398,612,741,000 | 35.803571 | 83 | 0.716642 | false |
drabastomek/practicalDataAnalysisCookbook
|
Codes/Chapter07/ts_detrendAndRemoveSeasonality.py
|
1
|
2625
|
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# change the font size
matplotlib.rc('xtick', labelsize=9)
matplotlib.rc('ytick', labelsize=9)
matplotlib.rc('font', size=14)
# time series tools
import statsmodels.api as sm
def period_mean(data, freq):
'''
Method to calculate mean for each frequency
'''
return np.array(
[np.mean(data[i::freq]) for i in range(freq)])
# folder with data
data_folder = '../../Data/Chapter07/'
# colors
colors = ['#FF6600', '#000000', '#29407C', '#660000']
# read the data
riverFlows = pd.read_csv(data_folder + 'combined_flow.csv',
index_col=0, parse_dates=[0])
# detrend the data
detrended = sm.tsa.tsatools.detrend(riverFlows,
order=1, axis=0)
# create a data frame with the detrended data
detrended = pd.DataFrame(detrended, index=riverFlows.index,
columns=['american_flow_d', 'columbia_flow_d'])
# join to the main dataset
riverFlows = riverFlows.join(detrended)
# calculate trend
riverFlows['american_flow_t'] = riverFlows['american_flow'] \
- riverFlows['american_flow_d']
riverFlows['columbia_flow_t'] = riverFlows['columbia_flow'] \
- riverFlows['columbia_flow_d']
# number of observations and frequency of seasonal component
nobs = len(riverFlows)
freq = 12 # yearly seasonality
# remove the seasonality
for col in ['american_flow_d', 'columbia_flow_d']:
period_averages = period_mean(riverFlows[col], freq)
riverFlows[col[:-2]+'_s'] = np.tile(period_averages,
nobs // freq + 1)[:nobs]
riverFlows[col[:-2]+'_r'] = np.array(riverFlows[col]) \
- np.array(riverFlows[col[:-2]+'_s'])
# save the decomposed dataset
with open(data_folder + 'combined_flow_d.csv', 'w') as o:
    o.write(riverFlows.to_csv(index=False))
# plot the data
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
# set the size of the figure explicitly
fig.set_size_inches(12, 7)
# plot the charts for american
ax[0, 0].plot(riverFlows['american_flow_t'], colors[0])
ax[0, 1].plot(riverFlows['american_flow_s'], colors[1])
ax[0, 2].plot(riverFlows['american_flow_r'], colors[2])
# plot the charts for columbia
ax[1, 0].plot(riverFlows['columbia_flow_t'], colors[0])
ax[1, 1].plot(riverFlows['columbia_flow_s'], colors[1])
ax[1, 2].plot(riverFlows['columbia_flow_r'], colors[2])
# set titles for columns
ax[0, 0].set_title('Trend')
ax[0, 1].set_title('Seasonality')
ax[0, 2].set_title('Residuals')
# set titles for rows
ax[0, 0].set_ylabel('American')
ax[1, 0].set_ylabel('Columbia')
# save the chart
plt.savefig(data_folder + 'charts/detrended.png', dpi=300)
|
gpl-2.0
| 3,813,772,106,951,367,000 | 28.166667 | 61 | 0.684952 | false |
yarikoptic/NiPy-OLD
|
nipy/neurospin/datasets/volumes/tests/test_volume_field.py
|
1
|
1076
|
"""
Testing data image interface.
"""
import numpy as np
# Local imports
from ..volume_field import VolumeField
from ...transforms.transform import Transform, CompositionError
################################################################################
# Tests
def test_interface():
img = VolumeField()
img.world_space = 'world'
for method in ('get_transform', 'as_volume_img'):
method = getattr(img, method)
yield np.testing.assert_raises, NotImplementedError, method
yield np.testing.assert_raises, CompositionError, \
img.composed_with_transform, \
Transform('world2', 'world', mapping=map)
yield np.testing.assert_raises, NotImplementedError, \
img.composed_with_transform, \
Transform('world', 'world2', mapping=map)
yield np.testing.assert_raises, NotImplementedError, \
img.resampled_to_img, None
yield np.testing.assert_raises, NotImplementedError, \
img.values_in_world, None, None, None
|
bsd-3-clause
| 3,817,453,609,082,468,400 | 30.647059 | 80 | 0.587361 | false |
mthh/geopy
|
test/geocoders/photon.py
|
1
|
2704
|
# -*- coding: UTF-8 -*-
import unittest
from geopy.compat import u
from geopy.point import Point
from geopy.geocoders import Photon
from test.geocoders.util import GeocoderTestBase
class PhotonTestCase(GeocoderTestBase): # pylint: disable=R0904,C0111
@classmethod
def setUpClass(cls):
cls.geocoder = Photon(timeout=10)
cls.known_country_it = "Francia"
cls.known_country_fr = "France"
def test_user_agent_custom(self):
geocoder = Photon(
user_agent='my_user_agent/1.0'
)
self.assertEqual(geocoder.headers['User-Agent'], 'my_user_agent/1.0')
def test_geocode(self):
"""
Photon.geocode
"""
self.geocode_run(
{"query": "14 rue de la Soie, Villeurbanne"},
{"latitude": 45.7621004, "longitude": 4.916648},
)
def test_unicode_name(self):
"""
Photon.geocode unicode
"""
self.geocode_run(
{"query": u("\u6545\u5bab")},
{"latitude": 39.917252950000005, "longitude": 116.39077025499873}
)
def test_reverse_string(self):
"""
Photon.reverse string
"""
self.reverse_run(
{"query": "45.7733105, 4.8869339"},
{"latitude": 45.7733105, "longitude": 4.8869339}
)
def test_reverse_point(self):
"""
Photon.reverse Point
"""
self.reverse_run(
{"query": Point(45.7733105, 4.8869339)},
{"latitude": 45.7733105, "longitude": 4.8869339}
)
def test_geocode_language_parameter(self):
"""
Photon.geocode using `language`
"""
result_geocode = self._make_request(
self.geocoder.geocode,
self.known_country_fr,
language="it",
)
self.assertEqual(
result_geocode.raw['properties']['country'],
self.known_country_it
)
def test_reverse_language_parameter(self):
"""
Photon.reverse using `language`
"""
result_reverse_it = self._make_request(
self.geocoder.reverse,
"45.7733105, 4.8869339",
exactly_one=True,
language="it",
)
self.assertEqual(
result_reverse_it.raw['properties']['country'],
self.known_country_it
)
result_reverse_fr = self._make_request(
self.geocoder.reverse,
"45.7733105, 4.8869339",
exactly_one=True,
language="fr"
)
self.assertEqual(
result_reverse_fr.raw['properties']['country'],
self.known_country_fr
)
|
mit
| 4,715,647,223,500,360,000 | 26.591837 | 77 | 0.538092 | false |
bithinalangot/ecidadania-dev
|
src/apps/ecidadania/accounts/admin.py
|
1
|
2246
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010-2012 Cidadania S. Coop. Galega
#
# This file is part of e-cidadania.
#
# e-cidadania is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# e-cidadania is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with e-cidadania. If not, see <http://www.gnu.org/licenses/>.
from django.contrib import admin
from django.contrib.contenttypes.models import ContentType
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.core.mail import send_mail
from django.contrib.auth.models import User
from apps.ecidadania.accounts.models import UserProfile
class ProfileAdmin(admin.ModelAdmin):
"""
This is a minimal view for Django administration interface. It shows the
user and the website.
"""
list_display = ('user', 'website')
actions = ['mass_mail']
def mass_mail(self, request, queryset):
"""
        This function exports the selected objects to a new view to manipulate
them properly.
"""
#selected = request.POST.getlist(admin.ACTION_CHECKBOX_NAME)
# ct = ContentType.objects.get_for_model(queryset.model)
if request.method == "POST":
for obj in queryset:
get_user = get_object_or_404(User, id=obj.id)
                send_mail(request.POST['massmail_subject'], request.POST['message'], 'noreply@ecidadania.org', [get_user.email])
return HttpResponseRedirect(request.get_full_path())
selected = request.POST.getlist(admin.ACTION_CHECKBOX_NAME)
ct = ContentType.objects.get_for_model(queryset.model)
return render_to_response('/mail/massmail.html', { 'people': selected })
mass_mail.short_description = 'Send a global mail to the selected users'
admin.site.register(UserProfile, ProfileAdmin)
|
gpl-3.0
| -7,367,780,527,825,596,000 | 39.836364 | 128 | 0.703918 | false |
PJUllrich/Complete-Bunq-API-Python-Wrapper
|
tests/endpoints/test_installation.py
|
1
|
1623
|
from apiwrapper.endpoints.installation import Installation
from tests.endpoints.test_endpoint import EndpointTest
class InstallationTest(EndpointTest):
__base_endpoint_url = "/installation"
@property
def _base_endpoint(self):
return self.__base_endpoint_url
def setUp(self):
super().setUp(Installation)
def test_get_base_endpoint(self):
endpoint_should_be = self._base_endpoint
endpoint_to_check = self.test_class._get_base_endpoint()
self.assert_parameters(endpoint_should_be, endpoint_to_check)
def test_get_id_for_installation(self):
endpoint_should_be = self._base_endpoint
endpoint_to_check = self.test_class.get_id_for_installation()
self.assert_parameters(endpoint_should_be, endpoint_to_check)
def test_check_installation_id(self):
endpoint_should_be = self._base_endpoint
endpoint_should_be += "/%d" % self.random_id
endpoint_to_check = self.test_class.check_installation_id(self.random_id)
self.assert_parameters(endpoint_should_be, endpoint_to_check)
def test_create_installation_endpoint(self):
endpoint_should_be = self._base_endpoint
endpoint_to_check, _ = self.test_class.create_installation()
self.assert_parameters(endpoint_should_be, endpoint_to_check)
def test_create_installation_payload(self):
payload_should_be = {
"client_public_key": self.api_client.pubkey
}
_, payload_to_check = self.test_class.create_installation()
        self.assert_parameters(payload_should_be, payload_to_check)
|
mit
| 7,800,149,606,326,693,000 | 30.823529 | 81 | 0.68207 | false |
ouh-churchill/quod
|
quodsite/website/templatetags/quodsite_tags.py
|
1
|
2483
|
#!/usr/bin/python
# coding: utf-8
from __future__ import absolute_import, unicode_literals
from django import template
register = template.Library()
@register.simple_tag
def get_model_fields(object_to_query):
return dict(
(field.name, field.value_to_string(object_to_query)) for field in object_to_query._meta.fields
)
@register.simple_tag
def get_model_dir(object_to_query):
return dir(object_to_query)
@register.simple_tag
def get_model_type(object_to_query):
return type(object_to_query)
# MENUS
# From https://github.com/wagtail/wagtaildemo/blob/master/demo/templatetags/demo_tags.py
@register.assignment_tag(takes_context=True)
def get_site_root(context):
# NB this returns a core.Page, not the implementation-specific model used
# so object-comparison to self will return false as objects would differ
return context['request'].site.root_page
def has_menu_children(page):
return page.get_children().live().in_menu().exists()
# Retrieves the top menu items - the immediate children of the parent page
# The has_menu_children method is necessary because the bootstrap menu requires
# a dropdown class to be applied to a parent
@register.inclusion_tag('includes/navbar.html', takes_context=True)
def top_menu(context, parent, calling_page=None):
menuitems = parent.get_children().live().in_menu()
for menuitem in menuitems:
menuitem.show_dropdown = has_menu_children(menuitem)
# We don't directly check if calling_page is None since the template
# engine can pass an empty string to calling_page
# if the variable passed as calling_page does not exist.
menuitem.active = (calling_page.path.startswith(menuitem.path)
if calling_page else False)
return {
'calling_page': calling_page,
'menuitems': menuitems,
# required by the pageurl tag that we want to use within this template
'request': context['request'],
}
# Retrieves the children of the top menu items for the drop downs
@register.inclusion_tag('includes/navbar_children.html', takes_context=True)
def top_menu_children(context, parent):
menuitems_children = parent.get_children()
menuitems_children = menuitems_children.live().in_menu()
return {
'parent': parent,
'menuitems_children': menuitems_children,
# required by the pageurl tag that we want to use within this template
'request': context['request'],
}
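# --- Usage sketch (illustrative addition, not part of the original module) ---
# In a page template these tags would typically be combined along these lines
# (the surrounding template structure is an assumption, not from this repo):
#
#   {% load quodsite_tags %}
#   {% get_site_root as site_root %}
#   {% top_menu parent=site_root calling_page=self %}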
|
mit
| 8,645,338,048,120,876,000 | 37.2 | 102 | 0.708417 | false |
ken-muturi/pombola
|
pombola/kenya/urls.py
|
1
|
4552
|
from django.conf.urls import patterns, url
from django.views.generic.base import TemplateView
from pombola.kenya.views import KEPersonDetail, KEPersonDetailAppearances
from pombola.experiments.views import ExperimentShare, ExperimentSurvey
from .views import (CountyPerformanceView, CountyPerformanceSenateSubmission,
CountyPerformancePetitionSubmission, ExperimentRecordTimeOnPage,
EXPERIMENT_DATA, ThanksTemplateView,
YouthEmploymentView, YouthEmploymentSupportSubmission,
YouthEmploymentCommentSubmission, ExperimentThanks,
YouthEmploymentInputSubmission, YouthEmploymentBillView,
ShujaazFinalistsView
)
urlpatterns = patterns('',
url(r'^shujaaz$', ShujaazFinalistsView.as_view(), name='shujaaz-finalists'),
url(r'^shujaaz-voting$', TemplateView.as_view(template_name='shujaaz-voting.html'), name='shujaaz-voting'),
url(r'^intro$', TemplateView.as_view(template_name='intro.html') ),
url(r'^register-to-vote$', TemplateView.as_view(template_name='register-to-vote.html') ),
url(r'^find-polling-station$', TemplateView.as_view(template_name='find-polling-station.html') ),
url(r'^person/(?P<slug>[-\w]+)/$',
KEPersonDetail.as_view(), name='person'),
url(r'^person/(?P<slug>[-\w]+)/appearances/$',
KEPersonDetailAppearances.as_view(sub_page='appearances'),
name='person'),
)
# Create the two County Performance pages:
for experiment_slug in ('mit-county', 'mit-county-larger'):
view_kwargs = {'experiment_slug': experiment_slug}
view_kwargs.update(EXPERIMENT_DATA[experiment_slug])
base_name = view_kwargs['base_view_name']
base_path = r'^' + base_name
urlpatterns.append(
url(base_path + r'$',
CountyPerformanceView.as_view(**view_kwargs),
name=base_name)
)
for name, view in (
('senate', CountyPerformanceSenateSubmission),
('petition', CountyPerformancePetitionSubmission)):
urlpatterns += (
url(base_path + r'/{0}$'.format(name),
view.as_view(**view_kwargs),
name=(base_name + '-{0}-submission'.format(name))),
url(base_path + r'/{0}/thanks$'.format(name),
ThanksTemplateView.as_view(
base_view_name=base_name,
template_name=('county-performance-{0}-submission.html'.format(name))
)),
)
urlpatterns += (
url(base_path + r'/share',
ExperimentShare.as_view(**view_kwargs),
name=(base_name + '-share')),
url(base_path + r'/survey',
ExperimentSurvey.as_view(**view_kwargs),
name=(base_name + '-survey')),
)
# Create the Youth Employment Bill page:
for experiment_slug in (
'youth-employment-bill',
'youth-employment-bill-generic-no-randomization',
):
view_kwargs = {'experiment_slug': experiment_slug}
view_kwargs.update(EXPERIMENT_DATA[experiment_slug])
base_name = view_kwargs['base_view_name']
base_path = r'^' + base_name
urlpatterns.append(
url(base_path + r'$',
YouthEmploymentView.as_view(**view_kwargs),
name=base_name)
)
for name, view in (
('support', YouthEmploymentSupportSubmission),
('input', YouthEmploymentInputSubmission),
('comment', YouthEmploymentCommentSubmission)):
urlpatterns += (
url(base_path + r'/{0}$'.format(name),
view.as_view(**view_kwargs),
name=(base_name + '-{0}-submission'.format(name))),
url(base_path + r'/{0}/thanks$'.format(name),
ThanksTemplateView.as_view(
base_view_name=base_name,
template_name=('youth-employment-{0}-submission.html'.format(name))
)),
)
urlpatterns += (
url(base_path + r'/share',
ExperimentShare.as_view(**view_kwargs),
name=(base_name + '-share')),
url(base_path + r'/survey',
ExperimentSurvey.as_view(**view_kwargs),
name=(base_name + '-survey')),
url(base_path + r'/bill',
YouthEmploymentBillView.as_view(**view_kwargs),
name=(base_name + '-bill')),
url(base_path + r'/input',
ExperimentThanks.as_view(**view_kwargs),
name=(base_name + '-input')),
url(base_path + r'/time-on-page',
ExperimentRecordTimeOnPage.as_view(**view_kwargs),
name=(base_name + '-time-on-page')),
)
|
agpl-3.0
| 6,573,117,379,751,098,000 | 38.582609 | 111 | 0.610281 | false |
linkhub-sdk/popbill.taxinvoice.example.py
|
getSendToNTSConfig.py
|
1
|
1031
|
# -*- coding: utf-8 -*-
# code for console Encoding difference. Dont' mind on it
import sys
import imp
imp.reload(sys)
try:
sys.setdefaultencoding('UTF8')
except Exception as E:
pass
import testValue
from popbill import TaxinvoiceService, PopbillException
taxinvoiceService = TaxinvoiceService(testValue.LinkID, testValue.SecretKey)
taxinvoiceService.IsTest = testValue.IsTest
taxinvoiceService.IPRestrictOnOff = testValue.IPRestrictOnOff
taxinvoiceService.UseStaticIP = testValue.UseStaticIP
taxinvoiceService.UseLocalTimeYN = testValue.UseLocalTimeYN
'''
Check the NTS (National Tax Service) forwarding configuration for e-Tax invoices.
- https://docs.popbill.com/taxinvoice/python/api#GetSendToNTSConfig
'''
try:
print("=" * 15 + " ๊ตญ์ธ์ฒญ ์ ์ก ์ค์ ํ์ธ " + "=" * 15)
# ํ๋นํ์ ์ฌ์
์๋ฒํธ
CorpNum = testValue.testCorpNum
sendToNTSConfig = taxinvoiceService.getSendToNTSConfig(CorpNum)
print("sendToNTS: %s" % sendToNTSConfig)
except PopbillException as PE:
print("Popbill Exception : [%d] %s" % (PE.code, PE.message))
|
mit
| -1,677,486,540,256,732,400 | 24.710526 | 76 | 0.752303 | false |