repo_name: stringlengths 5–92 | path: stringlengths 4–232 | copies: stringclasses, 19 values | size: stringlengths 4–7 | content: stringlengths 721–1.04M | license: stringclasses, 15 values | hash: int64, -9,223,277,421,539,062,000 to 9,223,102,107B | line_mean: float64, 6.51–99.9 | line_max: int64, 15–997 | alpha_frac: float64, 0.25–0.97 | autogenerated: bool, 1 class |
---|---|---|---|---|---|---|---|---|---|---|
ray-project/ray
|
rllib/agents/es/es_torch_policy.py
|
1
|
5277
|
# Code in this file is adapted from:
# https://github.com/openai/evolution-strategies-starter.
import gym
import numpy as np
import tree # pip install dm_tree
import ray
from ray.rllib.models import ModelCatalog
from ray.rllib.policy.policy_template import build_policy_class
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.filter import get_filter
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.spaces.space_utils import get_base_struct_from_space, \
unbatch
from ray.rllib.utils.torch_ops import convert_to_torch_tensor
torch, _ = try_import_torch()
def before_init(policy, observation_space, action_space, config):
policy.action_noise_std = config["action_noise_std"]
policy.action_space_struct = get_base_struct_from_space(action_space)
policy.preprocessor = ModelCatalog.get_preprocessor_for_space(
observation_space)
policy.observation_filter = get_filter(config["observation_filter"],
policy.preprocessor.shape)
policy.single_threaded = config.get("single_threaded", False)
def _set_flat_weights(policy, theta):
pos = 0
theta_dict = policy.model.state_dict()
new_theta_dict = {}
for k in sorted(theta_dict.keys()):
shape = policy.param_shapes[k]
num_params = int(np.prod(shape))
new_theta_dict[k] = torch.from_numpy(
np.reshape(theta[pos:pos + num_params], shape))
pos += num_params
policy.model.load_state_dict(new_theta_dict)
def _get_flat_weights(policy):
# Get the parameter tensors.
theta_dict = policy.model.state_dict()
# Flatten it into a single np.ndarray.
theta_list = []
for k in sorted(theta_dict.keys()):
theta_list.append(torch.reshape(theta_dict[k], (-1, )))
cat = torch.cat(theta_list, dim=0)
return cat.cpu().numpy()
type(policy).set_flat_weights = _set_flat_weights
type(policy).get_flat_weights = _get_flat_weights
def _compute_actions(policy,
obs_batch,
add_noise=False,
update=True,
**kwargs):
# Batch is given as list -> Try converting to numpy first.
if isinstance(obs_batch, list) and len(obs_batch) == 1:
obs_batch = obs_batch[0]
observation = policy.preprocessor.transform(obs_batch)
observation = policy.observation_filter(
observation[None], update=update)
observation = convert_to_torch_tensor(observation, policy.device)
dist_inputs, _ = policy.model({
SampleBatch.CUR_OBS: observation
}, [], None)
dist = policy.dist_class(dist_inputs, policy.model)
action = dist.sample()
def _add_noise(single_action, single_action_space):
single_action = single_action.detach().cpu().numpy()
if add_noise and isinstance(single_action_space, gym.spaces.Box) \
and single_action_space.dtype.name.startswith("float"):
single_action += np.random.randn(*single_action.shape) * \
policy.action_noise_std
return single_action
action = tree.map_structure(_add_noise, action,
policy.action_space_struct)
action = unbatch(action)
return action, [], {}
def _compute_single_action(policy,
observation,
add_noise=False,
update=True,
**kwargs):
action, state_outs, extra_fetches = policy.compute_actions(
[observation], add_noise=add_noise, update=update, **kwargs)
return action[0], state_outs, extra_fetches
type(policy).compute_actions = _compute_actions
type(policy).compute_single_action = _compute_single_action
def after_init(policy, observation_space, action_space, config):
state_dict = policy.model.state_dict()
policy.param_shapes = {
k: tuple(state_dict[k].size())
for k in sorted(state_dict.keys())
}
policy.num_params = sum(np.prod(s) for s in policy.param_shapes.values())
def make_model_and_action_dist(policy, observation_space, action_space,
config):
# Policy network.
dist_class, dist_dim = ModelCatalog.get_action_dist(
action_space,
config["model"], # model_options
dist_type="deterministic",
framework="torch")
model = ModelCatalog.get_model_v2(
policy.preprocessor.observation_space,
action_space,
num_outputs=dist_dim,
model_config=config["model"],
framework="torch")
# Make all model params not require any gradients.
for p in model.parameters():
p.requires_grad = False
return model, dist_class
ESTorchPolicy = build_policy_class(
name="ESTorchPolicy",
framework="torch",
loss_fn=None,
get_default_config=lambda: ray.rllib.agents.es.es.DEFAULT_CONFIG,
before_init=before_init,
after_init=after_init,
make_model_and_action_dist=make_model_and_action_dist)
|
apache-2.0
| 94,148,031,799,769,420 | 37.518248 | 78 | 0.614554 | false |
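The flatten/restore pattern used by _get_flat_weights and _set_flat_weights in the entry above can be shown in isolation. Below is a minimal sketch in plain PyTorch and NumPy (no RLlib), with a toy nn.Linear standing in for policy.model:

import numpy as np
import torch

model = torch.nn.Linear(3, 2)   # toy stand-in for policy.model

# Flatten: concatenate every parameter tensor (in sorted key order) into one 1-D vector.
state = model.state_dict()
flat = torch.cat([torch.reshape(state[k], (-1,)) for k in sorted(state)], dim=0).cpu().numpy()

# Restore: slice the vector back into tensors of the recorded shapes and reload them.
shapes = {k: tuple(state[k].size()) for k in sorted(state)}
pos, new_state = 0, {}
for k in sorted(state):
    n = int(np.prod(shapes[k]))
    new_state[k] = torch.from_numpy(np.reshape(flat[pos:pos + n], shapes[k]))
    pos += n
model.load_state_dict(new_state)

This is the round trip evolution strategies rely on: perturbations are applied to the flat parameter vector and written back into the model before each rollout.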
Orav/kbengine
|
kbe/res/scripts/common/Lib/site-packages/pip/_vendor/requests/packages/urllib3/request.py
|
1
|
5949
|
# urllib3/request.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
from .filepost import encode_multipart_formdata
__all__ = ['RequestMethods']
class RequestMethods(object):
"""
Convenience mixin for classes who implement a :meth:`urlopen` method, such
as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
:class:`~urllib3.poolmanager.PoolManager`.
Provides behavior for making common types of HTTP request methods and
decides which type of request field encoding to use.
Specifically,
:meth:`.request_encode_url` is for sending requests whose fields are encoded
in the URL (such as GET, HEAD, DELETE).
:meth:`.request_encode_body` is for sending requests whose fields are
encoded in the *body* of the request using multipart or www-form-urlencoded
(such as for POST, PUT, PATCH).
:meth:`.request` is for making any kind of request, it will look up the
appropriate encoding format and use one of the above two methods to make
the request.
Initializer parameters:
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
"""
_encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])
def __init__(self, headers=None):
self.headers = headers or {}
def urlopen(self, method, url, body=None, headers=None,
encode_multipart=True, multipart_boundary=None,
**kw): # Abstract
raise NotImplementedError("Classes extending RequestMethods must implement "
"their own ``urlopen`` method.")
def request(self, method, url, fields=None, headers=None, **urlopen_kw):
"""
Make a request using :meth:`urlopen` with the appropriate encoding of
``fields`` based on the ``method`` used.
This is a convenience method that requires the least amount of manual
effort. It can be used in most situations, while still having the option
to drop down to more specific methods when necessary, such as
:meth:`request_encode_url`, :meth:`request_encode_body`,
or even the lowest level :meth:`urlopen`.
"""
method = method.upper()
if method in self._encode_url_methods:
return self.request_encode_url(method, url, fields=fields,
headers=headers,
**urlopen_kw)
else:
return self.request_encode_body(method, url, fields=fields,
headers=headers,
**urlopen_kw)
def request_encode_url(self, method, url, fields=None, **urlopen_kw):
"""
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the url. This is useful for request methods like GET, HEAD, DELETE, etc.
"""
if fields:
url += '?' + urlencode(fields)
return self.urlopen(method, url, **urlopen_kw)
def request_encode_body(self, method, url, fields=None, headers=None,
encode_multipart=True, multipart_boundary=None,
**urlopen_kw):
"""
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the body. This is useful for request methods like POST, PUT, PATCH, etc.
When ``encode_multipart=True`` (default), then
:meth:`urllib3.filepost.encode_multipart_formdata` is used to encode the
payload with the appropriate content type. Otherwise
:meth:`urllib.urlencode` is used with the
'application/x-www-form-urlencoded' content type.
Multipart encoding must be used when posting files, and it's reasonably
safe to use it in other times too. However, it may break request signing,
such as with OAuth.
Supports an optional ``fields`` parameter of key/value strings AND
key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
the MIME type is optional. For example: ::
fields = {
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(),
'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
}
When uploading a file, providing a filename (the first parameter of the
tuple) is optional but recommended to best mimic the behavior of browsers.
Note that if ``headers`` are supplied, the 'Content-Type' header will be
overwritten because it depends on the dynamic random boundary string
which is used to compose the body of the request. The random boundary
string can be explicitly set with the ``multipart_boundary`` parameter.
"""
if encode_multipart:
body, content_type = encode_multipart_formdata(fields or {},
boundary=multipart_boundary)
else:
body, content_type = (urlencode(fields or {}),
'application/x-www-form-urlencoded')
if headers is None:
headers = self.headers
headers_ = {'Content-Type': content_type}
headers_.update(headers)
return self.urlopen(method, url, body=body, headers=headers_,
**urlopen_kw)
|
lgpl-3.0
| -7,818,495,000,782,802,000 | 40.191489 | 81 | 0.598084 | false |
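To see how the RequestMethods mixin above dispatches between URL-encoded and body-encoded requests, here is a small usage sketch. It assumes the module is importable as urllib3.request (a standalone urllib3 1.x install rather than the vendored copy), and RecordingClient is an illustrative name, not part of the library:

from urllib3.request import RequestMethods

class RecordingClient(RequestMethods):
    """Minimal subclass whose urlopen() just records what it receives."""
    def __init__(self):
        super(RecordingClient, self).__init__()
        self.calls = []

    def urlopen(self, method, url, body=None, headers=None, **kw):
        self.calls.append((method, url, body, headers))

client = RecordingClient()
# GET goes through request_encode_url(): the fields land in the query string.
client.request('GET', 'http://example.com/search', fields={'q': 'python'})
# POST goes through request_encode_body(): the fields are multipart-encoded into the body.
client.request('POST', 'http://example.com/upload',
               fields={'file': ('notes.txt', 'contents of notes')})

print(client.calls[0][1])   # http://example.com/search?q=python
print(client.calls[1][3])   # {'Content-Type': 'multipart/form-data; boundary=...'}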
mbiciunas/nix
|
src/cli_config/script/script_rename.py
|
1
|
2028
|
# NixConfig
# Copyright (c) 2017 Mark Biciunas.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import logging
from config.script.rename_script import RenameScript
LOG = logging.getLogger(__name__)
def init(subparsers: argparse._SubParsersAction):
"""
Command line subparser for renaming an existing script.
The following arguments can be interpreted by the subprocessor:
:Old Name: Current name of the script.
:New Name: New name for the script. Must be unique from other scripts as well as tags.
:param subparsers: Object that will contain the argument definitions.
:type subparsers: ArgumentParser
"""
LOG.debug("Initialize subparser for the rename command")
_subparser = subparsers.add_parser('rename',
help='Rename a script.')
_subparser.add_argument(type=str,
help="Current name",
dest='name')
_subparser.add_argument(type=str,
help="New name",
dest='name_new')
_subparser.set_defaults(func=_process)
def _process(args):
"""Process a command line action for listing setup groups.
:param args: Command line arguments
:type args: Namespace
"""
LOG.info("Begin action to create a new script")
rename_script = RenameScript()
rename_script.rename(args.name, args.name_new)
|
gpl-3.0
| -5,188,311,129,422,364,000 | 31.190476 | 91 | 0.675542 | false |
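The init()/_process() pattern in the entry above hooks into a top-level argparse parser roughly as follows. This is a standalone sketch: the handler below is a stand-in for RenameScript, and the program name is illustrative, not taken from the project:

import argparse

def _process(args):
    # Stand-in for the real handler, which would call RenameScript().rename(...).
    print("rename %s -> %s" % (args.name, args.name_new))

def init(subparsers):
    sub = subparsers.add_parser('rename', help='Rename a script.')
    sub.add_argument(type=str, help="Current name", dest='name')
    sub.add_argument(type=str, help="New name", dest='name_new')
    sub.set_defaults(func=_process)

parser = argparse.ArgumentParser(prog='nix-config')
init(parser.add_subparsers(dest='command'))
args = parser.parse_args(['rename', 'old-script', 'new-script'])
args.func(args)   # prints: rename old-script -> new-script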
arkharin/OpenCool
|
scr/logic/components/separator_flow/theoretical.py
|
1
|
3301
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Define the Separator Flow component.
"""
from scr.logic.components.component import Component as Cmp
from scr.logic.components.component import ComponentInfo as CmpInfo
from scr.logic.components.component import component, fundamental_equation, basic_property
from scr.helpers.properties import NumericProperty
from math import inf
def update_saved_data_to_last_version(orig_data, orig_version):
return orig_data
@component('adiabatic_one_phase_separator_flow', CmpInfo.SEPARATOR_FLOW, 1, update_saved_data_to_last_version, inlet_nodes=1,
outlet_nodes=2)
class Theoretical(Cmp):
def __init__(self, id_, inlet_nodes_id, outlet_nodes_id, component_data):
super().__init__(id_, inlet_nodes_id, outlet_nodes_id, component_data)
@basic_property(pressure_lose_1=NumericProperty(0, inf, unit='kPa'))
def _eval_pressure_lose_1(self):
id_inlet_node = self.get_id_inlet_nodes()[0]
inlet_node = self.get_inlet_node(id_inlet_node)
id_outlet_nodes = self.get_id_outlet_nodes()
p_in = inlet_node.pressure()
outlet_node_1 = self.get_outlet_node(id_outlet_nodes[0])
p_out = outlet_node_1.pressure()
return (p_in - p_out) / 1000.0
@basic_property(pressure_lose_2=NumericProperty(0, inf, unit='kPa'))
def _eval_pressure_lose_2(self):
id_inlet_node = self.get_id_inlet_nodes()[0]
inlet_node = self.get_inlet_node(id_inlet_node)
id_outlet_nodes = self.get_id_outlet_nodes()
p_in = inlet_node.pressure()
outlet_node_2 = self.get_outlet_node(id_outlet_nodes[1])
p_out = outlet_node_2.pressure()
return (p_in - p_out) / 1000.0
@fundamental_equation()
def _eval_intrinsic_equation_enthalpy_1(self):
id_inlet_node = self.get_id_inlet_nodes()[0]
inlet_node = self.get_inlet_node(id_inlet_node)
id_outlet_nodes = self.get_id_outlet_nodes()
outlet_node_1 = self.get_outlet_node(id_outlet_nodes[0])
h_in = inlet_node.enthalpy()
h_out_1 = outlet_node_1.enthalpy()
return [h_in, h_out_1]
@fundamental_equation()
def _eval_intrinsic_equation_enthalpy_2(self):
id_inlet_node = self.get_id_inlet_nodes()[0]
inlet_node = self.get_inlet_node(id_inlet_node)
id_outlet_nodes = self.get_id_outlet_nodes()
outlet_node_2 = self.get_outlet_node(id_outlet_nodes[1])
h_in = inlet_node.enthalpy()
h_out_2 = outlet_node_2.enthalpy()
return [h_in, h_out_2]
@fundamental_equation()
def _eval_intrinsic_equations_mass(self):
id_inlet_node = self.get_id_inlet_nodes()[0]
inlet_node = self.get_inlet_node(id_inlet_node)
id_outlet_nodes = self.get_id_outlet_nodes()
outlet_node_1 = self.get_outlet_node(id_outlet_nodes[0])
outlet_node_2 = self.get_outlet_node(id_outlet_nodes[1])
mass_flow_inlet = inlet_node.mass_flow()
mass_flow_out_1 = outlet_node_1.mass_flow()
mass_flow_out_2 = outlet_node_2.mass_flow()
return [mass_flow_inlet, mass_flow_out_1 + mass_flow_out_2]
|
mpl-2.0
| -5,360,974,496,690,660,000 | 38.309524 | 125 | 0.65465 | false |
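Each fundamental equation in the entry above returns a pair of values that the solver is expected to equate. The same balances can be written as plain residuals; a minimal sketch with made-up node values (not the OpenCool API):

# Adiabatic one-phase separator: inlet enthalpy equals each outlet enthalpy,
# and inlet mass flow equals the sum of the two outlet mass flows.
h_in, h_out_1, h_out_2 = 250.0, 250.0, 250.0      # kJ/kg, illustrative
m_in, m_out_1, m_out_2 = 0.30, 0.18, 0.12         # kg/s, illustrative

residuals = [
    h_in - h_out_1,                 # _eval_intrinsic_equation_enthalpy_1
    h_in - h_out_2,                 # _eval_intrinsic_equation_enthalpy_2
    m_in - (m_out_1 + m_out_2),     # _eval_intrinsic_equations_mass
]
assert all(abs(r) < 1e-9 for r in residuals)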
timothycrosley/pies
|
pies/overrides.py
|
1
|
8179
|
"""pies/overrides.py.
Overrides Python syntax to conform to the Python3 version as much as possible using a '*' import
Copyright (C) 2013 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
import math as _math
import abc
import functools
import sys
from numbers import Integral
from ._utils import unmodified_isinstance, with_metaclass
from .version_info import PY2, PY3, VERSION
native_dict = dict
native_round = round
native_filter = filter
native_map = map
native_zip = zip
native_range = range
native_str = str
native_chr = chr
native_input = input
native_next = next
native_object = object
common = ['native_dict', 'native_round', 'native_filter', 'native_map', 'native_range', 'native_str', 'native_chr',
'native_input', 'PY2', 'PY3', 'u', 'itemsview', 'valuesview', 'keysview', 'execute', 'integer_types',
'native_next', 'native_object', 'with_metaclass']
if PY3:
import urllib
import builtins
from urllib import parse
from collections import OrderedDict
integer_types = (int, )
def u(string):
return string
def itemsview(collection):
return collection.items()
def valuesview(collection):
return collection.values()
def keysview(collection):
return collection.keys()
urllib.quote = parse.quote
urllib.quote_plus = parse.quote_plus
urllib.unquote = parse.unquote
urllib.unquote_plus = parse.unquote_plus
urllib.urlencode = parse.urlencode
execute = getattr(builtins, 'exec')
if VERSION[1] < 2:
def callable(entity):
return hasattr(entity, '__call__')
common.append('callable')
__all__ = common + ['OrderedDict', 'urllib']
else:
from itertools import ifilter as filter
from itertools import imap as map
from itertools import izip as zip
from decimal import Decimal, ROUND_HALF_EVEN
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import codecs
str = unicode
chr = unichr
input = raw_input
range = xrange
integer_types = (int, long)
# Reloading the sys module kills IPython's output printing.
#import sys
#stdout = sys.stdout
#stderr = sys.stderr
#reload(sys)
#sys.stdout = stdout
#sys.stderr = stderr
#sys.setdefaultencoding('utf-8')
def _create_not_allowed(name):
def _not_allow(*args, **kwargs):
raise NameError("name '{0}' is not defined".format(name))
_not_allow.__name__ = name
return _not_allow
for removed in ('apply', 'cmp', 'coerce', 'execfile', 'raw_input', 'unpacks'):
globals()[removed] = _create_not_allowed(removed)
def u(s):
if isinstance(s, unicode):
return s
else:
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
def execute(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
class _dict_view_base(object):
__slots__ = ('_dictionary', )
def __init__(self, dictionary):
self._dictionary = dictionary
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__, str(list(self.__iter__())))
def __unicode__(self):
return str(self.__repr__())
def __str__(self):
return str(self.__unicode__())
class dict_keys(_dict_view_base):
__slots__ = ()
def __iter__(self):
return self._dictionary.iterkeys()
class dict_values(_dict_view_base):
__slots__ = ()
def __iter__(self):
return self._dictionary.itervalues()
class dict_items(_dict_view_base):
__slots__ = ()
def __iter__(self):
return self._dictionary.iteritems()
def itemsview(collection):
return dict_items(collection)
def valuesview(collection):
return dict_values(collection)
def keysview(collection):
return dict_keys(collection)
class dict(unmodified_isinstance(native_dict)):
def has_key(self, *args, **kwargs):
return AttributeError("'dict' object has no attribute 'has_key'")
def items(self):
return dict_items(self)
def keys(self):
return dict_keys(self)
def values(self):
return dict_values(self)
def round(number, ndigits=None):
return_int = False
if ndigits is None:
return_int = True
ndigits = 0
if hasattr(number, '__round__'):
return number.__round__(ndigits)
if ndigits < 0:
raise NotImplementedError('negative ndigits not supported yet')
# Python 2.6 doesn't support from_float.
if sys.version_info[1] <= 6:
return native_round(number, ndigits)
exponent = Decimal('10') ** (-ndigits)
d = Decimal.from_float(number).quantize(exponent,
rounding=ROUND_HALF_EVEN)
if return_int:
return int(d)
else:
return float(d)
def next(iterator):
try:
return iterator.__next__()
except Exception:
return native_next(iterator)
class FixStr(type):
def __new__(cls, name, bases, dct):
if '__str__' in dct:
dct['__unicode__'] = dct['__str__']
dct['__str__'] = lambda self: self.__unicode__().encode('utf-8')
return type.__new__(cls, name, bases, dct)
if sys.version_info[1] <= 6:
def __instancecheck__(cls, instance):
if cls.__name__ == "object":
return isinstance(instance, native_object)
subclass = getattr(instance, '__class__', None)
subtype = type(instance)
instance_type = getattr(abc, '_InstanceType', None)
if not instance_type:
class test_object:
pass
instance_type = type(test_object)
if subtype is instance_type:
subtype = subclass
if subtype is subclass or subclass is None:
return cls.__subclasscheck__(subtype)
return (cls.__subclasscheck__(subclass) or cls.__subclasscheck__(subtype))
else:
def __instancecheck__(cls, instance):
if cls.__name__ == "object":
return isinstance(instance, native_object)
return type.__instancecheck__(cls, instance)
class object(with_metaclass(FixStr, object)):
pass
__all__ = common + ['round', 'dict', 'apply', 'cmp', 'coerce', 'execfile', 'raw_input', 'unpacks', 'str', 'chr',
'input', 'range', 'filter', 'map', 'zip', 'object']
|
mit
| 6,420,396,422,063,561,000 | 31.328063 | 116 | 0.59396 | false |
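The round() backport in the entry above quantizes through Decimal with ROUND_HALF_EVEN, i.e. the banker's rounding that Python 3's built-in round() uses. A small standalone illustration of that rounding mode:

from decimal import Decimal, ROUND_HALF_EVEN

def round_half_even(number, ndigits=0):
    # Same idea as the backport: quantize to 10**-ndigits, ties go to the even digit.
    exponent = Decimal('10') ** (-ndigits)
    return float(Decimal.from_float(number).quantize(exponent, rounding=ROUND_HALF_EVEN))

print(round_half_even(0.5))       # 0.0  (the tie rounds to the even neighbour)
print(round_half_even(1.5))       # 2.0
print(round_half_even(2.5))       # 2.0
print(round_half_even(2.675, 2))  # 2.67 (the binary float is actually just below 2.675)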
amanharitsh123/zulip
|
zerver/tornado/socket.py
|
1
|
13905
|
from typing import Any, Dict, Mapping, Optional, Text, Union
from django.conf import settings
from django.utils.timezone import now as timezone_now
from django.utils.translation import ugettext as _
from django.contrib.sessions.models import Session as djSession
try:
from django.middleware.csrf import _compare_salted_tokens
except ImportError:
# This function was added in Django 1.10.
def _compare_salted_tokens(token1, token2):
# type: (str, str) -> bool
return token1 == token2
import sockjs.tornado
from sockjs.tornado.session import ConnectionInfo
import tornado.ioloop
import ujson
import logging
import time
from zerver.models import UserProfile, get_user_profile_by_id, get_client
from zerver.lib.queue import queue_json_publish
from zerver.lib.actions import check_send_message, extract_recipients
from zerver.decorator import JsonableError
from zerver.lib.utils import statsd
from zerver.middleware import record_request_start_data, record_request_stop_data, \
record_request_restart_data, write_log_line, format_timedelta
from zerver.lib.redis_utils import get_redis_client
from zerver.lib.sessions import get_session_user
from zerver.tornado.event_queue import get_client_descriptor
from zerver.tornado.exceptions import BadEventQueueIdError
logger = logging.getLogger('zulip.socket')
def get_user_profile(session_id):
# type: (Optional[Text]) -> Optional[UserProfile]
if session_id is None:
return None
try:
djsession = djSession.objects.get(expire_date__gt=timezone_now(),
session_key=session_id)
except djSession.DoesNotExist:
return None
try:
return get_user_profile_by_id(get_session_user(djsession))
except (UserProfile.DoesNotExist, KeyError):
return None
connections = dict() # type: Dict[Union[int, str], SocketConnection]
def get_connection(id):
# type: (Union[int, str]) -> Optional[SocketConnection]
return connections.get(id)
def register_connection(id, conn):
# type: (Union[int, str], SocketConnection) -> None
# Kill any old connections if they exist
if id in connections:
connections[id].close()
conn.client_id = id
connections[conn.client_id] = conn
def deregister_connection(conn):
# type: (SocketConnection) -> None
assert conn.client_id is not None
del connections[conn.client_id]
redis_client = get_redis_client()
def req_redis_key(req_id):
# type: (Text) -> Text
return u'socket_req_status:%s' % (req_id,)
class CloseErrorInfo(object):
def __init__(self, status_code, err_msg):
# type: (int, str) -> None
self.status_code = status_code
self.err_msg = err_msg
class SocketConnection(sockjs.tornado.SockJSConnection):
client_id = None # type: Optional[Union[int, str]]
def on_open(self, info):
# type: (ConnectionInfo) -> None
log_data = dict(extra='[transport=%s]' % (self.session.transport_name,))
record_request_start_data(log_data)
ioloop = tornado.ioloop.IOLoop.instance()
self.authenticated = False
self.session.user_profile = None
self.close_info = None # type: Optional[CloseErrorInfo]
self.did_close = False
try:
self.browser_session_id = info.get_cookie(settings.SESSION_COOKIE_NAME).value
self.csrf_token = info.get_cookie(settings.CSRF_COOKIE_NAME).value
except AttributeError:
# The request didn't contain the necessary cookie values. We can't
# close immediately because sockjs-tornado doesn't expect a close
# inside on_open(), so do it on the next tick.
self.close_info = CloseErrorInfo(403, "Initial cookie lacked required values")
ioloop.add_callback(self.close)
return
def auth_timeout():
# type: () -> None
self.close_info = CloseErrorInfo(408, "Timeout while waiting for authentication")
self.close()
self.timeout_handle = ioloop.add_timeout(time.time() + 10, auth_timeout)
write_log_line(log_data, path='/socket/open', method='SOCKET',
remote_ip=info.ip, email='unknown', client_name='?')
def authenticate_client(self, msg):
# type: (Dict[str, Any]) -> None
if self.authenticated:
self.session.send_message({'req_id': msg['req_id'], 'type': 'response',
'response': {'result': 'error', 'msg': 'Already authenticated'}})
return
user_profile = get_user_profile(self.browser_session_id)
if user_profile is None:
raise JsonableError(_('Unknown or missing session'))
self.session.user_profile = user_profile
if not _compare_salted_tokens(msg['request']['csrf_token'], self.csrf_token):
raise JsonableError(_('CSRF token does not match that in cookie'))
if 'queue_id' not in msg['request']:
raise JsonableError(_("Missing 'queue_id' argument"))
queue_id = msg['request']['queue_id']
client = get_client_descriptor(queue_id)
if client is None:
raise BadEventQueueIdError(queue_id)
if user_profile.id != client.user_profile_id:
raise JsonableError(_("You are not the owner of the queue with id '%s'") % (queue_id,))
self.authenticated = True
register_connection(queue_id, self)
response = {'req_id': msg['req_id'], 'type': 'response',
'response': {'result': 'success', 'msg': ''}}
status_inquiries = msg['request'].get('status_inquiries')
if status_inquiries is not None:
results = {} # type: Dict[str, Dict[str, str]]
for inquiry in status_inquiries:
status = redis_client.hgetall(req_redis_key(inquiry)) # type: Dict[bytes, bytes]
if len(status) == 0:
result = {'status': 'not_received'}
elif b'response' not in status:
result = {'status': status[b'status'].decode('utf-8')}
else:
result = {'status': status[b'status'].decode('utf-8'),
'response': ujson.loads(status[b'response'])}
results[str(inquiry)] = result
response['response']['status_inquiries'] = results
self.session.send_message(response)
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.remove_timeout(self.timeout_handle)
def on_message(self, msg_raw):
# type: (str) -> None
log_data = dict(extra='[transport=%s' % (self.session.transport_name,))
record_request_start_data(log_data)
msg = ujson.loads(msg_raw)
if self.did_close:
logger.info("Received message on already closed socket! transport=%s user=%s client_id=%s"
% (self.session.transport_name,
self.session.user_profile.email if self.session.user_profile is not None else 'unknown',
self.client_id))
self.session.send_message({'req_id': msg['req_id'], 'type': 'ack'})
if msg['type'] == 'auth':
log_data['extra'] += ']'
try:
self.authenticate_client(msg)
# TODO: Fill in the correct client
write_log_line(log_data, path='/socket/auth', method='SOCKET',
remote_ip=self.session.conn_info.ip,
email=self.session.user_profile.email,
client_name='?')
except JsonableError as e:
response = e.to_json()
self.session.send_message({'req_id': msg['req_id'], 'type': 'response',
'response': response})
write_log_line(log_data, path='/socket/auth', method='SOCKET',
remote_ip=self.session.conn_info.ip,
email='unknown', client_name='?',
status_code=403, error_content=ujson.dumps(response))
return
else:
if not self.authenticated:
response = {'result': 'error', 'msg': "Not yet authenticated"}
self.session.send_message({'req_id': msg['req_id'], 'type': 'response',
'response': response})
write_log_line(log_data, path='/socket/service_request', method='SOCKET',
remote_ip=self.session.conn_info.ip,
email='unknown', client_name='?',
status_code=403, error_content=ujson.dumps(response))
return
redis_key = req_redis_key(msg['req_id'])
with redis_client.pipeline() as pipeline:
pipeline.hmset(redis_key, {'status': 'received'})
pipeline.expire(redis_key, 60 * 60 * 24)
pipeline.execute()
record_request_stop_data(log_data)
queue_json_publish("message_sender",
dict(request=msg['request'],
req_id=msg['req_id'],
server_meta=dict(user_id=self.session.user_profile.id,
client_id=self.client_id,
return_queue="tornado_return",
log_data=log_data,
request_environ=dict(REMOTE_ADDR=self.session.conn_info.ip))),
fake_message_sender)
def on_close(self):
# type: () -> None
log_data = dict(extra='[transport=%s]' % (self.session.transport_name,))
record_request_start_data(log_data)
if self.close_info is not None:
write_log_line(log_data, path='/socket/close', method='SOCKET',
remote_ip=self.session.conn_info.ip, email='unknown',
client_name='?', status_code=self.close_info.status_code,
error_content=self.close_info.err_msg)
else:
deregister_connection(self)
email = self.session.user_profile.email \
if self.session.user_profile is not None else 'unknown'
write_log_line(log_data, path='/socket/close', method='SOCKET',
remote_ip=self.session.conn_info.ip, email=email,
client_name='?')
self.did_close = True
def fake_message_sender(event):
# type: (Dict[str, Any]) -> None
"""This function is used only for Casper and backend tests, where
rabbitmq is disabled"""
log_data = dict() # type: Dict[str, Any]
record_request_start_data(log_data)
req = event['request']
try:
sender = get_user_profile_by_id(event['server_meta']['user_id'])
client = get_client("website")
msg_id = check_send_message(sender, client, req['type'],
extract_recipients(req['to']),
req['subject'], req['content'],
local_id=req.get('local_id', None),
sender_queue_id=req.get('queue_id', None))
resp = {"result": "success", "msg": "", "id": msg_id}
except JsonableError as e:
resp = {"result": "error", "msg": str(e)}
server_meta = event['server_meta']
server_meta.update({'worker_log_data': log_data,
'time_request_finished': time.time()})
result = {'response': resp, 'req_id': event['req_id'],
'server_meta': server_meta}
respond_send_message(result)
def respond_send_message(data):
# type: (Mapping[str, Any]) -> None
log_data = data['server_meta']['log_data']
record_request_restart_data(log_data)
worker_log_data = data['server_meta']['worker_log_data']
forward_queue_delay = worker_log_data['time_started'] - log_data['time_stopped']
return_queue_delay = log_data['time_restarted'] - data['server_meta']['time_request_finished']
service_time = data['server_meta']['time_request_finished'] - worker_log_data['time_started']
log_data['extra'] += ', queue_delay: %s/%s, service_time: %s]' % (
format_timedelta(forward_queue_delay), format_timedelta(return_queue_delay),
format_timedelta(service_time))
client_id = data['server_meta']['client_id']
connection = get_connection(client_id)
if connection is None:
logger.info("Could not find connection to send response to! client_id=%s" % (client_id,))
else:
connection.session.send_message({'req_id': data['req_id'], 'type': 'response',
'response': data['response']})
# TODO: Fill in client name
# TODO: Maybe fill in the status code correctly
write_log_line(log_data, path='/socket/service_request', method='SOCKET',
remote_ip=connection.session.conn_info.ip,
email=connection.session.user_profile.email, client_name='?')
# We disable the eventsource and htmlfile transports because they cannot
# securely send us the zulip.com cookie, which we use as part of our
# authentication scheme.
sockjs_router = sockjs.tornado.SockJSRouter(SocketConnection, "/sockjs",
{'sockjs_url': 'https://%s/static/third/sockjs/sockjs-0.3.4.js' % (
settings.EXTERNAL_HOST,),
'disabled_transports': ['eventsource', 'htmlfile']})
def get_sockjs_router():
# type: () -> sockjs.tornado.SockJSRouter
return sockjs_router
|
apache-2.0
| -7,408,919,245,389,131,000 | 43.283439 | 115 | 0.577778 | false |
betrisey/home-assistant
|
tests/test_util/aiohttp.py
|
1
|
3248
|
"""Aiohttp test utils."""
import asyncio
from contextlib import contextmanager
import functools
import json as _json
from unittest import mock
class AiohttpClientMocker:
"""Mock Aiohttp client requests."""
def __init__(self):
"""Initialize the request mocker."""
self._mocks = []
self.mock_calls = []
def request(self, method, url, *,
auth=None,
status=200,
text=None,
content=None,
json=None):
"""Mock a request."""
if json:
text = _json.dumps(json)
if text:
content = text.encode('utf-8')
if content is None:
content = b''
self._mocks.append(AiohttpClientMockResponse(
method, url, status, content))
def get(self, *args, **kwargs):
"""Register a mock get request."""
self.request('get', *args, **kwargs)
def put(self, *args, **kwargs):
"""Register a mock put request."""
self.request('put', *args, **kwargs)
def post(self, *args, **kwargs):
"""Register a mock post request."""
self.request('post', *args, **kwargs)
def delete(self, *args, **kwargs):
"""Register a mock delete request."""
self.request('delete', *args, **kwargs)
def options(self, *args, **kwargs):
"""Register a mock options request."""
self.request('options', *args, **kwargs)
@property
def call_count(self):
"""Number of requests made."""
return len(self.mock_calls)
@asyncio.coroutine
def match_request(self, method, url, *, auth=None):
"""Match a request against pre-registered requests."""
for response in self._mocks:
if response.match_request(method, url):
self.mock_calls.append((method, url))
return response
assert False, "No mock registered for {} {}".format(method.upper(),
url)
class AiohttpClientMockResponse:
"""Mock Aiohttp client response."""
def __init__(self, method, url, status, response):
"""Initialize a fake response."""
self.method = method
self.url = url
self.status = status
self.response = response
def match_request(self, method, url):
"""Test if response answers request."""
return method == self.method and url == self.url
@asyncio.coroutine
def read(self):
"""Return mock response."""
return self.response
@asyncio.coroutine
def text(self, encoding='utf-8'):
"""Return mock response as a string."""
return self.response.decode(encoding)
@asyncio.coroutine
def release(self):
"""Mock release."""
pass
@contextmanager
def mock_aiohttp_client():
"""Context manager to mock aiohttp client."""
mocker = AiohttpClientMocker()
with mock.patch('aiohttp.ClientSession') as mock_session:
instance = mock_session()
for method in ('get', 'post', 'put', 'options', 'delete'):
setattr(instance, method,
functools.partial(mocker.match_request, method))
yield mocker
|
mit
| 5,491,227,388,798,299,000 | 27.743363 | 75 | 0.564963 | false |
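A usage sketch for the mocker in the entry above, assuming the file is importable under its repo path as tests.test_util.aiohttp; the module uses the old generator-based asyncio.coroutine style, so this targets a similarly old Python, and the URL is illustrative:

import asyncio
from tests.test_util.aiohttp import AiohttpClientMocker

async def demo():
    mocker = AiohttpClientMocker()
    mocker.get('http://example.com/api', json={'ok': True})
    # match_request() is what the patched ClientSession methods delegate to.
    response = await mocker.match_request('get', 'http://example.com/api')
    return await response.text(), mocker.call_count

loop = asyncio.new_event_loop()
print(loop.run_until_complete(demo()))   # ('{"ok": true}', 1)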
espressif/esp-idf
|
tools/ci/python_packages/tiny_test_fw/App.py
|
1
|
2939
|
# Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
class for handling Test Apps. Currently it provides the following features:
1. get SDK path
2. get SDK tools
3. parse application info from its path. for example:
* provide download info
* provide partition table info
Test Apps should inherit from the BaseApp class and overwrite the methods.
"""
import os
import sys
import time
# timestamp used to calculate the log folder name
LOG_FOLDER_TIMESTAMP = time.time()
class BaseApp(object):
"""
Base Class for App.
Defines the mandatory methods that an App needs to implement.
Also implements some common methods.
:param app_path: the path for app.
:param config_name: app configuration to be tested
:param target: build target
"""
def __init__(self, app_path, config_name=None, target=None):
pass
@classmethod
def get_sdk_path(cls):
"""
get sdk path.
subclass must overwrite this method.
:return: abs sdk path
"""
pass
@classmethod
def get_tools(cls):
"""
get SDK related tools for applications
subclass must overwrite this method.
:return: tuple, abs path of each tool
"""
pass
@classmethod
def get_log_folder(cls, test_suite_name):
"""
By default log folder is ``${SDK_PATH}/TEST_LOGS/${test_suite_name}_${timestamp}``.
The log folder name is fixed once the run starts, ensuring all logs will be put into the same folder.
:param test_suite_name: the test suite name, by default it's the base file name for main module
:return: the log folder path
"""
if not test_suite_name:
test_suite_name = os.path.splitext(os.path.basename(sys.modules['__main__'].__file__))[0]
sdk_path = cls.get_sdk_path()
log_folder = os.path.join(sdk_path, 'TEST_LOGS',
test_suite_name +
time.strftime('_%m%d_%H_%M_%S', time.localtime(LOG_FOLDER_TIMESTAMP)))
if not os.path.exists(log_folder):
os.makedirs(log_folder)
return log_folder
def process_app_info(self):
"""
parse built app info for DUTTool
subclass must overwrite this method.
:return: required info for specific DUTTool
"""
pass
|
apache-2.0
| -2,223,458,540,208,511,200 | 28.989796 | 111 | 0.643076 | false |
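The log-folder naming rule described in get_log_folder() above boils down to the following; a standalone sketch where the SDK path and suite name are illustrative values:

import os
import time

LOG_FOLDER_TIMESTAMP = time.time()   # fixed once at import, shared by every call
sdk_path = '/opt/esp-idf'            # illustrative; the real value comes from get_sdk_path()
test_suite_name = 'example_test'

log_folder = os.path.join(
    sdk_path, 'TEST_LOGS',
    test_suite_name + time.strftime('_%m%d_%H_%M_%S', time.localtime(LOG_FOLDER_TIMESTAMP)))
print(log_folder)   # e.g. /opt/esp-idf/TEST_LOGS/example_test_0131_14_05_09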
OCA/margin-analysis
|
sale_order_margin_percent/tests/test_margin_percent.py
|
1
|
1456
|
# Copyright 2019 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl
from odoo.tests import SavepointCase
class TestSaleMarginPercent(SavepointCase):
def setUp(self):
super().setUp()
self.SaleOrder = self.env['sale.order']
self.product_uom_id = self.ref('uom.product_uom_unit')
self.product_id = self.ref('product.product_product_24')
self.partner_id = self.ref('base.res_partner_4')
def test_sale_margin(self):
""" Test the sale_margin module in Odoo. """
sale_order = self.SaleOrder.create({
'name': 'Test_SO011',
'order_line': [
(0, 0, {
'name': '[CARD] Graphics Card',
'purchase_price': 700.0,
'price_unit': 1000.0,
'product_uom': self.product_uom_id,
'product_uom_qty': 10.0,
'state': 'draft',
'product_id': self.product_id}),
],
'partner_id': self.partner_id,
})
# (1000 - 700)*10 = 3000 - margin
# 1000 * 10 = 10000 - amount untaxed
self.assertEqual(sale_order.percent, 30.00)
sale_order.order_line.price_unit = 1200
sale_order._amount_all()
# (1200 - 700)*10 = 5000 - margin
# 1200 * 10 = 12000 - amount untaxed
self.assertEqual(sale_order.percent, 41.67)
|
agpl-3.0
| -5,844,371,091,095,846,000 | 34.512195 | 64 | 0.524725 | false |
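The expected values in the test above follow directly from the margin arithmetic spelled out in its comments; as a quick standalone check (plain Python, not the Odoo API):

def margin_percent(price_unit, purchase_price, qty):
    margin = (price_unit - purchase_price) * qty
    amount_untaxed = price_unit * qty
    return round(margin / amount_untaxed * 100, 2)

print(margin_percent(1000.0, 700.0, 10))   # 30.0
print(margin_percent(1200.0, 700.0, 10))   # 41.67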
tatsuhirosatou/JMdictDB
|
python/lib/jmxml.py
|
1
|
40347
|
#######################################################################
# This file is part of JMdictDB.
# Copyright (c) 2006-2012 Stuart McGraw
#
# JMdictDB is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License,
# or (at your option) any later version.
#
# JMdictDB is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with JMdictDB; if not, write to the Free Software Foundation,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#######################################################################
"""
Functions for parsing XML descriptions of entries into
entry objects.
"""
import sys, os, re, datetime
from collections import defaultdict
#import lxml.etree as ElementTree
import xml.etree.cElementTree as ElementTree
import jdb, xmlkw
class ParseError (RuntimeError): pass
class NotFoundError (RuntimeError): pass
def _ent_repl (mo):
# This func is used in re.sub() calls below to replace all
# but the standard xml entities with ordinary text strings.
orig = mo.group(0)
if orig in ('<','>','&','"'): return orig
return orig[1:-1]
class JmdictFile:
# Wrap a standard file object and preprocess the lines being
# read (presumably by an XML parser) for four purposes:
# 1. Keep track of the file line number (for error messages).
# 2. Since the XML parser doesn't seem to be able to extract
# the JMdict creation date comment (because it is outside
# of the root element), we do it here.
# 3. Build a map of the jmdict entities defined in the DTD.
# 4. Replace jmdict entities with fixed (i.e. non-entity)
# text strings of the same value (e.g., "&v5r;" -> "v5r")
# It is more convenient to work with the entity string
# values than their expanded text values.
def __init__ (self, source):
self.source = source; self.lineno = 0
self.name = None; self.created=None
def read (self, bytes): # 'bytes' argument ignored.
s = self.source.readline(); self.lineno += 1
if self.lineno == 1:
if s[0] == '\uFEFF': s = s[1:]
s = re.sub (r'&[a-zA-Z0-9-]+;', _ent_repl, s)
if self.created is None and self.lineno < 400:
mo = re.search (r'<!-- ([a-zA-Z]+) created: (\d{4})-(\d{2})-(\d{2}) -->', s)
if mo:
self.name = mo.group(1)
self.created = datetime.date (*map(int, mo.group(2,3,4)))
return s
class Jmparser (object):
def __init__ (self,
kw, # A jdb.Kwds object initialized with database
# keywords, such as returned by jdb.dbOpen()
# or jdb.Kwds(jdb.std_csv_dir()).
xkw=None, # A jdb.Kwds object initialized with XML
# keywords, such as returned by xmlmk.make().
# If None, __init__() will get one by calling
# xmlmk.make(kw).
logfile=None): # File object to write warning messages to.
self.KW = kw
if xkw: self.XKW = xkw
else: self.XKW = xmlkw.make (kw)
self.logfile = logfile
self.seq = 0
def parse_entry (self, txt, dtd=None):
# Convert an XML text string into entry objects.
#
# @param txt Text string containing a piece of XML defining
# one of more entry elements.
# @param dtd (optional) A text string providing a DTD to be
# prepended to 'txt'. If not provided, no DTD will
# be prepended.
# @returns A list of entry objects.
if dtd: txt = dtd + txt
else:
pat = '&[a-zA-Z0-9-]+;'
if isinstance (txt, bytes): pat = pat.encode ('latin1')
txt = re.sub (pat, _ent_repl, txt)
xo = ElementTree.XML (txt)
if xo is None:
print ("No parse results")
return []
e = self.do_entr (xo, None)
return [e]
def parse_xmlfile (self,
inpf, # (file) An open jmdict/jmnedict XML file..
startseq=None, # (int) Skip until an entry with this seq
# number is seen, or None to start at first
# entry. See also parameters seqnum_init
# and seqnum_incr below.
elimit=None, # (int) Maximum number of entries to process.
xlit=False, # (bool) Extract "lit" info from glosses.
xlang=None, # (list) List of lang id's to limit extracted
# glosses to.
corp_dict=None, # (dict) A mapping that contains corpus (aka
# "kwsrc") records indexed by id number and
# name. <ent_corp> elements will be looked
# up in this dict. If not supplied, it is
# expected that <corpus> elements will occur
# in the XML that define corpora before they
# are referenced by <ent_corp> elements.
grpdefs=None, # (dict) A mapping that contains grpdef (aka
# "kwgrp") records indexed by id number and
# name. <group> elements will be looked
# up in this dict. If not supplied, it is
# expected that <grpdef> elements will occur
# in the XML that define corpora before they
# are referenced by <group> elements.
toptag=False, # (bool) Make first item returned by iterator
# a string giving the name of the top-level
# element.
seqnum_init=1, # If an entry does not contain a <ent_seq> tag
seqnum_incr=1): # giving its sequence number, calculate a
# seq number for it from the formula:
# seqnum_init + (seqnum_incr * entrnum)
# where entrnum is the ordinal position of
# the entry in the file, starting at 0. For
# example, to get jmdict-like sequence numbers
# use seqnum_init=1000000 and seqnum_incr=10.
etiter = iter(ElementTree.iterparse( inpf, ("start","end")))
event, root = next(etiter)
if toptag: yield 'root', root.tag
if corp_dict is None: corp_dict = {}
if grpdefs is None: grpdefs = {}
elist=[]; count=0; entrnum=0
for event, elem in etiter:
if elem.tag not in ['entry', 'grpdef', 'corpus']: continue
if event == "start":
lineno = getattr (inpf, 'lineno', None)
if elem.tag == 'entry': entrnum += 1
continue
# At this point elem.tag must be either 'entry', 'corpus', or
# 'grpdef', and event is 'end'.
if elem.tag == 'grpdef':
grpdef = self.do_grpdef (elem)
grpdefs[grpdef.id] = grpdefs[grpdef.kw] = grpdef
yield "grpdef", grpdef
continue
if elem.tag == 'corpus':
corp = self.do_corpus (elem)
corp_dict[corp.id] = corp_dict[corp.kw] = corp
yield "corpus", corp
continue
# From this point on elem.tag is 'entr'...
prevseq = self.seq
# Old-style (pre 2014-10) jmnedict xml does not have "ent_seq"
# elements so we will generate a synthetic seq_number based on
# the ordinal position of the entry in the file ('entrnum').
self.seq = seq = int (elem.findtext ('ent_seq')
or ((entrnum-1) * seqnum_incr + seqnum_init))
if prevseq and seq <= prevseq:
self.warn (" (line %d): Sequence less than preceeding sequence" % lineno)
if not startseq or seq >= startseq:
startseq = None
try: entr = self.do_entr (elem, seq, xlit, xlang, corp_dict, grpdefs)
except ParseError as e:
self.warn (" (line %d): %s" % (lineno, e))
else: yield "entry", entr
count += 1
if elimit and count >= elimit: break
root.clear()
def do_corpus (self, elem):
o = jdb.Obj (id=int(elem.get('id')), kw=elem.findtext ('co_name'))
descr = elem.findtext ('co_descr')
if descr: o.descr = descr
dt = elem.findtext ('co_date')
if dt: o.dt = dt
notes = elem.findtext ('co_notes')
if notes: o.notes = notes
sname = elem.findtext ('co_sname')
if sname: o.seq = sname
sinc = elem.findtext ('co_sinc')
if sinc: o.sinc = sinc
smin = elem.findtext ('co_smin')
if smin: o.smin = smin
smax = elem.findtext ('co_smax')
if smax: o.smax = smax
return o
def do_grpdef (self, elem):
o = jdb.Obj (id=int(elem.get('id')), kw=elem.findtext ('gd_name'))
descr = elem.findtext ('gd_descr')
if descr: o.descr = descr
return o
def do_entr (self, elem, seq, xlit=False, xlang=None, corp_dict=None, grpdefs=None):
"""
Create an entr object from a parsed ElementTree entry
element, 'elem'. 'lineno' is the source file line number
of the "<entry>" line or None and is only used in error
messages.
Note that the entry object returned is different from one
read from the database in the following respects:
* The 'entr' record will have no .src (aka corpus) attribute
if there is no <ent_corp> element in the entry. In this
case the .src attribute is expected to be added by the
caller. If there is a <ent_corp> element, it will be
used to find a corpus in 'corp_dict', which in turn will
will provide an id number used in .src.
* Items in sense's _xref list are unresolved xrefs, not
resolved xrefs as in a database entr object.
jdb.resolv_xref() or similar can be used to resolve the
xrefs.
* Attributes will be missing if the corresponding xml
information is not present. For example, if a particular
entry has no <ke_ele> elements, the entr object will not
have a '._kanj' attribute. In an entr object read from
the database, it will have a '._kanj' attribute with a
value of [].
* The entr object does not have many of the foreign key
attributes: gloss.gloss, xref.xref, <anything>.entr, etc.
However, it does have rdng.rdng, kanj.kanj, and sens.sens
attributes since these are required when adding restr,
stagr, stagk, and freq objects.
"""
XKW, KW = self.XKW, self.KW
entr = jdb.Entr ()
if not seq:
elemseq = elem.find ('ent_seq')
if elemseq is None: raise ParseError ("No <ent_seq> element found")
try: seq = int (elemseq.text)
except ValueError: raise ParseError ("Invalid 'ent_seq' value, '%s'" % elem.text)
if seq <= 0: raise ParseError ("Invalid 'ent_seq' value, '%s'" % elem.text)
entr.seq = seq
id = elem.get('id')
if id is not None: entr.id = int (id)
dfrm = elem.get('dfrm')
if dfrm is not None: entr.dfrm = int (dfrm)
stat = elem.get('status') or jdb.KW.STAT['A'].id
try: stat = XKW.STAT[stat].id
except KeyError: raise ParseError ("Invalid <status> element value, '%s'" % stat)
entr.stat = stat
entr.unap = elem.get('appr') == 'n'
corpname = elem.findtext('ent_corp')
if corpname is not None: entr.src = corp_dict[corpname].id
fmap = defaultdict (lambda:([],[]))
self.do_kanjs (elem.findall('k_ele'), entr, fmap)
self.do_rdngs (elem.findall('r_ele'), entr, fmap)
if fmap:
freq_errs = jdb.make_freq_objs (fmap, entr)
for x in freq_errs:
typ, r, k, kw, val = x
kwstr = XKW.FREQ[kw].kw + str (val)
self.freq_warn (typ, r, k, kwstr)
self.do_senss (elem.findall('sense'), entr, xlit, xlang)
self.do_senss (elem.findall('trans'), entr, xlit, xlang)
self.do_info (elem.findall("info"), entr)
self.do_audio (elem.findall("audio"), entr, jdb.Entrsnd)
self.do_groups(elem.findall("group"), entr, grpdefs)
return entr
def do_info (self, elems, entr):
if not elems: return
elem = elems[0] # DTD allows only one "info" element.
x = elem.findtext ('srcnote')
if x: entr.srcnote = x
x = elem.findtext ('notes')
if x: entr.notes = x
self.do_hist (elem.findall("audit"), entr)
def do_kanjs (self, elems, entr, fmap):
if elems is None: return
kanjs = []; dupchk = {}
for ord, elem in enumerate (elems):
txt = elem.find('keb').text
if not jdb.unique (txt, dupchk):
self.warn ("Duplicate keb text: '%s'" % txt); continue
if not (jdb.jstr_keb (txt)):
self.warn ("keb text '%s' not kanji." % txt)
kanj = jdb.Kanj (kanj=ord+1, txt=txt)
self.do_kws (elem.findall('ke_inf'), kanj, '_inf', 'KINF')
for x in elem.findall ('ke_pri'):
freqtuple = self.parse_freq (x.text, "ke_pri")
if not freqtuple: continue
klist = fmap[freqtuple][1]
if not jdb.isin (kanj, klist): klist.append (kanj)
else: self.freq_warn ("Duplicate", None, kanj, x.text)
kanjs.append (kanj)
if kanjs: entr._kanj = kanjs
def do_rdngs (self, elems, entr, fmap):
if elems is None: return
rdngs = getattr (entr, '_rdng', [])
kanjs = getattr (entr, '_kanj', [])
rdngs = []; dupchk = {}
for ord, elem in enumerate (elems):
txt = elem.find('reb').text
if not jdb.unique (txt, dupchk):
self.warn ("Duplicate reb text: '%s'" % txt); continue
if not jdb.jstr_reb (txt):
self.warn ("reb text '%s' not kana." % txt)
rdng = jdb.Rdng (rdng=ord+1, txt=txt)
self.do_kws (elem.findall('re_inf'), rdng, '_inf', 'RINF')
for x in elem.findall ('re_pri'):
freqtuple = self.parse_freq (x.text, "re_pri")
if not freqtuple: continue
rlist = fmap[freqtuple][0]
if not jdb.isin (rdng, rlist): rlist.append (rdng)
else: self.freq_warn ("Duplicate", rdng, None, x.text)
nokanji = elem.find ('re_nokanji')
self.do_restr (elem.findall('re_restr'), rdng, kanjs, 'restr', nokanji)
self.do_audio (elem.findall("audio"), rdng, jdb.Rdngsnd)
rdngs.append (rdng)
if rdngs: entr._rdng = rdngs
def do_senss (self, elems, entr, xlit=False, xlang=None):
XKW = self.XKW
rdngs = getattr (entr, '_rdng', [])
kanjs = getattr (entr, '_kanj', [])
senss = []; last_pos = None
for ord, elem in enumerate (elems):
sens = jdb.Sens (sens=ord+1)
snotes = elem.find ('s_inf')
if snotes is not None and snotes.text: sens.notes = snotes.text
pelems = elem.findall('pos')
if pelems:
last_pos = self.do_kws (pelems, sens, '_pos', 'POS')
elif last_pos:
sens._pos = [jdb.Pos(kw=x.kw) for x in last_pos]
self.do_kws (elem.findall('name_type'), sens, '_misc', 'NAME_TYPE')
self.do_kws (elem.findall('misc'), sens, '_misc', 'MISC')
self.do_kws (elem.findall('field'), sens, '_fld', 'FLD')
self.do_kws (elem.findall('dial'), sens, '_dial', 'DIAL')
self.do_lsrc (elem.findall('lsource'), sens,)
self.do_gloss (elem.findall('gloss'), sens, xlit, xlang)
self.do_gloss (elem.findall('trans_det'), sens,)
self.do_restr (elem.findall('stagr'), sens, rdngs, 'stagr')
self.do_restr (elem.findall('stagk'), sens, kanjs, 'stagk')
self.do_xref (elem.findall('xref'), sens, jdb.KW.XREF['see'].id)
self.do_xref (elem.findall('ant'), sens, jdb.KW.XREF['ant'].id)
if not getattr (sens, '_gloss', None):
self.warn ("Sense %d has no glosses." % (ord+1))
senss.append (sens)
if senss: entr._sens = senss
def do_gloss (self, elems, sens, xlit=False, xlang=None):
XKW = self.XKW
glosses=[]; lits=[]; lsrc=[]; dupchk={}
for elem in elems:
lng = elem.get ('{http://www.w3.org/XML/1998/namespace}lang')
try: lang = XKW.LANG[lng].id if lng else XKW.LANG['eng'].id
except KeyError:
self.warn ("Invalid gloss lang attribute: '%s'" % lng)
continue
txt = elem.text
if not jdb.jstr_gloss (txt):
self.warn ("gloss text '%s' not latin characters." % txt)
lit = []
if xlit and ('lit:' in txt):
txt, lit = extract_lit (txt)
if not jdb.unique ((lang,txt), dupchk):
self.warn ("Duplicate lang/text in gloss '%s'/'%s'" % (lng, txt))
continue
# (entr,sens,gloss,lang,txt)
if txt and (not xlang or lang in xlang):
glosses.append (jdb.Gloss (lang=lang, ginf=XKW.GINF['equ'].id, txt=txt))
if lit:
lits.extend ([jdb.Gloss (lang=lang, ginf=XKW.GINF['lit'].id, txt=x) for x in lit])
if glosses or lits:
if not hasattr (sens, '_gloss'): sens._gloss = []
sens._gloss.extend (glosses + lits)
def do_lsrc (self, elems, sens):
lsrc = [];
for elem in elems:
txt = elem.text or ''
lng = elem.get ('{http://www.w3.org/XML/1998/namespace}lang')
try: lang = self.XKW.LANG[lng].id if lng else self.XKW.LANG['eng'].id
except KeyError:
self.warn ("Invalid lsource lang attribute: '%s'" % lng)
continue
lstyp = elem.get ('ls_type')
if lstyp and lstyp != 'part':
self.warn ("Invalid lsource type attribute: '%s'" % lstyp)
continue
wasei = elem.get ('ls_wasei') is not None
if (lstyp or wasei) and not txt:
attrs = ("ls_wasei" if wasei else '') \
+ ',' if wasei and lstyp else '' \
+ ("ls_type" if lstyp else '')
self.warn ("lsource has attribute(s) %s but no text" % attrs)
lsrc.append (jdb.Lsrc (lang=lang, txt=txt, part=lstyp=='part', wasei=wasei))
if lsrc:
if not hasattr (sens, '_lsrc'): sens._lsrc = []
sens._lsrc.extend (lsrc)
def do_xref (self, elems, sens, xtypkw):
# Create a xresolv record for each xml <xref> element. The xref
# may contain a kanji string, kana string, or kanji.\x{30fb}kana.
# (\x{30fb} is a mid-height dot.) It may optionally be followed
# by a \x{30fb} and a sense number.
# Since jmdict words may also contain \x{30fb} as part of their
# kanji or reading text we try to handle that by ignoring the
# \x{30fb} between two kana strings, two kanji strings, or a
# kana\x{30fb}kanji string. Of course if a jmdict word is
# kanji\x{30fb}kana then we're out of luck; it's ambiguous.
xrefs = []
for elem in elems:
txt = elem.text
# Split the xref text on the separator character.
frags = txt.split ("\u30fb")
# Check for a sense number in the rightmost fragment.
# But don't treat it as a sense number if it is the
# only fragment (which will leave us without any kana
# or kanji text which will fail when loading xresolv.
snum = None
if len (frags) > 0 and frags[-1].isdigit():
snum = int (frags.pop())
# Go through all the fragments, from right to left.
# For each, if it has no kanji, push it on the @rlst
# list. If it has kanji, and every fragment thereafter
# regardless of its kana/kanji status, push on the @klst
# list. $kflg is set to true when we see a kanji word
# to make that happen.
# We could do more checking here (that entries going
# into @rlst are kana for, example) but don't bother
# since, as long as the data loads into xresolv ok,
# weird xrefs will be found later by being unresolvable.
klst=[]; rlst=[]; kflg=False
for frag in reversed (frags):
if not kflg: jtyp = jdb.jstr_classify (frag)
if kflg or jtyp & jdb.KANJI:
klst.append (frag)
kflg = True
else: rlst.append (frag)
# Put the kanji and kana parts back together into
# strings, and write the xresolv record.
ktxt = "\u30fb".join (reversed (klst)) or None
rtxt = "\u30fb".join (reversed (rlst)) or None
if ktxt or rtxt:
xrefs.append (jdb.Xrslv (typ=xtypkw, ktxt=ktxt, rtxt=rtxt, tsens=snum))
if xrefs:
for n, x in enumerate (xrefs): x.ord = n + 1
if not hasattr (sens, '_xrslv'): sens._xrslv = []
sens._xrslv.extend (xrefs)
def do_hist (self, elems, entr):
XKW = self.XKW
hists = []
for elem in elems:
x = elem.findtext ("upd_date")
dt = datetime.datetime (*([int(z) for z in x.split ('-')] + [0, 0, 0]))
o = jdb.Hist (dt=dt)
stat = elem.findtext ("upd_stat")
unap = elem.find ("upd_unap")
notes = elem.findtext ("upd_detl")
name = elem.findtext ("upd_name")
email = elem.findtext ("upd_email")
diff = elem.findtext ("upd_diff")
refs = elem.findtext ("upd_refs")
o.stat = XKW.STAT[stat].id if stat else XKW.STAT['A'].id
o.unap = unap is not None
o.name = name if name else None
o.email = email if email else None
o.notes = notes if notes else None
o.refs = refs if refs else None
o.diff = diff if diff else None
hists.append (o)
if hists:
if not hasattr (entr, '_hist'): entr._hist = []
entr._hist.extend (hists)
def do_groups (self, elems, entr, grpdefs):
grps = []
for elem in elems:
txt = elem.text
try: grpdefid = grpdefs[txt].id
except KeyError:
self.warn ("Unrecognised group text '%s'." % txt)
continue
ordtxt = elem.get ('ord')
if not ordtxt:
self.warn ("Missing group 'ord' attribute on group element '%s'." % (txt))
continue
try: ord = int (ordtxt)
except (ValueError, TypeError):
self.warn ("Invalid 'ord' attribute '%s' on group element '%s'." % (ordtxt, txt))
continue
grps.append (jdb.Grp (kw=grpdefid, ord=ord))
if grps:
if not getattr (entr, '_grp', None): entr._grp = grps
else: entr._grp.extend (grps)
def do_audio (self, elems, entr_or_rdng, sndclass):
snds = []
for n, elem in enumerate (elems):
v = elem.get ('clipid')
if not v:
self.warn ("Missing audio clipid attribute."); continue
try: clipid = int (v.lstrip('c'))
except (ValueError, TypeError): self.warn ("Invalid audio clipid attribute: %s" % v)
else:
snds.append (sndclass (snd=clipid, ord=n+1))
if snds:
if not hasattr (entr_or_rdng, '_snd'): entr_or_rdng._snd = []
entr_or_rdng._snd.extend (snds)
def do_kws (self, elems, obj, attr, kwtabname):
"""
Extract the keywords in the elementtree elements 'elems',
resolve them in kw table 'kwtabname', and append them to
the list attached to 'obj' named 'attr'.
"""
XKW = self.XKW
if elems is None or len(elems) == 0: return None
kwtab = getattr (XKW, kwtabname)
kwtxts, dups = jdb.rmdups ([x.text for x in elems])
try: cls = getattr (jdb, kwtabname.capitalize())
except AttributeError: cls = jdb.Obj
kwrecs = []
for x in kwtxts:
try: kw = kwtab[x].id
except KeyError:
self.warn ("Unknown %s keyword '%s'" % (kwtabname,x))
else:
kwrecs.append (cls (kw=kw))
dups, x = jdb.rmdups (dups)
for x in dups:
self.warn ("Duplicate %s keyword '%s'" % (kwtabname, x))
if kwrecs:
if not hasattr (obj, attr): setattr (obj, attr, [])
getattr (obj, attr).extend (kwrecs)
return kwrecs
def do_restr (self, elems, rdng, kanjs, rtype, nokanji=None):
"""
The function can be used to process stagr and stagk restrictions
in addition to re_restr restrictions, but for simplicity, code
comments and variable names assume re_restr processing.
elems -- A list of 're_restr' xml elements (may be empty).
rtype -- One of: 'restr', 'stagr', 'stagk', indicating the type
of restrictions being processed.
rdng -- If 'rtype' is "restr", a Rdng object, otherwise a Sens
object.
kanjs -- If 'rtype' is "restr" or "stagk", the entry's list
of Kanj objects. Otherwise, the entry's list of Rdng
objects.
nokanji -- True if 'rtype' is "restr" and the reading has a
<no_kanji> element, false otherwise.
Examples:
To use for restr restrictions:
do_restr (restr_elems, entr._rdng, entr._kanj, "restr", nokanji)
or stagr restrictions:
do_restr (stagr_elems, entr._sens, entr._rdng, "stagr")
or stagk restrictions:
do_restr (stagk_elems, entr._sens, entr._kanj, "stagk")
"""
if rtype == 'restr': rattr, kattr, pattr = 'rdng', 'kanj', '_restr'
elif rtype == 'stagr': rattr, kattr, pattr = 'sens', 'rdng', '_stagr'
elif rtype == 'stagk': rattr, kattr, pattr = 'sens', 'kanj', '_stagk'
# Warning, do not replace the 'nokanji is None' tests below
# with 'not nokanji'. 'nokanji' may be an elementtree element
# which can be False, even if not None. (See the element tree
# docs.)
if not elems and nokanji is None: return
if elems and nokanji is not None:
self.warn ("Conflicting 'nokanji' and 're_restr' in reading %d." % rdng.rdng)
if nokanji is not None: allowed_kanj = []
else:
allowed_kanj, dups = jdb.rmdups ([x.text for x in elems])
if dups:
self.warn ("Duplicate %s item(s) %s in %s %d."
% (pattr[1:], "'"+"','".join(dups)+"'",
rattr, getattr (rdng,rattr)))
for kanj in kanjs:
if kanj.txt not in allowed_kanj:
jdb.add_restrobj (rdng, rattr, kanj, kattr, pattr)
def parse_freq (self, fstr, ptype):
# Convert a re_pri or ke_pri element string (e.g "nf30") into
# numeric (id,value) pair (like 4,30) (4 is the id number of
# keyword "nf" in the database table "kwfreq", and we get it
# by looking it up in JM2ID (from jmdictxml.pm). In addition
# to the id,value pair, we also return keyword string.
# $ptype is a string used only in error or warning messages
# and is typically either "re_pri" or "ke_pri".
XKW = self.XKW
mo = re.match (r'^([a-z]+)(\d+)$', fstr)
if not mo:
self.warn ("Invalid %s, '%s'" % (ptype, fstr))
return None
kwstr, val = mo.group (1,2)
try: kw = XKW.FREQ[kwstr].id
except KeyError:
self.warn ("Unrecognised %s, '%s'" % (ptype, fstr))
return None
val = int (val)
#FIXME -- check for invalid values in 'val'.
return kw, val
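# Illustrative examples (the id value depends on the "kwfreq" table; the
# comment above assumes "nf" has id 4):
#   self.parse_freq ('nf30', 're_pri')  =>  (4, 30)
#   self.parse_freq ('nf3x', 're_pri')  =>  None    # warning: invalid re_pri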
def freq_warn (self, warn_type, r, k, kwstr):
tmp = []
if r: tmp.append ("reading %s" % r.txt)
if k: tmp.append ("kanji %s" % k.txt)
self.warn ("%s pri value '%s' in %s"
% (warn_type, kwstr, ', '.join (tmp)))
def warn (self, msg):
print ("Seq %d: %s" % (self.seq, msg),
file=self.logfile or sys.stderr)
def extract_lit (txt):
"""
Extract literal gloss text from a gloss text string, 'txt'.
"""
t = re.sub (r'^lit:\s*', '', txt)
if len(t) != len(txt): return '', [t]
# The following regex will match substrings like "(lit: xxxx)".
# The "xxxx" part may have parenthesised text but not nested.
# Thus, "lit: foo (on you) dude" will be correctly parsed, but
# "lit: (foo (on you)) dude" won't.
regex = r'\((lit):\s*((([^()]+)|(\([^)]+\)))+)\)'
start = 0; gloss=[]; lit=[]
for mo in re.finditer(regex, txt):
gtyp, special = mo.group(1,2)
brk, end = mo.span(0)
if brk - start > 0: gloss.append (txt[start:brk].strip())
lit.append (special.strip())
start = end
t = txt[start:len(txt)].strip()
if t: gloss.append (t)
gloss = ' '.join(gloss)
return gloss, lit
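# Examples:
#   extract_lit ("lit: long stomach")  =>  ('', ['long stomach'])
#   extract_lit ("to endure (lit: to have a long stomach)")
#       =>  ('to endure', ['to have a long stomach'])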
def crossprod (*args):
"""
Return the cross product of an arbitrary number of lists.
"""
# From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/159975
result = [[]]
for arg in args:
result = [x + [y] for x in result for y in arg]
return result
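# Example:
#   crossprod ([1,2], ['a','b'])  =>  [[1,'a'], [1,'b'], [2,'a'], [2,'b']]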
def extract (fin, seqs_wanted, dtd=False, fullscan=False, keepends=False):
"""
Returns an iterator that will return the text lines in
xml file 'fin' for the entries given in 'seqs_wanted'.
Each call (of the iterator's next() method) will return
a 2-tuple: the first item is the seq number of the entry
and the second item is list of text lines the comprise
the entry. The lines are exactly as read from 'fin'
(i.e. if 'fin' is a standard file object, the lines
will have the encoding of the file (typically utf-8)
and contain line terminators ('\n'). Entries are returned
in the order they are encountered in the input file,
regardless of the order given in 'seqs_wanted'. Comments
within an entry will be returned as part of that entry, but
comments between entries are inaccessible.
If the 'dtd' parameter is true, the first call will return
a 2-tuple whose first item is a string naming the root tag
(typically "JMdict" or JMnedict"; it is needed by the caller
so that a correct closing tag can be written), and a list
of the lines of the input file's DTD.
Note that this function does *not* actually parse the xml;
it relies on the lexical characteristics of the jmdict and
jmnedict files (an entry tag occurs alone on a line, that
an ent_seq element is on a single line, etc) for speed.
If the format of the jmdict files changes, it is likely
that this will fail or return erroneous results.
TO-DO: document those assumptions.
If a requested seq number is not found, a NotFoundError will
be raised after all the found entries have been returned.
fin -- Open file object for the xml file to use.
seqs_wanted -- A list of intermixed jmdict seq numbers or
seq number/count pairs (tuple or list). The seq number
value identifies the entry to return. The count value
gives the number of successive entries including seq
number to return. If the count is not given, 1 is
assumed. Entries will be returned in the order found,
not the order they occur in 'seqs_wanted'.
dtd -- If true, the first returned value will be a 2-tuple.
The first item in it will be a list containing the text
lines of the DTD in the input file (if any). The second
item is a single text string that is the line containing
the root element (will be "<JMdict>\n" for a standard
JMdict file, or "<JMnedict>\n" for a JMnedict file).
fullscan -- Normally this function assumes the input file
entries are in ascending seq number order, and after it
sees a sequence number greater than the highest seq number
in 'seq_wanted', it will stop scanning and report any
unfound seq numbers. If the input file entries are not
ordered, it may be necessary to use 'fullscan' to force
scanning to the end of the file to find all the requested
entries. This can take a long time for large files.
keepends -- If true, retain line terminators and any trailing whitespace.
"""
# Break the seqs_wanted list into two lists: a list of
# the sequence numbers, sorted in ascending order, and an
# equal-length list of the corresponding counts. The
# try/except below is to catch the failure of "len(s)"
# which will happen if 's' is a seq number rather than
# a (seq-number, count) pair.
tmp = []
for s in seqs_wanted:
try:
if len(s) == 2: sv, sc = s
elif len(s) == 1: sv, sc = s[0], 1
else: raise ValueError (s)
except TypeError:
sv, sc = int(s), 1
tmp.append ((sv, sc))
tmp.sort (key=lambda x:x[0])
seqs = [x[0] for x in tmp]; counts = [x[1] for x in tmp]
scanning='in_dtd'; seq=0; lastseq=None; toplev=None;
rettxt = []; count=0;
for line in fin:
# The following "if" clauses are in order of frequency
# of being true for efficiency.
if scanning == 'copy' or scanning == 'nocopy':
if scanning == 'copy':
if keepends: rettxt.append (line)
else: rettxt.append (line.rstrip())
if line.lstrip().startswith ('</entry>'):
if count <= 0 and (not seqs or (seqs[-1] < seq and not fullscan)): break
if count > 0:
yield seq, rettxt; rettxt = []
scanning = 'between_entries'
lastseq = seq
elif scanning == 'between_entries':
if line.lstrip().startswith ('<entry>'):
entryline = line
scanning = 'in_entry'
elif scanning == 'in_entry':
ln = line.lstrip()
if ln.startswith ('<ent_seq>'):
n = ln.find ('</ent_seq>')
if n < 0: raise IOError ('Invalid <ent_seq> element: %s' % ln.strip())
seq = int (ln[9:n])
else:
seq += 1 # Old-style (pre 2014-10) JMnedict has no seq
# numbers, so just count entries.
count = wanted (seq, seqs, counts, count)
if count > 0:
if keepends:
rettxt.append (entryline)
rettxt.append (line)
else:
rettxt.append (entryline.rstrip())
rettxt.append (line.rstrip())
scanning = 'copy'
else: scanning = 'nocopy'
elif scanning == 'in_dtd':
if dtd:
if keepends: rettxt.append (line)
else: rettxt.append (line.rstrip())
ln = line.strip()
if ln.lstrip() == "]>":
scanning = 'after_dtd'
elif scanning == 'after_dtd':
ln = line.strip()
if len(ln) > 2 and ln[0] == '<' and ln[1] != '!':
if dtd:
toplev = line.strip()[1:-1]
yield toplev, rettxt; rettxt = []
scanning = 'between_entries'
else:
raise ValueError (scanning)
if seqs:
raise NotFoundError ("Sequence numbers not found", seqs)
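# Illustrative usage (assumes a file 'jmdict.xml' exists and contains seq 1000090):
#   with open ('jmdict.xml') as fin:
#       for seq, lines in extract (fin, [1000090]):
#           print (seq, '\n'.join (lines))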
def wanted (seq, seqs, counts, count):
""" Helper function for extract().
Return the number of entries to copy."""
if count > 0: count -= 1
s = 0
for n, s in enumerate (seqs):
if s >= seq: break
if s == seq:
count = max (counts[n], count)
del seqs[n]; del counts[n]
return count
def parse_sndfile (
inpf, # (file) An open sound clips XML file.
toptag=False): # (bool) Make first item returned by iterator
# a string giving the name of the top-level
# element.
etiter = iter(ElementTree.iterparse( inpf, ("start","end")))
event, root = next(etiter)
vols = []
for event, elem in etiter:
tag = elem.tag
if tag not in ('avol','asel','aclip'): continue
if event == 'start': lineno = inpf.lineno; continue
if tag == 'aclip': yield do_clip (elem), 'clip', lineno
elif tag == 'asel': yield do_sel (elem), 'sel', lineno
elif tag == 'avol': yield do_vol (elem), 'vol', lineno
def do_vol (elem):
return jdb.Sndvol (id=int(elem.get('id')[1:]),
title=elem.findtext('av_title'), loc=elem.findtext('av_loc'),
type=elem.findtext('av_type'), idstr=elem.findtext('av_idstr'),
corp=elem.findtext('av_corpus'), notes=elem.findtext('av_notes'))
def do_sel (elem):
return jdb.Sndfile (id=int(elem.get('id')[1:]), vol=int(elem.get('vol')[1:]),
title=elem.findtext('as_title'), loc=elem.findtext('as_loc'),
type=elem.findtext('as_type'), notes=elem.findtext('as_notes'))
def do_clip (elem):
return jdb.Snd (id=int(elem.get('id')[1:]), file=int(elem.get('sel')[1:]),
strt=int(elem.findtext('ac_strt')), leng=int(elem.findtext('ac_leng')),
trns=elem.findtext('ac_trns'), notes=elem.findtext('ac_notes'))
def main (args, opts):
jdb.KW = KW = kw.Kwds (kw.std_csv_dir())
KW.__dict__.update (kw.short_vars (KW))
jmparser = Jmparser (KW)
if len(args) >= 1:
inpf = JmdictFile( open( args[0], encoding='utf-8' ))
for tag,entr in jmparser.parse_xmlfile (inpf, xlit=1):
import fmt
print (fmt.entr (entr))
else:
print ("""
No argument given. Please supply the name of a file containing
test entries that will be read, formatted and printed. The entries
must be enclosed in a root element (e.g. <JMdict>...</JMdict>) but
a DTD is not necessary.""", file=sys.stderr)
if __name__ == '__main__': main (sys.argv[1:], None)
|
gpl-2.0
| -779,504,099,350,805,900 | 43.681063 | 98 | 0.534934 | false |
ctogle/make_places
|
mp/make_places/blueprints.py
|
1
|
6334
|
#import make_places.scenegraph as sg
import make_places.fundamental as fu
import make_places.walls as wa
#import make_places.primitives as pr
import mp_utils as mpu
import mp_bboxes as mpbb
import mp_vector as cv
#from make_places.stairs import ramp
#from make_places.stairs import shaft
#from make_places.floors import floor
#from make_places.walls import wall
#from make_places.walls import perimeter
from math import cos
from math import sin
from math import tan
from math import sqrt
import numpy as np
import random as rm
import pdb
class blueprint(fu.base):
def __init__(self, *args, **kwargs):
pass
class door_plan(blueprint):
def __init__(self, position, **kwargs):
self.position = position
self._default_('width',1,**kwargs)
self._default_('height',3,**kwargs)
class window_plan(blueprint):
def __init__(self, position, **kwargs):
self.position = position
self._default_('width',1,**kwargs)
self._default_('height',2,**kwargs)
self._default_('zoffset',1,**kwargs)
def circumscribe_box(corners,outline):
# move any vertex outside of corners to the nearest edge
#
c1,c2,c3,c4 = corners
l = c2.x - c1.x
w = c4.y - c1.y
print 'totally unfinished'
# walk_line makes an xy outline of a building
def walk_line(corners):
c1,c2,c3,c4 = corners
l = c2.x - c1.x
w = c4.y - c1.y
steps = rm.choice([4,5,6,8])
angle = 360.0/steps
turns = [x*angle for x in range(steps)]
stepl = 0.8*min([l,w])
lengs = [stepl]*steps
start = corners[0].copy()
start.translate(cv.vector(l/10.0,w/10.0,0))
outline = [start]
newz = start.z
current_angle = 0.0
for dex in range(steps-1):
l,t = lengs[dex],turns[dex]
current_angle = t
dx = l*cos(fu.to_rad(current_angle))
dy = l*sin(fu.to_rad(current_angle))
#quadr = fu.quadrant(fu.to_rad(current_angle))
#dxsign = 1.0 if quadr in [1,4] else -1.0
#dysign = 1.0 if quadr in [1,2] else -1.0
dxsign = 1.0
dysign = 1.0
last = outline[-1]
newx = last.x + dx*dxsign
newy = last.y + dy*dysign
new = cv.vector(newx,newy,newz)
outline.append(new)
return circumscribe_box(corners,outline)
def outline_test(center):
c1 = cv.vector(-10,-10,0)
c2 = cv.vector( 10,-10,0)
c3 = cv.vector( 10, 10,0)
c4 = cv.vector(-10, 10,0)
corners = [c1,c2,c3,c4]
cv.translate_coords(corners,center)
outy = walk_line(corners)
return outy
class floor_sector(blueprint):
# a sector is a convex 2d projection of a space to be stitched to other
# sectors to form a building floor plan
# should be able to fill a space as necessary
# a building is made of references to a list of floor_plans
# each floor is pointed to a single plan
# the plans are chosen to stack appropriately geometrically
def __init__(self, *args, **kwargs):
self.corners = kwargs['corners']
self.sides = range(len(self.corners))
self.side_count = len(self.sides)
self._default_('doorways',[],**kwargs)
blueprint.__init__(self, *args, **kwargs)
def build(self):
print 'BUILD FLOOR SECTOR!'
pieces = []
rid = False
ww = 0.5
h = 4.0
wargs = {
'rid_top_bottom':rid,
'wall_width':ww,
'wall_height':h,
'gaped':False,
}
ccnt = len(self.corners)
for c in range(1,ccnt):
c1 = self.corners[c-1]
c2 = self.corners[c]
pieces.append(wa.wall(c1,c2,**wargs))
c1,c2 = self.corners[-1],self.corners[0]
pieces.append(wa.wall(c1,c2,**wargs))
for dw in self.doorways: print dw
return pieces
def grow(self):
side = rm.choice(self.sides)
d1 = side + 1 if side < self.side_count - 1 else 0
d2 = side
c1 = self.corners[d1]
c2 = self.corners[d2]
c3,c4 = self.extrude(c1,c2)
dpos = cv.midpoint(c1,c2)
dangz = cv.angle_from_xaxis_xy(self.c1c2)
newcorners = [c1,c2,c3,c4]
newdoorways = [door_plan(position = dpos,orientation = dangz)]
sect = floor_sector(
corners = newcorners,
doorways = newdoorways)
return sect
def extrude(self,c1,c2):
c1c2 = cv.v1_v2(c1,c2)
c1c2n = cv.cross(fu.zhat,c1c2).normalize()
eleng = rm.choice([3,4,5])
c1c2n.scale(cv.vector(eleng,eleng,eleng))
c3 = c2.copy().translate(c1c2n)
c4 = c1.copy().translate(c1c2n)
self.c1c2 = c1c2
self.c1c2n = c1c2n
return c3,c4
class floor_plan(blueprint):
# a floor_plan should generate all parameters necessary to make a building
#
def __init__(self, *args, **kwargs):
self._default_('length',10,**kwargs)
self._default_('width',10,**kwargs)
self._default_('entrance',0,**kwargs)
self.sectors = self.divide_space()
def build(self):
for sector in self.sectors: sector.build()
def main_space(self):
l,w = self.length,self.width
sub_length = rm.choice([0.5*l,0.75*l,l])
sub_width = rm.choice([0.5*w,0.75*w,w])
c1 = cv.vector(0,0,0)
c2 = cv.vector(sub_length,0,0)
c3 = cv.vector(sub_length,sub_width,0)
c4 = cv.vector(0,sub_width,0)
corners = [c1,c2,c3,c4]
if self.entrance == 0:
# south side should be flush
offset = cv.vector(0,0,0)
[c.translate(offset) for c in corners]
dpos = cv.midpoint(c1,c2)
dangz = 0.0
doorways = [door_plan(position = dpos,orientation = dangz)]
else:
print 'nonzero entrance value not implemented!'
sect = floor_sector(corners = corners,doorways = doorways)
#self.doorways = doorways
self.front_door = doorways[0]
return sect
def divide_space(self):
sections = rm.choice([1,2,3])
sectors = [self.main_space()]
for sect in range(sections):
sectors.append(sectors[-1].grow())
print sect
return sectors
def test_bp():
bp = floor_plan()
bp.build()
|
gpl-2.0
| -4,426,654,233,850,615,000 | 26.419913 | 78 | 0.578623 | false |
mesheven/pyOCD
|
pyocd/debug/breakpoints/manager.py
|
1
|
6475
|
"""
mbed CMSIS-DAP debugger
Copyright (c) 2015-2017 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ...core.target import Target
import logging
##
# @brief Manages hardware and software breakpoints for a core.
class BreakpointManager(object):
## Number of hardware breakpoints to try to keep available.
MIN_HW_BREAKPOINTS = 0
def __init__(self, core):
self._breakpoints = {}
self._core = core
self._fpb = None
self._providers = {}
def add_provider(self, provider, type):
self._providers[type] = provider
if type == Target.BREAKPOINT_HW:
self._fpb = provider
## @brief Return a list of all breakpoint addresses.
def get_breakpoints(self):
return self._breakpoints.keys()
def find_breakpoint(self, addr):
return self._breakpoints.get(addr, None)
## @brief Set a hardware or software breakpoint at a specific location in memory.
#
# @retval True Breakpoint was set.
# @retval False Breakpoint could not be set.
def set_breakpoint(self, addr, type=Target.BREAKPOINT_AUTO):
logging.debug("set bkpt type %d at 0x%x", type, addr)
# Clear Thumb bit in case it is set.
addr = addr & ~1
in_hw_bkpt_range = (self._fpb is not None) and (self._fpb.can_support_address(addr))
fbp_available = ((self._fpb is not None) and
(self._fpb.available_breakpoints() > 0))
fbp_below_min = ((self._fpb is None) or
(self._fpb.available_breakpoints() <= self.MIN_HW_BREAKPOINTS))
# Check for an existing breakpoint at this address.
bp = self.find_breakpoint(addr)
if bp is not None:
return True
if self._core.memory_map is None:
# No memory map - fallback to hardware breakpoints.
type = Target.BREAKPOINT_HW
is_flash = False
is_ram = False
else:
# Look up the memory type for the requested address.
region = self._core.memory_map.get_region_for_address(addr)
if region is not None:
is_flash = region.is_flash
is_ram = region.is_ram
else:
# No memory region - fallback to hardware breakpoints.
type = Target.BREAKPOINT_HW
is_flash = False
is_ram = False
# Determine best type to use if auto.
if type == Target.BREAKPOINT_AUTO:
# Use sw breaks for:
# 1. Addresses outside the supported FPBv1 range of 0-0x1fffffff
# 2. RAM regions by default.
# 3. Number of remaining hw breaks are at or less than the minimum we want to keep.
#
# Otherwise use hw.
if not in_hw_bkpt_range or is_ram or fbp_below_min:
type = Target.BREAKPOINT_SW
else:
type = Target.BREAKPOINT_HW
logging.debug("using type %d for auto bp", type)
# Revert to sw bp if out of hardware breakpoint range.
if (type == Target.BREAKPOINT_HW) and not in_hw_bkpt_range:
if is_ram:
logging.debug("using sw bp instead because of unsupported addr")
type = Target.BREAKPOINT_SW
else:
logging.debug("could not fallback to software breakpoint")
return False
# Revert to hw bp if region is flash.
if is_flash:
if in_hw_bkpt_range and fbp_available:
logging.debug("using hw bp instead because addr is flash")
type = Target.BREAKPOINT_HW
else:
logging.debug("could not fallback to hardware breakpoint")
return False
# Set the bp.
try:
provider = self._providers[type]
bp = provider.set_breakpoint(addr)
except KeyError:
raise RuntimeError("Unknown breakpoint type %d" % type)
if bp is None:
return False
# Save the bp.
self._breakpoints[addr] = bp
return True
## @brief Remove a breakpoint at a specific location.
def remove_breakpoint(self, addr):
try:
logging.debug("remove bkpt at 0x%x", addr)
# Clear Thumb bit in case it is set.
addr = addr & ~1
# Get bp and remove from dict.
bp = self._breakpoints.pop(addr)
assert bp.provider is not None
bp.provider.remove_breakpoint(bp)
except KeyError:
logging.debug("Tried to remove breakpoint 0x%08x that wasn't set" % addr)
def get_breakpoint_type(self, addr):
bp = self.find_breakpoint(addr)
return bp.type if (bp is not None) else None
def filter_memory(self, addr, size, data):
for provider in [p for p in self._providers.values() if p.do_filter_memory]:
data = provider.filter_memory(addr, size, data)
return data
def filter_memory_unaligned_8(self, addr, size, data):
for provider in [p for p in self._providers.values() if p.do_filter_memory]:
for i, d in enumerate(data):
data[i] = provider.filter_memory(addr + i, 8, d)
return data
def filter_memory_aligned_32(self, addr, size, data):
for provider in [p for p in self._providers.values() if p.do_filter_memory]:
for i, d in enumerate(data):
data[i] = provider.filter_memory(addr + i, 32, d)
return data
def remove_all_breakpoints(self):
for bp in self._breakpoints.values():
bp.provider.remove_breakpoint(bp)
self._breakpoints = {}
self._flush_all()
def _flush_all(self):
# Flush all providers.
for provider in self._providers.values():
provider.flush()
def flush(self):
try:
# Flush all providers.
self._flush_all()
finally:
pass
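# Illustrative usage sketch (the core and provider objects are hypothetical,
# supplied elsewhere by pyOCD):
#   manager = BreakpointManager(core)
#   manager.add_provider(fpb, Target.BREAKPOINT_HW)
#   manager.add_provider(sw_provider, Target.BREAKPOINT_SW)
#   manager.set_breakpoint(0x08000130)      # type defaults to BREAKPOINT_AUTO
#   manager.remove_breakpoint(0x08000130)
#   manager.flush()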
|
apache-2.0
| -6,488,740,294,680,375,000 | 34 | 96 | 0.586873 | false |
kadamski/func
|
func/minion/modules/func_module.py
|
1
|
4442
|
##
## Copyright 2007, Red Hat, Inc
## see AUTHORS
##
## This software may be freely redistributed under the terms of the GNU
## general public license.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
import inspect
from func import logger
from certmaster.config import read_config, BaseConfig
from func.commonconfig import FuncdConfig
from func.utils import is_public_valid_method
from func.minion.func_arg import * #the arg getter stuff
class FuncModule(object):
# the version is meant to be overridden by each module
version = "0.0.0"
api_version = "0.0.0"
description = "No Description provided"
class Config(BaseConfig):
pass
def __init__(self):
config_file = '/etc/func/minion.conf'
self.config = read_config(config_file, FuncdConfig)
self.__init_log()
self.__base_methods = {
# __'s so we don't clobber useful names
"module_version" : self.__module_version,
"module_api_version" : self.__module_api_version,
"module_description" : self.__module_description,
"list_methods" : self.__list_methods,
"get_method_args" : self.__get_method_args,
}
self.__init_options()
def __init_log(self):
log = logger.Logger()
self.logger = log.logger
def __init_options(self):
options_file = '/etc/func/modules/'+self.__class__.__name__+'.conf'
self.options = read_config(options_file, self.Config)
return
def register_rpc(self, handlers, module_name):
# add the internal methods, note that this means they
# can get clobbered by subclass versions
for meth in self.__base_methods:
handlers["%s.%s" % (module_name, meth)] = self.__base_methods[meth]
# register our module's handlers
for name, handler in self.__list_handlers().items():
handlers["%s.%s" % (module_name, name)] = handler
def __list_handlers(self):
""" Return a dict of { handler_name, method, ... }.
All methods that do not begin with an underscore will be exposed.
We also make sure to not expose our register_rpc method.
"""
handlers = {}
for attr in dir(self):
if self.__is_public_valid_method(attr):
handlers[attr] = getattr(self, attr)
return handlers
def __list_methods(self):
return self.__list_handlers().keys() + self.__base_methods.keys()
def __module_version(self):
return self.version
def __module_api_version(self):
return self.api_version
def __module_description(self):
return self.description
def __is_public_valid_method(self,attr):
return is_public_valid_method(self, attr, blacklist=['register_rpc', 'register_method_args'])
def __get_method_args(self):
"""
Gets arguments with their formats according to ArgCompatibility
class' rules.
@return : dict with args, or raises an Exception if something
goes wrong
"""
tmp_arg_dict = self.register_method_args()
#if it is not implemeted then return empty stuff
if not tmp_arg_dict:
return {}
#see if user tried to register an unimplemented method :)
for method in tmp_arg_dict.iterkeys():
if not hasattr(self,method):
raise NonExistingMethodRegistered("%s is not in %s "%(method,self.__class__.__name__))
#create argument validation instance
self.arg_comp = ArgCompatibility(tmp_arg_dict)
#see if all registered arguments are there
for method in tmp_arg_dict.iterkeys():
self.arg_comp.is_all_arguments_registered(self,method,tmp_arg_dict[method]['args'])
#see if the options that were used are OK..
self.arg_comp.validate_all()
return tmp_arg_dict
def register_method_args(self):
"""
This is the method users should override in their modules
in order to make their method arguments available to the
Overlord. If they don't provide it nothing breaks; this
version in the base class is simply called instead.
@return : empty {}
"""
# returned empty so the caller knows they didn't implement it
return {}
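# Illustrative sketch of a minion module built on this base class (module
# name, method and argument details are hypothetical):
#
#   class Echo(func_module.FuncModule):
#       version = "0.0.1"
#       description = "Returns whatever it is given."
#
#       def run(self, text):
#           return text
#
#       def register_method_args(self):
#           return {'run': {'args': {'text': {'type': 'string',
#                                             'optional': False}},
#                           'description': "Return the given text."}}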
|
gpl-2.0
| -3,535,112,327,350,250,500 | 32.651515 | 102 | 0.615714 | false |
jeremiahyan/odoo
|
addons/survey/tests/test_survey_flow.py
|
1
|
5648
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.survey.tests import common
from odoo.tests import tagged
from odoo.tests.common import HttpCase
@tagged('-at_install', 'post_install', 'functional')
class TestSurveyFlow(common.TestSurveyCommon, HttpCase):
def _format_submission_data(self, page, answer_data, additional_post_data):
post_data = {}
post_data['page_id'] = page.id
for question_id, answer_vals in answer_data.items():
question = page.question_ids.filtered(lambda q: q.id == question_id)
post_data.update(self._prepare_post_data(question, answer_vals['value'], post_data))
post_data.update(**additional_post_data)
return post_data
def test_flow_public(self):
# Step: survey manager creates the survey
# --------------------------------------------------
with self.with_user('survey_manager'):
survey = self.env['survey.survey'].create({
'title': 'Public Survey for Tarte Al Djotte',
'access_mode': 'public',
'users_login_required': False,
'questions_layout': 'page_per_section',
})
# First page is about customer data
page_0 = self.env['survey.question'].create({
'is_page': True,
'sequence': 1,
'title': 'Page1: Your Data',
'survey_id': survey.id,
})
page0_q0 = self._add_question(
page_0, 'What is your name', 'text_box',
comments_allowed=False,
constr_mandatory=True, constr_error_msg='Please enter your name', survey_id=survey.id)
page0_q1 = self._add_question(
page_0, 'What is your age', 'numerical_box',
comments_allowed=False,
constr_mandatory=True, constr_error_msg='Please enter your name', survey_id=survey.id)
# Second page is about tarte al djotte
page_1 = self.env['survey.question'].create({
'is_page': True,
'sequence': 4,
'title': 'Page2: Tarte Al Djotte',
'survey_id': survey.id,
})
page1_q0 = self._add_question(
page_1, 'What do you like most in our tarte al djotte', 'multiple_choice',
labels=[{'value': 'The gras'},
{'value': 'The bette'},
{'value': 'The tout'},
{'value': 'The regime is fucked up'}], survey_id=survey.id)
# fetch starting data to check only newly created data during this flow
answers = self.env['survey.user_input'].search([('survey_id', '=', survey.id)])
answer_lines = self.env['survey.user_input.line'].search([('survey_id', '=', survey.id)])
self.assertEqual(answers, self.env['survey.user_input'])
self.assertEqual(answer_lines, self.env['survey.user_input.line'])
# Step: customer takes the survey
# --------------------------------------------------
# Customer opens start page
r = self._access_start(survey)
self.assertResponse(r, 200, [survey.title])
# -> this should have generated a new answer with a token
answers = self.env['survey.user_input'].search([('survey_id', '=', survey.id)])
self.assertEqual(len(answers), 1)
answer_token = answers.access_token
self.assertTrue(answer_token)
self.assertAnswer(answers, 'new', self.env['survey.question'])
# Customer begins survey with first page
r = self._access_page(survey, answer_token)
self.assertResponse(r, 200)
self.assertAnswer(answers, 'new', self.env['survey.question'])
csrf_token = self._find_csrf_token(r.text)
r = self._access_begin(survey, answer_token)
self.assertResponse(r, 200)
# Customer submit first page answers
answer_data = {
page0_q0.id: {'value': ['Alfred Poilvache']},
page0_q1.id: {'value': ['44.0']},
}
post_data = self._format_submission_data(page_0, answer_data, {'csrf_token': csrf_token, 'token': answer_token, 'button_submit': 'next'})
r = self._access_submit(survey, answer_token, post_data)
self.assertResponse(r, 200)
answers.invalidate_cache() # TDE note: necessary as lots of sudo in controllers messing with cache
# -> this should have generated answer lines
self.assertAnswer(answers, 'in_progress', page_0)
self.assertAnswerLines(page_0, answers, answer_data)
# Customer is redirected on second page and begins filling it
r = self._access_page(survey, answer_token)
self.assertResponse(r, 200)
csrf_token = self._find_csrf_token(r.text)
# Customer submit second page answers
answer_data = {
page1_q0.id: {'value': [page1_q0.suggested_answer_ids.ids[0], page1_q0.suggested_answer_ids.ids[1]]},
}
post_data = self._format_submission_data(page_1, answer_data, {'csrf_token': csrf_token, 'token': answer_token, 'button_submit': 'next'})
r = self._access_submit(survey, answer_token, post_data)
self.assertResponse(r, 200)
answers.invalidate_cache() # TDE note: necessary as lots of sudo in controllers messing with cache
# -> this should have generated answer lines and closed the answer
self.assertAnswer(answers, 'done', page_1)
self.assertAnswerLines(page_1, answers, answer_data)
|
gpl-3.0
| 752,808,867,488,364,400 | 46.066667 | 145 | 0.581445 | false |
pombredanne/pyp2rpm
|
pyp2rpm/name_convertor.py
|
1
|
7808
|
import logging
import re
try:
import dnf
except ImportError:
dnf = None
from pyp2rpm import settings
from pyp2rpm import utils
from pyp2rpm.logger import LoggerWriter
logger = logging.getLogger(__name__)
class NameConvertor(object):
def __init__(self, distro):
self.distro = distro
self.reg_start = re.compile(r'^python(\d*|)-(.*)')
self.reg_end = re.compile(r'(.*)-(python)(\d*|)$')
@staticmethod
def rpm_versioned_name(name, version, default_number=False):
"""Properly versions the name.
For example:
rpm_versioned_name('python-foo', '26') will return python26-foo
rpm_versioned_name('pyfoo', '3') will return python3-pyfoo
If version is same as settings.DEFAULT_PYTHON_VERSION, no change is done.
Args:
name: name to version
version: version or None
Returns:
Versioned name or the original name if given version is None.
"""
regexp = re.compile(r'^python(\d*|)-(.*)')
if not version or version == settings.DEFAULT_PYTHON_VERSION and not default_number:
found = regexp.search(name)
# second check is to avoid renaming of python2-devel to python-devel
if found and found.group(2) != 'devel':
return 'python-{0}'.format(regexp.search(name).group(2))
return name
versioned_name = name
if version:
if regexp.search(name):
versioned_name = re.sub(r'^python(\d*|)-', 'python{0}-'.format(version), name)
else:
versioned_name = 'python{0}-{1}'.format(version, name)
return versioned_name
def rpm_name(self, name, python_version=None):
"""Returns name of the package coverted to (possibly) correct package
name according to Packaging Guidelines.
Args:
name: name to convert
python_version: python version for which to retrieve the name of the package
Returns:
Converted name of the package, that should be in line with Fedora Packaging Guidelines.
If for_python is not None, the returned name is in form python%(version)s-%(name)s
"""
logger.debug('Converting name: {0} to rpm name.'.format(name))
rpmized_name = self.base_name(name)
rpmized_name = 'python-{0}'.format(rpmized_name)
if self.distro == 'mageia':
rpmized_name = rpmized_name.lower()
logger.debug('Rpmized name of {0}: {1}.'.format(name, rpmized_name))
return NameConvertor.rpm_versioned_name(rpmized_name, python_version)
def base_name(self, name):
"""Removes any python prefixes of suffixes from name if present."""
base_name = name.replace('.', "-")
# remove python prefix if present
found_prefix = self.reg_start.search(name)
if found_prefix:
base_name = found_prefix.group(2)
# remove -pythonXY like suffix if present
found_end = self.reg_end.search(name.lower())
if found_end:
base_name = found_end.group(1)
return base_name
class NameVariants(object):
"""Class to generate variants of python package name and choose
most likely correct one.
"""
def __init__(self, name, version, py_init=True):
self.name = name
self.version = version
self.variants = {}
if py_init:
self.names_init()
self.variants_init()
def find_match(self, name):
for variant in ['python_ver_name', 'pyver_name', 'name_python_ver', 'raw_name']:
# iterate over all variants and store the name in self.variants if it matches
if canonical_form(name) == canonical_form(getattr(self, variant)):
self.variants[variant] = name
def merge(self, other):
"""Merges object with other NameVariants object, not set values
of self.variants are replace by values from other object.
"""
if not isinstance(other, NameVariants):
raise TypeError("NameVariants isinstance can be merge with"
"other isinstance of the same class")
for key in self.variants:
self.variants[key] = self.variants[key] or other.variants[key]
return self
def names_init(self):
self.python_ver_name = 'python{0}-{1}'.format(self.version, self.name)
self.pyver_name = self.name if self.name.startswith('py') else 'py{0}{1}'.format(
self.version, self.name)
self.name_python_ver = '{0}-python{1}'.format(self.name, self.version)
self.raw_name = self.name
def variants_init(self):
self.variants = {'python_ver_name': None,
'pyver_name': None,
'name_python_ver': None,
'raw_name': None}
@property
def best_matching(self):
return (self.variants['python_ver_name'] or
self.variants['pyver_name'] or
self.variants['name_python_ver'] or
self.variants['raw_name'])
class DandifiedNameConvertor(NameConvertor):
"""Name convertor based on DNF API query, checks if converted
name of the package exists in Fedora repositories. If it doesn't, searches
for the correct variant of the name.
"""
def __init__(self, *args):
super(DandifiedNameConvertor, self).__init__(*args)
if dnf is None or self.distro != 'fedora':
raise RuntimeError("DandifiedNameConvertor needs optional require dnf, and "
"can be used for Fedora distro only.")
with dnf.Base() as base:
with utils.RedirectStdStreams(stdout=LoggerWriter(logger.debug),
stderr=LoggerWriter(logger.warning)):
RELEASEVER = dnf.rpm.detect_releasever(base.conf.installroot)
base.conf.substitutions['releasever'] = RELEASEVER
base.read_all_repos()
base.fill_sack()
self.query = base.sack.query()
def rpm_name(self, name, python_version=None):
"""Checks if name converted using superclass rpm_name_method match name
of package in the query. Searches for correct name if it doesn't.
"""
original_name = name
converted = super(DandifiedNameConvertor, self).rpm_name(name, python_version)
python_query = self.query.filter(name__substr=['python', 'py', original_name,
canonical_form(original_name)])
if converted in [pkg.name for pkg in python_query]:
logger.debug("Converted name exists")
return converted
logger.debug("Converted name not found, searches for correct form")
not_versioned_name = NameVariants(self.base_name(original_name), '')
versioned_name = NameVariants(self.base_name(original_name), python_version)
if self.base_name(original_name).startswith("py"):
nonpy_name = NameVariants(self.base_name(
original_name)[2:], python_version)
for pkg in python_query:
versioned_name.find_match(pkg.name)
not_versioned_name.find_match(pkg.name)
if 'nonpy_name' in locals():
nonpy_name.find_match(pkg.name)
if 'nonpy_name' in locals():
versioned_name = versioned_name.merge(nonpy_name)
correct_form = versioned_name.merge(not_versioned_name).best_matching
logger.debug("Most likely correct form of the name {0}.".format(correct_form))
return correct_form or converted
def canonical_form(name):
return name.lower().replace('-', '').replace('_', '')
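# Example:
#   canonical_form('Python-Dateutil_2')  =>  'pythondateutil2'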
|
mit
| 7,444,019,662,291,841,000 | 37.845771 | 99 | 0.60105 | false |
rfinnie/unladen
|
unladen/httpd/handlers/status.py
|
1
|
1333
|
#!/usr/bin/env python
# Unladen
# Copyright (C) 2014 Ryan Finnie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
try:
import http.client as httplib
except ImportError:
import httplib
class UnladenRequestHandler():
def __init__(self, http):
self.http = http
def process_request(self, reqpath):
"""Generic status reply."""
r_fn = reqpath.strip('/').split('/')
if not r_fn[0] == 'status':
return False
out = 'OK\n'
self.http.send_response(httplib.OK)
self.http.send_header('Content-Length', len(out))
self.http.end_headers()
self.http.wfile.write(out)
return True
|
gpl-2.0
| 3,672,747,148,127,936,500 | 31.512195 | 73 | 0.684921 | false |
giruenf/GRIPy
|
app/app_utils.py
|
1
|
29345
|
import re
import os
import json
import importlib
import timeit
import inspect
import collections
from enum import Enum
from pathlib import Path
import numpy as np
from matplotlib.cm import cmap_d
import wx
import app
import fileio
from classes.om.base.manager import ObjectManager
from app import log
class GripyBitmap(wx.Bitmap):
def __init__(self, path_to_bitmap=None):
if path_to_bitmap is None:
super().__init__()
return
if os.path.exists(path_to_bitmap):
full_file_name = path_to_bitmap
elif os.path.exists(os.path.join(app.ICONS_PATH, \
path_to_bitmap)):
full_file_name = os.path.join(app.ICONS_PATH, path_to_bitmap)
else:
raise Exception('ERROR: Wrong bitmap path [{}, {}].'.format( \
app.ICONS_PATH, path_to_bitmap)
)
super().__init__(full_file_name)
class GripyIcon(wx.Icon):
def __init__(self, path_to_bitmap=None, type_=wx.BITMAP_TYPE_ANY):
# print(PurePath(app.ICONS_PATH, path_to_bitmap), 'r')
if path_to_bitmap is not None:
if Path(path_to_bitmap).exists():
pass
elif Path(app.ICONS_PATH, path_to_bitmap).exists():
path_to_bitmap = Path(app.ICONS_PATH, path_to_bitmap)
else:
raise Exception('ERROR: Wrong bitmap path.')
super().__init__(path_to_bitmap, type_)
def calc_well_time_from_depth(event, well_uid):
OM = ObjectManager()
well = OM.get(well_uid)
vp = None
for log_obj in OM.list('log', well.uid):
if log_obj.datatype == 'Velocity':
vp = log_obj
break
if vp is None:
raise Exception('ERROR [calc_prof_tempo]: Vp log not found.')
index_set = OM.get(vp.index_set_uid)
md = index_set.get_z_axis_indexes_by_type('MD')[0]
#
if md.data[0] != 0.0:
return
owt = [0.0]
#
for idx in range(1, len(md.data)):
if vp.data[idx - 1] == np.nan:
raise Exception('ERROR [calc_prof_tempo]: Found np.nan on Vp[{}] '.format(idx - 1))
diff_prof = md.data[idx] - md.data[idx - 1]
value = (float(diff_prof) / vp.data[idx - 1]) * 1000.0 # To milliseconds
value = owt[idx - 1] + value
owt.append(value)
#
owt = np.array(owt)
twt = owt * 2.0
#
print('\nOWT:', owt)
#
owt_index = OM.new('data_index', 0, 'One Way Time', 'TIME', 'ms', data=owt)
OM.add(owt_index, index_set.uid)
#
twt_index = OM.new('data_index', 0, 'Two Way Time', 'TWT', 'ms', data=twt)
OM.add(twt_index, index_set.uid)
#
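# Worked example for the OWT loop above (illustrative numbers): with
# MD = [0, 10, 20] (m) and Vp = [2000, 2500, ...] (m/s),
#   owt = [0.0, 0.0 + 10/2000*1000, 5.0 + 10/2500*1000] = [0.0, 5.0, 9.0] ms
#   twt = 2 * owt = [0.0, 10.0, 18.0] ms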
def load_segy(event, filename, new_obj_name='', comparators_list=None,
iline_byte=9, xline_byte=21, offset_byte=37, tid='seismic',
datatype='amplitude', parentuid=None):
OM = ObjectManager()
disableAll = wx.WindowDisabler()
wait = wx.BusyInfo("Loading SEG-Y file...")
#
try:
print("\nLoading SEG-Y file...")
segy_file = fileio.segy.SEGYFile(filename)
# segy_file.print_dump()
# """
segy_file.read(comparators_list)
segy_file.organize_3D_data(iline_byte, xline_byte, offset_byte)
#
print('segy_file.traces.shape:', segy_file.traces.shape)
#
#
seis_like_obj = OM.new(tid, segy_file.traces, name=new_obj_name,
datatype=datatype
)
if not OM.add(seis_like_obj, parentuid):
raise Exception('Object was not added. tid={}'.format(tid))
#
#
z_index = OM.new('data_index',
name='Time',
datatype='TWT',
unit='ms',
start=0.0,
step=(segy_file.sample_rate * 1000),
samples=segy_file.number_of_samples
)
OM.add(z_index, seis_like_obj.uid)
#
try:
offset_index = OM.new('data_index',
segy_file.dimensions[2],
name='Offset',
datatype='OFFSET',
unit='m'
)
OM.add(offset_index, seis_like_obj.uid)
next_dim = 2
except Exception as e:
next_dim = 1
#
xline_index = OM.new('data_index',
segy_file.dimensions[1],
name='X Line',
datatype='X_LINE'
)
if OM.add(xline_index, seis_like_obj.uid):
next_dim += 1
#
iline_index = OM.new('data_index',
segy_file.dimensions[0],
name='I Line',
datatype='I_LINE'
)
OM.add(iline_index, seis_like_obj.uid)
#
seis_like_obj._create_data_index_map(
[iline_index.uid],
[xline_index.uid],
[offset_index.uid],
[z_index.uid]
)
print('seis_like_obj.traces.shape:', seis_like_obj.data.shape)
# """
except Exception as e:
raise e
finally:
del wait
del disableAll
#
# TODO: Check for a better option in Python 3.6
#
CallerInfo = collections.namedtuple('CallerInfo',
['object_', 'class_', 'module', 'function_name',
'filename', 'line_number', 'line_code'
]
)
def get_callers_stack():
"""
Based on: https://gist.github.com/techtonik/2151727 with some
changes.
Get a list with caller modules, objects and functions in the stack
with list index 0 being the latest call.
Returns:
list(collections.namedtuple('CallerInfo',
['object_', 'class_', 'module', 'function_name',
'filename', 'line_number', 'line_code']))
"""
ret_list = []
print('app_utils.get_callers_stack')
try:
stack = inspect.stack()
for i in range(1, len(stack)):
fi = stack[i]
module_ = None
obj = fi.frame.f_locals.get('self', None)
class_ = fi.frame.f_locals.get('__class__', None)
if obj:
module_ = inspect.getmodule(obj)
if not class_ and obj:
class_ = obj.__class__
ret_list.append(
CallerInfo(object_=obj, class_=class_, module=module_,
function_name=fi.function, filename=fi.filename,
line_number=fi.lineno, line_code=fi.code_context,
# index=fi.index,
# traceback=traceback, f_locals=fi.frame.f_locals
)
)
if fi.frame.f_locals.get('__name__') == '__main__':
break
except Exception as e:
print(e)
raise
return ret_list
def get_class_full_name(obj):
try:
full_name = obj.__class__.__module__ + "." + obj.__class__.__name__
except Exception as e:
msg = 'ERROR in function app.app_utils.get_class_full_name().'
log.exception(msg)
raise e
return full_name
def get_string_from_function(function_):
if not callable(function_):
msg = 'ERROR: Given input is not a function: {}.'.format(str(function_))
log.error(msg)
raise Exception(msg)
return function_.__module__ + '.' + function_.__name__
def get_function_from_filename(full_filename, function_name):
try:
# print ('\nget_function_from_filename', full_filename, function_name)
if function_name == '<module>':
return None
rel_path = os.path.relpath(full_filename, app.BASE_PATH)
module_rel_path = os.path.splitext(rel_path)[0]
# print (module_rel_path)
module_str = '.'.join(module_rel_path.split(os.path.sep))
# print (module_str)
module_ = importlib.import_module(module_str)
# print (module_, function_name)
function_ = getattr(module_, function_name)
return function_
except:
raise
def get_function_from_string(fullpath_function):
try:
# print ('\nget_function_from_string:', fullpath_function)
module_str = '.'.join(fullpath_function.split('.')[:-1])
function_str = fullpath_function.split('.')[-1]
# print ('importing module:', module_str)
module_ = importlib.import_module(module_str)
# print ('getting function:', function_str, '\n')
function_ = getattr(module_, function_str)
return function_
except Exception as e:
msg = 'ERROR in function app.app_utils.get_function_from_string({}).'.format(fullpath_function)
log.exception(msg)
print(msg)
raise e
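# Example:
#   get_function_from_string('os.path.join') is os.path.join  =>  True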
class Chronometer(object):
def __init__(self):
self.start_time = timeit.default_timer()
def end(self):
self.total = timeit.default_timer() - self.start_time
return 'Execution in {:0.3f}s'.format(self.total)
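# Typical usage:
#   chrono = Chronometer()
#   ...                  # code being timed
#   print(chrono.end())  # e.g. 'Execution in 0.127s' (elapsed time will vary)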
# Phoenix DropTarget code
class DropTarget(wx.DropTarget):
def __init__(self, _test_func, callback=None):
wx.DropTarget.__init__(self)
self.data = wx.CustomDataObject('obj_uid')
self.SetDataObject(self.data)
self._test_func = _test_func
self._callback = callback
def OnDrop(self, x, y):
return True
def OnData(self, x, y, defResult):
obj_uid = self._get_object_uid()
if self._callback:
wx.CallAfter(self._callback, obj_uid)
return defResult
def OnDragOver(self, x, y, defResult):
obj_uid = self._get_object_uid()
if obj_uid:
if self._test_func(obj_uid):
return defResult
return wx.DragNone
def _get_object_uid(self):
if self.GetData():
obj_uid_bytes = self.data.GetData().tobytes()
obj_uid_str = obj_uid_bytes.decode()
if obj_uid_str:
obj_uid = parse_string_to_uid(obj_uid_str)
return obj_uid
return None
class GripyEnum(Enum):
def __repr__(self):
# return '{} object, name: {}, value: {}'.format(self.__class__, self.name, self.value)
return str(self.value)
def __eq__(self, other):
if type(other) is self.__class__:
return self.value is other.value
return self.value is other
def __lt__(self, other):
if type(other) is self.__class__:
return self.value < other.value
return self.value < other
def __le__(self, other):
return self.__eq__(other) or self.__lt__(other)
def __gt__(self, other):
if type(other) is self.__class__:
return self.value > other.value
return self.value > other
def __ge__(self, other):
return self.__eq__(other) or self.__gt__(other)
class GripyEnumBitwise(GripyEnum):
def __or__(self, other):
if type(other) is self.__class__:
return self.value | other.value
return self.value | other
def __ror__(self, other):
return self.__or__(other)
class WellPlotState(GripyEnum):
NORMAL_TOOL = 0
SELECTION_TOOL = 1
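# Examples of the comparison rules defined in GripyEnum:
#   WellPlotState.NORMAL_TOOL == 0                              =>  True
#   WellPlotState.SELECTION_TOOL > WellPlotState.NORMAL_TOOL    =>  True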
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
NUMBER_RE = re.compile(
r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
(re.VERBOSE | re.MULTILINE | re.DOTALL)
)
class GripyJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, wx.Point):
return 'wx.Point' + str(obj)
elif isinstance(obj, wx.Size):
return 'wx.Size' + str(obj)
elif isinstance(obj, GripyEnum):
return str(obj.value)
elif callable(obj):
return get_string_from_function(obj)
try:
return str(obj)
except:
return super(GripyJSONEncoder, self).default(obj)
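# Illustrative round trip (assuming wx.Point's str() form is "(10, 20)"):
#   json.dumps({'pos': wx.Point(10, 20)}, cls=GripyJSONEncoder)
#       =>  '{"pos": "wx.Point(10, 20)"}'
#   json.loads('{"pos": "wx.Point(10, 20)"}', cls=GripyJSONDecoder)
#       =>  {'pos': wx.Point(10, 20)}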
def clean_path_str(path):
# path = path.replace('\\' ,'/')
path = path.encode('ascii', 'ignore')  # drop non-ascii characters
path = path.decode('ascii')  # back to str; the Python 2 'string-escape' codec no longer exists
return path
def write_json_file(py_object, fullfilename):
fullfilename = clean_path_str(fullfilename)
fullfilename = os.path.normpath(fullfilename)
directory = os.path.dirname(fullfilename)
if not os.path.exists(directory):
os.makedirs(directory)
msg = 'App.app_utils.write_json_file has created directory: {}'.format(directory)
# log.debug(msg)
print(msg)
f = open(fullfilename, 'w')
f.write(json.dumps(py_object, indent=4, cls=GripyJSONEncoder))
f.close()
class GripyJSONDecoder(json.JSONDecoder):
def decode(self, s, _w=WHITESPACE.match):
self.scan_once = gripy_make_scanner(self)
return super(GripyJSONDecoder, self).decode(s, _w)
def gripy_make_scanner(context):
parse_object = context.parse_object
parse_array = context.parse_array
parse_string = context.parse_string
match_number = NUMBER_RE.match
# encoding = context.encoding
strict = context.strict
parse_float = context.parse_float
parse_int = context.parse_int
parse_constant = context.parse_constant
object_hook = context.object_hook
object_pairs_hook = context.object_pairs_hook
def _scan_once(string, idx):
try:
nextchar = string[idx]
except IndexError:
raise StopIteration
if nextchar == '"':
if string[idx:idx + 10] == '"wx.Point(':
return GripyJSONParser((string, idx + 10), _scan_once, wx.Point)
elif string[idx:idx + 9] == '"wx.Size(':
return GripyJSONParser((string, idx + 9), _scan_once, wx.Size)
return parse_string(string, idx + 1, strict)
elif nextchar == '{':
return parse_object((string, idx + 1), strict,
_scan_once, object_hook, object_pairs_hook)
elif nextchar == '[':
return parse_array((string, idx + 1), _scan_once)
elif nextchar == 'n' and string[idx:idx + 4] == 'null':
return None, idx + 4
elif nextchar == 't' and string[idx:idx + 4] == 'true':
return True, idx + 4
elif nextchar == 'f' and string[idx:idx + 5] == 'false':
return False, idx + 5
m = match_number(string, idx)
if m is not None:
integer, frac, exp = m.groups()
if frac or exp:
res = parse_float(integer + (frac or '') + (exp or ''))
else:
res = parse_int(integer)
return res, m.end()
elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
return parse_constant('NaN'), idx + 3
elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
return parse_constant('Infinity'), idx + 8
elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
return parse_constant('-Infinity'), idx + 9
else:
raise StopIteration
return _scan_once
def GripyJSONParser(s_and_end, scan_once, _class, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
s, end = s_and_end
values = []
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
# Look-ahead for trivial empty array
if nextchar == ']':
return values, end + 1
_append = values.append
while True:
try:
value, end = scan_once(s, end)
except StopIteration:
raise ValueError("Expecting object {}, {}".format(s, end))
_append(value)
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == ')' and s[end:end + 1] == '"':
end += 1
break
elif nextchar != ',':
raise ValueError("Expecting ',' delimiter {}, {}".format(s, end))
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
return _class(int(values[0]), int(values[1])), end
def read_json_file(full_filename):
# print ('\nread_json_file:', fullfilename, type(fullfilename))
# fullfilename = fullfilename.replace('\\' ,'/')
# fullfilename = fullfilename.encode('ascii', 'ignore') # in order to save unicode characters
# fullfilename = fullfilename.encode('string-escape')
json_file = open(full_filename, 'r')
state = json.load(json_file, cls=GripyJSONDecoder)
json_file.close()
return state
def parse_string_to_uid(obj_uid_string):
"""
Parse a uid String (which may contain non uid characters like " and \) to
a uid tuple in the format (tid, oid).
Parameters
----------
obj_uid_string : str
The uid String.
Returns
-------
tuple
A pair (tid, oid) which can be a Gripy object identifier.
"""
try:
# print ('parse_string_to_uid:', obj_uid_string)
left_index = obj_uid_string.find('(')
right_index = obj_uid_string.rfind(')')
if left_index == -1 or right_index == -1:
return None
elif right_index < left_index:
return None
obj_uid_string = obj_uid_string[left_index + 1:right_index]
tid, oid = obj_uid_string.split(',')
tid = tid.strip('\'\" ')
oid = int(oid.strip('\'\" '))
return tid, oid
except:
raise
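# Examples:
#   parse_string_to_uid('("log", 12)')     =>  ('log', 12)
#   parse_string_to_uid('no parentheses')  =>  None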
def get_wx_colour_from_seq_string(seq_str):
# tuple or list
if seq_str.startswith('(') or seq_str.startswith('['):
seq_str = seq_str[1:-1]
val = tuple([int(c.strip()) for c in seq_str.split(',')])
color = wx.Colour(val)
print('_get_wx_colour:', color,
color.GetAsString(wx.C2S_HTML_SYNTAX))
return color
return None
# Have colormaps separated into categories:
# http://matplotlib.org/examples/color/colormaps_reference.html
"""
# MPL 1.4/1.5 COLORS
MPL_CATS_CMAPS = [('Perceptually Uniform Sequential', [
'viridis', 'plasma', 'inferno', 'magma']),
('Sequential', [
'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',
'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',
'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn']),
('Sequential (2)', [
'binary', 'gist_yarg', 'gist_gray', 'gray', 'bone', 'pink',
'spring', 'summer', 'autumn', 'winter', 'cool', 'Wistia',
'hot', 'afmhot', 'gist_heat', 'copper']),
('Diverging', [
'PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu',
'RdYlBu', 'RdYlGn', 'Spectral', 'coolwarm', 'bwr', 'seismic']),
('Qualitative', [
'Pastel1', 'Pastel2', 'Paired', 'Accent',
'Dark2', 'Set1', 'Set2', 'Set3',
'tab10', 'tab20', 'tab20b', 'tab20c']),
('Miscellaneous', [
'flag', 'prism', 'ocean', 'gist_earth', 'terrain', 'gist_stern',
'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'brg', 'hsv',
'gist_rainbow', 'rainbow', 'jet', 'nipy_spectral', 'gist_ncar'])]
"""
# MPL 2.0 COLORS
MPL_CATS_CMAPS = [
('Perceptually Uniform Sequential',
['viridis', 'inferno', 'plasma', 'magma']
),
('Sequential',
['Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', 'Greys',
'Oranges', 'OrRd', 'PuBu', 'PuBuGn', 'PuRd', 'Purples',
'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd'
]
),
('Sequential (2)',
['afmhot', 'autumn', 'bone', 'cool', 'copper', 'gist_heat',
'gray', 'hot', 'pink', 'spring', 'summer', 'winter'
]
),
('Diverging',
['BrBG', 'bwr', 'coolwarm', 'PiYG', 'PRGn', 'PuOr',
'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral', 'seismic'
]
),
('Qualitative',
['Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1',
'Set2', 'Set3', 'Vega10', 'Vega20', 'Vega20b', 'Vega20c'
]
),
('Miscellaneous',
['gist_earth', 'terrain', 'ocean', 'gist_stern', 'brg',
'CMRmap', 'cubehelix', 'gnuplot', 'gnuplot2', 'gist_ncar',
'nipy_spectral', 'jet', 'rainbow', 'gist_rainbow', 'hsv',
'flag', 'prism'
]
)
]
# MPL_COLORMAPS = [value for (key, values) in MPL_CATS_CMAPS for value in values]
MPL_COLORMAPS = sorted(cmap_d)
"""
MPL_COLORMAPS = ['Accent', 'Accent_r', 'Blues', 'Blues_r', 'BrBG', 'BrBG_r',
'BuGn', 'BuGn_r', 'BuPu', 'BuPu_r', 'CMRmap', 'CMRmap_r',
'Dark2', 'Dark2_r', 'GnBu', 'GnBu_r', 'Greens', 'Greens_r',
'Greys', 'Greys_r', 'OrRd', 'OrRd_r', 'Oranges', 'Oranges_r',
'PRGn', 'PRGn_r', 'Paired', 'Paired_r', 'Pastel1', 'Pastel1_r',
'Pastel2', 'Pastel2_r', 'PiYG', 'PiYG_r', 'PuBu', 'PuBu_r',
'PuBuGn', 'PuBuGn_r', 'PuOr', 'PuOr_r', 'PuRd', 'PuRd_r',
'Purples', 'Purples_r', 'RdBu', 'RdBu_r', 'RdGy', 'RdGy_r',
'RdPu', 'RdPu_r', 'RdYlBu', 'RdYlBu_r', 'RdYlGn', 'RdYlGn_r',
'Reds', 'Reds_r', 'Set1', 'Set1_r', 'Set2', 'Set2_r',
'Set3', 'Set3_r', 'Spectral', 'Spectral_r', 'Vega10', 'Vega10_r',
'Vega20', 'Vega20_r', 'Vega20b', 'Vega20b_r', 'Vega20c', 'Vega20c_r',
'Wistia', 'Wistia_r', 'YlGn', 'YlGn_r', 'YlGnBu', 'YlGnBu_r',
'YlOrBr', 'YlOrBr_r', 'YlOrRd', 'YlOrRd_r',
'afmhot', 'afmhot_r', 'autumn', 'autumn_r', 'binary', 'binary_r',
'bone', 'bone_r', 'brg', 'brg_r', 'bwr', 'bwr_r',
'cool', 'cool_r', 'coolwarm', 'coolwarm_r', 'copper', 'copper_r',
'cubehelix', 'cubehelix_r', 'flag', 'flag_r',
'gist_earth', 'gist_earth_r', 'gist_gray', 'gist_gray_r',
'gist_heat', 'gist_heat_r', 'gist_ncar', 'gist_ncar_r',
'gist_rainbow', 'gist_rainbow_r', 'gist_stern', 'gist_stern_r',
'gist_yarg', 'gist_yarg_r', 'gnuplot', 'gnuplot2', 'gnuplot2_r',
'gnuplot_r', 'gray', 'gray_r', 'hot', 'hot_r', 'hsv', 'hsv_r',
'inferno', 'inferno_r', 'jet', 'jet_r', 'magma', 'magma_r',
'nipy_spectral', 'nipy_spectral_r', 'ocean', 'ocean_r',
'pink', 'pink_r', 'plasma', 'plasma_r', 'prism', 'prism_r',
'rainbow', 'rainbow_r', 'seismic', 'seismic_r',
'spectral', 'spectral_r', 'spring', 'spring_r',
'summer', 'summer_r', 'terrain', 'terrain_r',
'viridis', 'viridis_r', 'winter', 'winter_r']
"""
###############################################################################
###############################################################################
MPL_COLORS = collections.OrderedDict()
MPL_COLORS['Black'] = None
MPL_COLORS['Maroon'] = None
MPL_COLORS['Green'] = wx.Colour(0, 100, 0) # Dark Green
MPL_COLORS['Olive'] = wx.Colour(128, 128, 0)
MPL_COLORS['Navy'] = None
MPL_COLORS['Purple'] = None
MPL_COLORS['Teal'] = wx.Colour(0, 128, 128)
MPL_COLORS['Gray'] = None
MPL_COLORS['Silver'] = wx.Colour(192, 192, 192)
MPL_COLORS['Red'] = None
MPL_COLORS['Lime'] = wx.Colour(0, 255, 0) # Green
MPL_COLORS['Yellow'] = None
MPL_COLORS['Blue'] = None
MPL_COLORS['Fuchsia'] = wx.Colour(255, 0, 255)
MPL_COLORS['Aqua'] = wx.Colour(0, 255, 255)
MPL_COLORS['White'] = None
MPL_COLORS['SkyBlue'] = wx.Colour(135, 206, 235)
MPL_COLORS['LightGray'] = wx.Colour(211, 211, 211)
MPL_COLORS['DarkGray'] = wx.Colour(169, 169, 169)
MPL_COLORS['SlateGray'] = wx.Colour(112, 128, 144)
MPL_COLORS['DimGray'] = wx.Colour(105, 105, 105)
MPL_COLORS['BlueViolet'] = wx.Colour(138, 43, 226)
MPL_COLORS['DarkViolet'] = wx.Colour(148, 0, 211)
MPL_COLORS['Magenta'] = None
MPL_COLORS['DeepPink'] = wx.Colour(255, 20, 147)
MPL_COLORS['Brown'] = None
MPL_COLORS['Crimson'] = wx.Colour(220, 20, 60)
MPL_COLORS['Firebrick'] = None
MPL_COLORS['DarkRed'] = wx.Colour(139, 0, 0)
MPL_COLORS['DarkSlateGray'] = wx.Colour(47, 79, 79)
MPL_COLORS['DarkSlateBlue'] = wx.Colour(72, 61, 139)
MPL_COLORS['Wheat'] = None
MPL_COLORS['BurlyWood'] = wx.Colour(222, 184, 135)
MPL_COLORS['Tan'] = None
MPL_COLORS['Gold'] = None
MPL_COLORS['Orange'] = None
MPL_COLORS['DarkOrange'] = wx.Colour(255, 140, 0)
MPL_COLORS['Coral'] = None
MPL_COLORS['DarkKhaki'] = wx.Colour(189, 183, 107)
MPL_COLORS['GoldenRod'] = None
MPL_COLORS['DarkGoldenrod'] = wx.Colour(184, 134, 11)
MPL_COLORS['Chocolate'] = wx.Colour(210, 105, 30)
MPL_COLORS['Sienna'] = None
MPL_COLORS['SaddleBrown'] = wx.Colour(139, 69, 19)
MPL_COLORS['GreenYellow'] = wx.Colour(173, 255, 47)
MPL_COLORS['Chartreuse'] = wx.Colour(127, 255, 0)
MPL_COLORS['SpringGreen'] = wx.Colour(0, 255, 127)
MPL_COLORS['MediumSpringGreen'] = wx.Colour(0, 250, 154)
MPL_COLORS['MediumAquamarine'] = wx.Colour(102, 205, 170)
MPL_COLORS['LimeGreen'] = wx.Colour(50, 205, 50)
MPL_COLORS['LightSeaGreen'] = wx.Colour(32, 178, 170)
MPL_COLORS['MediumSeaGreen'] = wx.Colour(60, 179, 113)
MPL_COLORS['DarkSeaGreen'] = wx.Colour(143, 188, 143)
MPL_COLORS['SeaGreen'] = wx.Colour(46, 139, 87)
MPL_COLORS['ForestGreen'] = wx.Colour(34, 139, 34)
MPL_COLORS['DarkOliveGreen'] = wx.Colour(85, 107, 47)
MPL_COLORS['DarkGreen'] = wx.Colour(1, 50, 32)
MPL_COLORS['LightCyan'] = wx.Colour(224, 255, 255)
MPL_COLORS['Thistle'] = None
MPL_COLORS['PowderBlue'] = wx.Colour(176, 224, 230)
MPL_COLORS['LightSteelBlue'] = wx.Colour(176, 196, 222)
MPL_COLORS['LightSkyBlue'] = wx.Colour(135, 206, 250)
MPL_COLORS['MediumTurquoise'] = wx.Colour(72, 209, 204)
MPL_COLORS['Turquoise'] = None
MPL_COLORS['DarkTurquoise'] = wx.Colour(0, 206, 209)
MPL_COLORS['DeepSkyBlue'] = wx.Colour(0, 191, 255)
MPL_COLORS['DodgerBlue'] = wx.Colour(30, 144, 255)
MPL_COLORS['CornflowerBlue'] = wx.Colour(100, 149, 237)
MPL_COLORS['CadetBlue'] = wx.Colour(95, 158, 160)
MPL_COLORS['DarkCyan'] = wx.Colour(0, 139, 139)
MPL_COLORS['SteelBlue'] = wx.Colour(70, 130, 180)
MPL_COLORS['RoyalBlue'] = wx.Colour(65, 105, 225)
MPL_COLORS['SlateBlue'] = wx.Colour(106, 90, 205)
MPL_COLORS['DarkBlue'] = wx.Colour(0, 0, 139)
MPL_COLORS['MediumBlue'] = wx.Colour(0, 0, 205)
MPL_COLORS['SandyBrown'] = wx.Colour(244, 164, 96)
MPL_COLORS['DarkSalmon'] = wx.Colour(233, 150, 122)
MPL_COLORS['Salmon'] = None
MPL_COLORS['Tomato'] = wx.Colour(255, 99, 71)
MPL_COLORS['Violet'] = wx.Colour(238, 130, 238)
MPL_COLORS['HotPink'] = wx.Colour(255, 105, 180)
MPL_COLORS['RosyBrown'] = wx.Colour(188, 143, 143)
MPL_COLORS['MediumVioletRed'] = wx.Colour(199, 21, 133)
MPL_COLORS['DarkMagenta'] = wx.Colour(139, 0, 139)
MPL_COLORS['DarkOrchid'] = wx.Colour(153, 50, 204)
MPL_COLORS['Indigo'] = wx.Colour(75, 0, 130)
MPL_COLORS['MidnightBlue'] = wx.Colour(25, 25, 112)
MPL_COLORS['MediumSlateBlue'] = wx.Colour(123, 104, 238)
MPL_COLORS['MediumPurple'] = wx.Colour(147, 112, 219)
MPL_COLORS['MediumOrchid'] = wx.Colour(186, 85, 211)
MPL_COLORS = collections.OrderedDict(sorted(MPL_COLORS.items()))
###############################################################################
###############################################################################
# Based on https://matplotlib.org/3.1.0/gallery/lines_bars_and_markers/linestyles.html
# 10/September/2019 - Adriano Santana
MPL_LINESTYLES = collections.OrderedDict()
MPL_LINESTYLES['Solid'] = (0, ())
MPL_LINESTYLES['Dotted'] = (0, (1, 5))  # distinct from 'Densely dotted'
MPL_LINESTYLES['Loosely dotted'] = (0, (1, 10))
MPL_LINESTYLES['Densely dotted'] = (0, (1, 1))
MPL_LINESTYLES['Dashed'] = (0, (5, 5))
MPL_LINESTYLES['Loosely dashed'] = (0, (5, 10))
MPL_LINESTYLES['Densely dashed'] = (0, (5, 1))
MPL_LINESTYLES['Dashdotted'] = (0, (3, 5, 1, 5))
MPL_LINESTYLES['Loosely dashdotted'] = (0, (3, 10, 1, 10))
MPL_LINESTYLES['Densely dashdotted'] = (0, (3, 1, 1, 1))
MPL_LINESTYLES['Dashdotdotted'] = (0, (3, 5, 1, 5, 1, 5))
MPL_LINESTYLES['Loosely dashdotdotted'] = (0, (3, 10, 1, 10, 1, 10))
MPL_LINESTYLES['Densely dashdotdotted'] = (0, (3, 1, 1, 1, 1, 1))
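# Illustrative sketch (not part of the original module): each value above is an
# (offset, on-off-sequence) dash tuple, which matplotlib accepts directly as a
# line style.  The demo data below is made up.
def _plot_mpl_linestyles_example():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    for i, (name, style) in enumerate(MPL_LINESTYLES.items()):
        ax.plot([0, 1], [i, i], linestyle=style, color='black', label=name)
    ax.legend(loc='upper left', fontsize='x-small')
    return fig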
|
apache-2.0
| -2,007,023,896,810,117,400 | 34.727159 | 103 | 0.527926 | false |
grouan/udata
|
udata/api/fields.py
|
1
|
2440
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from dateutil.parser import parse
from flask import request, url_for
from flask.ext.restplus.fields import *
log = logging.getLogger(__name__)
class ISODateTime(String):
__schema_format__ = 'date-time'
def format(self, value):
if isinstance(value, basestring):
value = parse(value)
return value.isoformat()
class Markdown(String):
__schema_format__ = 'markdown'
class UrlFor(String):
def __init__(self, endpoint, mapper=None, **kwargs):
super(UrlFor, self).__init__(**kwargs)
self.endpoint = endpoint
self.mapper = mapper or self.default_mapper
def default_mapper(self, obj):
return {'id': str(obj.id)}
def output(self, key, obj):
return url_for(self.endpoint, _external=True, **self.mapper(obj))
class NextPageUrl(String):
def output(self, key, obj):
if not obj.has_next:
return None
args = request.args.copy()
args.update(request.view_args)
args['page'] = obj.page + 1
return url_for(request.endpoint, _external=True, **args)
class PreviousPageUrl(String):
def output(self, key, obj):
if not obj.has_prev:
return None
args = request.args.copy()
args.update(request.view_args)
args['page'] = obj.page - 1
return url_for(request.endpoint, _external=True, **args)
class ImageField(String):
def __init__(self, size=None, **kwargs):
super(ImageField, self).__init__(**kwargs)
self.size = size
def format(self, field):
return (field(self.size, external=True)
if self.size else field(external=True))
def pager(page_fields):
pager_fields = {
'data': List(Nested(page_fields), attribute='objects',
description='The page data'),
'page': Integer(description='The current page', required=True, min=1),
'page_size': Integer(description='The page size used for pagination',
required=True, min=0),
'total': Integer(description='The total paginated items',
required=True, min=0),
'next_page': NextPageUrl(description='The next page URL if exists'),
'previous_page': PreviousPageUrl(
description='The previous page URL if exists'),
}
return pager_fields
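# Illustrative sketch (not part of the original module): a paginated response
# model is typically declared by handing the wrapped fields to ``Api.model``.
# The names below are made up:
#
#     dataset_fields = api.model('Dataset', {'id': String, 'title': String})
#     dataset_page_fields = api.model('DatasetPage', pager(dataset_fields))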
|
agpl-3.0
| 1,874,636,427,373,957,000 | 28.047619 | 78 | 0.607377 | false |
optiv-labs/talus_client
|
talus_client/cmds/__init__.py
|
1
|
11800
|
#!/usr/bin/env python
# encoding: utf-8
import argparse
import arrow
import cmd
import datetime
import glob
import json
import inspect
import os
import re
import readline
import shlex
import sys
import textwrap
import types
import talus_client.api
import talus_client.errors as errors
import talus_client.utils as utils
from talus_client.utils import Colors
ModelCmd = None
ENABLED_COMMANDS = []
class TalusMetaClass(type):
def __init__(cls, name, bases, namespace):
global ENABLED_COMMANDS
super(TalusMetaClass, cls).__init__(name, bases, namespace)
if cls.__name__ in ["TalusCmdBase"]:
return
ENABLED_COMMANDS.append(cls)
class TalusCmdBase(object, cmd.Cmd):
__metaclass__ = TalusMetaClass
# to be overridden by inheriting classes
command_name = ""
def __init__(self, talus_host=None, client=None, user=None):
"""Create a new TalusCmdBase
:talus_host: The root of the talus web app (e.g. http://localhost:8001 if the api is at http://localhost:8001/api)
"""
global ModelCmd
from talus_client.param_model import ModelCmd as MC
ModelCmd = MC
cmd.Cmd.__init__(self, "\t")
self.one_shot = False
self._last_was_keyboard = False
self._talus_host = talus_host
self._talus_client = client
self._talus_user = user
if self._talus_host is not None and self._talus_client is None:
self._talus_client = talus_client.api.TalusClient(self._talus_host, user=self._talus_user)
def _nice_name(self, model, attr):
if "name" in model._fields[attr].value:
return "{} ({})".format(model._fields[attr]["name"], model._fields[attr]["id"])
else:
return getattr(model, attr)
def _resolve_one_model(self, id_or_name, model, search, sort="-timestamps.created", default_id_search=None):
if default_id_search is None:
default_id_search = ["id", "name"]
if id_or_name is not None and not id_or_name.startswith("+"):
for default_compare in default_id_search:
res = model.find_one(**{default_compare:id_or_name})
if res is not None:
return res
return None
if id_or_name is None:
skip = 0
else:
if not re.match(r'^\+\d+$', id_or_name):
raise errors.TalusApiError("Git-like referencing must be a plus sign followed by digits")
skip = int(id_or_name.replace("+", "")) - 1
search["skip"] = skip
search["num"] = 1
search["sort"] = sort
return model.find_one(**search)
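    # Illustrative examples (not part of the original module); the model and
    # ids below are made up.  A plain value is matched against id/name, while
    # a git-like "+N" reference selects the N-th most recently created object:
    #
    #     self._resolve_one_model("master_image_1", Image, {})   # by id/name
    #     self._resolve_one_model("+2", Image, {})                # 2nd newest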
def _search_terms(self, parts, key_remap=None, user_default_filter=True, out_leftover=None, no_hex_keys=None):
"""Return a dictionary of search terms"""
if no_hex_keys is None:
no_hex_keys = []
search = {}
key = None
if key_remap is None:
key_remap = {}
key_map = {
"status": "status.name"
}
key_map.update(key_remap)
found_all = False
for item in parts:
if key is None:
if not item.startswith("--"):
if out_leftover is not None:
out_leftover.append(item)
continue
else:
raise errors.TalusApiError("args must be alternating search item/value pairs!")
item = item[2:].replace("-", "_")
key = item
if key == "all":
found_all = True
key = None
continue
if key in key_map:
key = key_map[key]
if key.endswith("__type") or key.endswith(".type"):
key += "_"
elif key is not None:
# hex conversion
if re.match(r'^0x[0-9a-f]+$', item, re.IGNORECASE) is not None and key.split("__")[0] not in no_hex_keys:
item = int(item, 16)
if key in search and not isinstance(search[key], list):
search[key] = [search[key]]
if key in search and isinstance(search[key], list):
search[key].append(item)
else:
search[key] = item
self.out("searching for {} = {}".format(key, item))
# reset this
key = None
if user_default_filter and not found_all and self._talus_user is not None:
# default filter by username tag
self.out("default filtering by username (searching for tags = {})".format(self._talus_user))
self.out("use --all to view all models")
if "tags" in search and not isinstance(search["tags"], list):
search["tags"] = [search["tags"]]
if "tags" in search and isinstance(search["tags"], list):
search["tags"].append(self._talus_user)
else:
search["tags"] = self._talus_user
if out_leftover is not None and key is not None:
out_leftover.append(key)
return search
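    # Illustrative example (not part of the original module); the values are
    # made up.  Alternating "--key value" pairs become a search dict:
    #
    #     self._search_terms(["--status", "running", "--tags", "fuzz"])
    #     -> {"status.name": "running", "tags": ["fuzz", "<username>"]}
    #
    # (the username tag is appended unless --all is passed)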
def _actual_date(self, epoch):
return datetime.datetime.fromtimestamp(epoch).strftime("%Y-%m-%d %H:%M:%S")
def _rel_date(self, epoch):
return arrow.get(epoch).humanize()
def _prep_model(self, model):
if hasattr(model, "tags") and self._talus_user is not None and self._talus_user not in model.tags:
model.tags.append(self._talus_user)
def _make_model_cmd(self, model, prompt_part="create"):
res = ModelCmd(model, self._talus_host, self._talus_client)
res.prompt = self.prompt[:-2] + ":" + prompt_part + "> "
res._root = self._root
return res
def _go_interactive(self, args):
return ("--shell" in args or (len(args) == 0 and not self._root.one_shot))
def ask(self, msg):
msg = Colors.WARNING + msg + Colors.ENDC
return raw_input(msg)
def ok(self, msg):
"""
Print the message with a success/ok color
"""
msg = u"\n".join(Colors.OKGREEN + u"{}{}".format(u"[.] ", line) + Colors.ENDC for line in unicode(msg).split("\n"))
print(msg)
def out(self, msg, raw=False):
"""
Print the message with standard formatting
"""
pre = Colors.OKBLUE + "[+]" + Colors.ENDC + " "
if raw:
pre = " "
msg = u"\n".join(u"{}{}".format(pre, line) for line in unicode(msg).split("\n"))
print(msg)
def warn(self, msg):
"""
Print an error message
"""
# TODO colors?
msg = u"\n".join(Colors.FAIL + u"[!] {}".format(line) + Colors.ENDC for line in unicode(msg).split("\n"))
print(msg)
def err(self, msg):
"""
Print an error message
"""
# TODO colors?
msg = u"\n".join(Colors.FAIL + u"[E] {}".format(line) + Colors.ENDC for line in unicode(msg).split("\n"))
print(msg)
@property
def prompt(self):
caller_name = inspect.stack()[1][3]
if caller_name == "cmdloop":
return Colors.HEADER + self._prompt + Colors.ENDC
return self._prompt
@prompt.setter
def prompt(self, value):
self._prompt = value
return self._prompt
def emptyline(self):
"""don't repeat the last successful command"""
pass
def do_up(self, args):
"""Quit the current processor (move up a level)"""
return True
def do_quit(self, args):
"""Quit the program"""
return True
do_exit = do_quit
do_exit.__doc__ = do_quit.__doc__
def cmdloop(self, *args, **kwargs):
try:
return cmd.Cmd.cmdloop(self, *args, **kwargs)
except KeyboardInterrupt as e:
self.err("cancelled")
return True
def onecmd(self, *args, **kwargs):
try:
return cmd.Cmd.onecmd(self, *args, **kwargs)
except talus_client.errors.TalusApiError as e:
self.err(e.message)
except KeyboardInterrupt as e:
if not self._last_was_keyboard:
self.err("cancelled")
else:
self.err("if you want to quit, use the 'quit' command")
self._last_was_keyboard = True
# raised by argparse when args aren't correct
except SystemExit as e:
pass
else:
# no KeyboardInterrupts happened
self._last_was_keyboard = False
def default(self, line):
funcs = filter(lambda x: x.startswith("do_"), dir(self))
parts = line.split()
first_param = parts[0]
matches = filter(lambda x: x.startswith("do_" + first_param), funcs)
if len(matches) > 1:
self.warn("ambiguous command, matching commands:")
for match in matches:
print(" " + match.replace("do_", ""))
return
elif len(matches) == 1:
func = getattr(self, matches[0])
return func(" ".join(parts[1:]))
self.err("Unknown command. Try the 'help' command.")
def completedefault(self, text, line, begidx, endidx):
funcs = filter(lambda x: x.startswith("do_"), dir(self))
res = filter(lambda x: x.startswith(text), funcs)
return res
@classmethod
def get_command_helps(cls):
"""Look for methods in this class starting with do_.
:returns: A dict of commands and their help values. E.g. ``{"list": "List all the images"}``
"""
res = {}
regex = re.compile(r'^do_(.*)$')
for name in dir(cls):
match = regex.match(name)
if match is not None:
cmd = match.group(1)
prop = getattr(cls, name, None)
doc = getattr(prop, "__doc__", None)
if doc is not None:
lines = doc.split("\n")
res[cmd] = lines[0].lstrip() + textwrap.dedent("\n".join(lines[1:]).expandtabs(4))
return res
@classmethod
def get_help(cls, args=None, abbrev=False, examples=False):
args = "" if args is None else args
cmd = None
cmd_specific = (len(args) > 0)
cmd_helps = ""
if not cmd_specific:
cmd_helps += "\n{name}\n{under}\n".format(
name=cls.command_name,
under=("-"*len(cls.command_name))
)
else:
cmd = args.split(" ")[0]
for subcmd_name,subcmd_help in cls.get_command_helps().iteritems():
if cmd_specific and subcmd_name != cmd:
continue
if not examples and "\nExamples:\n" in subcmd_help:
subcmd_help,_ = subcmd_help.split("\nExamples:\n")
lines = subcmd_help.split("\n")
first_line = lines[0].lstrip()
label_start = "\n{:>10} - ".format(subcmd_name)
spaces = " " * len(label_start)
label_line = label_start + first_line
cmd_helps += "\n".join(textwrap.wrap(
label_line,
subsequent_indent=spaces
))
if len(lines) > 2 and not abbrev:
cmd_helps += "\n\n" + "\n".join(spaces + x for x in lines[1:])
cmd_helps += "\n"
return cmd_helps
def do_help(self, args):
examples = (len(args) > 0)
print(self.get_help(args=args, examples=examples))
# -----------------------------------
def _argparser(self):
# TODO make this a loop and find the first do_XXXX function in
# the current callstack?
caller_name = inspect.stack()[1][3]
if self.one_shot:
return argparse.ArgumentParser(self.command_name + " " + caller_name.replace("do_", ""))
else:
return argparse.ArgumentParser(caller_name.replace("do_", ""))
class TalusCmd(TalusCmdBase):
"""The main talus command. This is what is invoked when dropping
into a shell or when run from the command line"""
command_name = "<ROOT>"
def __init__(self, talus_host=None, client=None, one_shot=False, user=None):
"""Initialize the Talus command object
:one_shot: True if only one command is to be processed (cmd-line args, no shell, etc)
"""
super(TalusCmd, self).__init__(talus_host=talus_host, client=client, user=user)
self.prompt = "talus> "
self.one_shot = one_shot
# auto-import all defined commands in talus/cmds/*.py
this_dir = os.path.dirname(__file__)
for filename in glob.glob(os.path.join(this_dir, "*.py")):
basename = os.path.basename(filename)
if basename == "__init__.py":
continue
mod_name = basename.replace(".py", "")
mod_base = __import__("talus_client.cmds", globals(), locals(), fromlist=[mod_name])
mod = getattr(mod_base, mod_name)
def make_cmd_handler(cls):
def _handle_command(self, args):
processor = cls(talus_host=self._talus_host, client=self._talus_client, user=self._talus_user)
processor._root = self
processor.prompt = "talus:" + processor.command_name + "> "
if self.one_shot or len(args) > 0:
processor.one_shot = True
processor.onecmd(args)
else:
processor.cmdloop()
return _handle_command
def define_root_commands():
for cls in ENABLED_COMMANDS:
if cls.command_name == "" or cls == TalusCmd:
continue
handler = make_cmd_handler(cls)
# the baseclass cmd.Cmd always defines a do_help, so we need to check if it's
# redefined in the specific subclass
if "do_help" in cls.__dict__:
handler.__doc__ = cls.do_help.__doc__
else:
handler.__doc__ = cls.__doc__
setattr(TalusCmd, "do_" + cls.command_name, handler)
define_root_commands()
|
mit
| 4,405,679,507,563,071,000 | 26.699531 | 118 | 0.648559 | false |
cristian99garcia/laybrinth-activity
|
labyrinthactivity.py
|
1
|
39566
|
#!/usr/bin/python
# coding=UTF-8
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
import sys
import os
import shutil
import time
from gettext import gettext as _
import xml.dom.minidom as dom
import cairo
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import Gio
from gi.repository import Pango
from gi.repository import GdkPixbuf
from gi.repository import PangoCairo
from sugar3.activity import activity
from sugar3.activity.widgets import EditToolbar as SugarEditToolbar
from sugar3.graphics.toolbutton import ToolButton
from sugar3.graphics.radiotoolbutton import RadioToolButton
from sugar3.graphics.colorbutton import ColorToolButton
from sugar3.graphics.menuitem import MenuItem
from sugar3.graphics.icon import Icon
from sugar3.datastore import datastore
from sugar3.graphics import style
from port.tarball import Tarball
from sugar3 import env
try:
from sugar3.graphics.toolbarbox import ToolbarBox
HASTOOLBARBOX = True
except ImportError:
HASTOOLBARBOX = False
if HASTOOLBARBOX:
from sugar3.graphics.toolbarbox import ToolbarButton
from sugar3.activity.widgets import ActivityToolbarButton
from sugar3.activity.widgets import StopButton
# labyrinth sources are shipped inside the 'src' subdirectory
sys.path.append(os.path.join(activity.get_bundle_path(), 'src'))
import UndoManager
import MMapArea
import utils
EMPTY = -800
DEFAULT_FONTS = ['Sans', 'Serif', 'Monospace']
USER_FONTS_FILE_PATH = env.get_profile_path('fonts')
GLOBAL_FONTS_FILE_PATH = '/etc/sugar_fonts'
def stop_editing(main_area):
if len(main_area.selected) == 1:
if hasattr(main_area.selected[0], 'textview'):
main_area.selected[0].remove_textview()
class MyMenuItem(MenuItem):
def __init__(self, text_label=None, icon_name=None, text_maxlen=60,
xo_color=None, file_name=None, image=None):
super(MenuItem, self).__init__()
self._accelerator = None
self.props.submenu = None
label = Gtk.AccelLabel(text_label)
label.set_alignment(0.0, 0.5)
label.set_accel_widget(self)
if text_maxlen > 0:
label.set_ellipsize(Pango.EllipsizeMode.MIDDLE)
label.set_max_width_chars(text_maxlen)
self.add(label)
label.show()
if image is not None:
self.set_image(image)
image.show()
elif icon_name is not None:
icon = Icon(icon_name=icon_name,
icon_size=Gtk.IconSize.SMALL_TOOLBAR)
if xo_color is not None:
icon.props.xo_color = xo_color
self.set_image(icon)
icon.show()
elif file_name is not None:
icon = Icon(file=file_name, icon_size=Gtk.IconSize.SMALL_TOOLBAR)
if xo_color is not None:
icon.props.xo_color = xo_color
self.set_image(icon)
icon.show()
class FontImage(Gtk.Image):
_FONT_ICON = \
'<?xml version="1.0" encoding="UTF-8" standalone="no"?>\
<svg\
version="1.1"\
width="27.5"\
height="27.5"\
viewBox="0 0 27.5 27.5">\
<text\
x="5"\
y="21"\
style="font-size:25px;fill:#ffffff;stroke:none"><tspan\
x="5"\
y="21"\
style="font-family:%s">F</tspan></text>\
</svg>'
def __init__(self, font_name):
super(Gtk.Image, self).__init__()
loader = GdkPixbuf.PixbufLoader()
        # Fill in the font-family placeholder so the icon previews the font.
        loader.write((self._FONT_ICON % font_name).encode())
loader.close()
pixbuf = loader.get_pixbuf()
self.set_from_pixbuf(pixbuf)
self.show()
class EditToolbar(SugarEditToolbar):
def __init__(self, _parent):
SugarEditToolbar.__init__(self)
self._parent = _parent
self.undo.connect('clicked', self.__undo_cb)
self.redo.connect('clicked', self.__redo_cb)
self.copy.connect('clicked', self.__copy_cb)
self.paste.connect('clicked', self.__paste_cb)
menu_item = MenuItem(_('Cut'))
menu_item.connect('activate', self.__cut_cb)
menu_item.show()
self.copy.get_palette().menu.append(menu_item)
self.insert(Gtk.SeparatorToolItem(), -1)
self.erase_button = ToolButton('edit-delete')
self.erase_button.set_tooltip(_('Erase selected thought(s)'))
self.erase_button.connect('clicked', self.__delete_cb)
self.insert(self.erase_button, -1)
self.show_all()
        self.clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
self.copy.set_sensitive(False)
self.paste.set_sensitive(False)
self.erase_button.set_sensitive(False)
def __undo_cb(self, button):
self._parent._undo.undo_action(None)
def __redo_cb(self, button):
self._parent._undo.redo_action(None)
def __cut_cb(self, event):
self._parent._main_area.cut_clipboard(self.clipboard)
def __copy_cb(self, event):
self._parent._main_area.copy_clipboard(self.clipboard)
def __paste_cb(self, event):
self._parent._main_area.paste_clipboard(self.clipboard)
def __delete_cb(self, widget):
self._stop_moving()
self.stop_dragging()
self._parent._main_area.delete_selected_elements()
def stop_dragging(self):
if self._parent._main_area.is_dragging():
self._parent._main_area.drag_menu_cb(self._sw, False)
def _stop_moving(self):
self._parent._main_area.move_mode = False
class ViewToolbar(Gtk.Toolbar):
def __init__(self, main_area):
Gtk.Toolbar.__init__(self)
self._main_area = main_area
tool = ToolButton('zoom-best-fit')
tool.set_tooltip(_('Fit to window'))
tool.set_accelerator(_('<ctrl>9'))
tool.connect('clicked', self.__zoom_tofit_cb)
self.insert(tool, -1)
tool = ToolButton('zoom-original')
tool.set_tooltip(_('Original size'))
tool.set_accelerator(_('<ctrl>0'))
tool.connect('clicked', self.__zoom_original_cb)
self.insert(tool, -1)
tool = ToolButton('zoom-out')
tool.set_tooltip(_('Zoom out'))
tool.set_accelerator(_('<ctrl>minus'))
tool.connect('clicked', self.__zoom_out_cb)
self.insert(tool, -1)
tool = ToolButton('zoom-in')
tool.set_tooltip(_('Zoom in'))
tool.set_accelerator(_('<ctrl>equal'))
tool.connect('clicked', self.__zoom_in_cb)
self.insert(tool, -1)
self.show_all()
def __zoom_in_cb(self, button):
stop_editing(self._main_area)
self._main_area.scale_fac *= 1.2
hadj = self._main_area.sw.get_hadjustment()
hadj.set_upper(hadj.get_upper() * 1.2)
vadj = self._main_area.sw.get_vadjustment()
vadj.set_upper(vadj.get_upper() * 1.2)
self._main_area.invalidate()
def __zoom_out_cb(self, button):
stop_editing(self._main_area)
self._main_area.scale_fac /= 1.2
hadj = self._main_area.sw.get_hadjustment()
hadj.set_upper(hadj.get_upper() / 1.2)
vadj = self._main_area.sw.get_vadjustment()
vadj.set_upper(vadj.get_upper() / 1.2)
self._main_area.invalidate()
def __zoom_original_cb(self, button):
stop_editing(self._main_area)
self._main_area.scale_fac = 1.0
self._main_area.translation[0] = 0
self._main_area.translation[1] = 0
hadj = self._main_area.sw.get_hadjustment()
hadj.set_lower(0)
hadj.set_upper(max(Gdk.Screen.width(), Gdk.Screen.height()))
vadj = self._main_area.sw.get_vadjustment()
vadj.set_lower(0)
vadj.set_upper(max(Gdk.Screen.width(), Gdk.Screen.height()))
self._main_area.invalidate()
def __zoom_tofit_cb(self, button):
stop_editing(self._main_area)
bounds = self.__get_thought_bounds()
self._main_area.translation[0] = bounds['x']
self._main_area.translation[1] = bounds['y']
self._main_area.scale_fac = bounds['scale']
hadj = self._main_area.sw.get_hadjustment()
hadj.set_lower(0)
hadj.set_upper(max(Gdk.Screen.width(),
Gdk.Screen.height()) * bounds['scale'])
vadj = self._main_area.sw.get_vadjustment()
vadj.set_lower(0)
vadj.set_upper(max(Gdk.Screen.width(),
Gdk.Screen.height()) * bounds['scale'])
self._main_area.invalidate()
def __get_thought_bounds(self):
if len(self._main_area.thoughts) == 0:
self._main_area.scale_fac = 1.0
self._main_area.translation[0] = 0
self._main_area.translation[1] = 0
self._main_area.invalidate()
return {'x': 0, 'y': 0, 'scale': 1.0}
# Find thoughts extent
left = right = upper = lower = None
for t in self._main_area.thoughts:
if right == None or t.lr[0] > right:
right = t.lr[0]
if lower == None or t.lr[1] > lower:
lower = t.lr[1]
if left == None or t.ul[0] < left:
left = t.ul[0]
if upper == None or t.ul[1] < upper:
upper = t.ul[1]
width = right - left
height = lower - upper
'''
screen_width = self._main_area.window.get_geometry()[2]
screen_height = self._main_area.window.get_geometry()[3]
'''
screen_width = Gdk.Screen.width()
screen_height = Gdk.Screen.height() - style.GRID_CELL_SIZE
overlap = (width - screen_width, height - screen_height)
width_scale = float(screen_width) / (width * 1.1)
height_scale = float(screen_height) / (height * 1.1)
return {'x': (screen_width / 2.0) - (width / 2.0 + left),
'y': (screen_height / 2.0) - (height / 2.0 + upper) + \
style.GRID_CELL_SIZE,
'scale': min(width_scale, height_scale)}
class TextAttributesToolbar(Gtk.Toolbar):
def __init__(self, main_area):
Gtk.Toolbar.__init__(self)
self._main_area = main_area
self._font_list = ['ABC123', 'Sans', 'Serif', 'Monospace', 'Symbol']
self._font_sizes = ['8', '9', '10', '11', '12', '14', '16', '20',
'22', '24', '26', '28', '36', '48', '72']
self.font_button = ToolButton('font-text')
self.font_button.set_tooltip(_('Select font'))
self.font_button.connect('clicked', self.__font_selection_cb)
self.insert(self.font_button, -1)
self._setup_font_palette()
self.insert(Gtk.SeparatorToolItem(), -1)
self.font_size_up = ToolButton('resize+')
self.font_size_up.set_tooltip(_('Bigger'))
self.font_size_up.connect('clicked', self.__font_sizes_cb, True)
self.insert(self.font_size_up, -1)
if len(self._main_area.selected) > 0:
font_size = self._main_area.font_size
else:
font_size = utils.default_font_size
self.size_label = Gtk.Label(str(font_size))
self.size_label.show()
toolitem = Gtk.ToolItem()
toolitem.add(self.size_label)
toolitem.show()
self.insert(toolitem, -1)
self.font_size_down = ToolButton('resize-')
self.font_size_down.set_tooltip(_('Smaller'))
self.font_size_down.connect('clicked', self.__font_sizes_cb, False)
self.insert(self.font_size_down, -1)
self.insert(Gtk.SeparatorToolItem(), -1)
self.bold = ToolButton('bold-text')
self.bold.set_tooltip(_('Bold'))
self.bold.connect('clicked', self.__bold_cb)
self.insert(self.bold, -1)
self.italics = ToolButton('italics-text')
self.italics.set_tooltip(_('Italics'))
self.italics.connect('clicked', self.__italics_cb)
self.insert(self.italics, -1)
self.underline = ToolButton('underline-text')
self.underline.set_tooltip(_('Underline'))
self.underline.connect('clicked', self.__underline_cb)
self.insert(self.underline, -1)
foreground_color = ColorToolButton()
foreground_color.set_title(_('Set font color'))
foreground_color.connect('color-set', self.__foreground_color_cb)
self.insert(foreground_color, -1)
        background_color = ColorToolButton()
        background_color.set_title(_('Set background color'))
        background_color.connect('color-set', self.__background_color_cb)
        background_color.set_color(Gdk.Color(65535, 65535, 65535))
        self.insert(background_color, -1)
self.show_all()
def __font_selection_cb(self, widget):
if self._font_palette:
if not self._font_palette.is_up():
self._font_palette.popup(immediate=True,
state=self._font_palette.SECONDARY)
else:
self._font_palette.popdown(immediate=True)
return
def _init_font_list(self):
self._font_white_list = []
self._font_white_list.extend(DEFAULT_FONTS)
# check if there are a user configuration file
if not os.path.exists(USER_FONTS_FILE_PATH):
# verify if exists a file in /etc
if os.path.exists(GLOBAL_FONTS_FILE_PATH):
shutil.copy(GLOBAL_FONTS_FILE_PATH, USER_FONTS_FILE_PATH)
if os.path.exists(USER_FONTS_FILE_PATH):
# get the font names in the file to the white list
fonts_file = open(USER_FONTS_FILE_PATH)
# get the font names in the file to the white list
for line in fonts_file:
self._font_white_list.append(line.strip())
# monitor changes in the file
gio_fonts_file = Gio.File.new_for_path(USER_FONTS_FILE_PATH)
self.monitor = gio_fonts_file.monitor_file(0, None)
self.monitor.set_rate_limit(5000)
self.monitor.connect('changed', self._reload_fonts)
def _reload_fonts(self, monitor, gio_file, other_file, event):
if event != Gio.FileMonitorEvent.CHANGES_DONE_HINT:
return
self._font_white_list = []
self._font_white_list.extend(DEFAULT_FONTS)
fonts_file = open(USER_FONTS_FILE_PATH)
for line in fonts_file:
self._font_white_list.append(line.strip())
# update the menu
for child in self._font_palette.menu.get_children():
self._font_palette.menu.remove(child)
child = None
context = self.get_pango_context()
tmp_list = []
for family in context.list_families():
name = family.get_name()
if name in self._font_white_list:
tmp_list.append(name)
for font in sorted(tmp_list):
menu_item = MyMenuItem(image=FontImage(font.replace(' ', '-')),
text_label=font)
menu_item.connect('activate', self.__font_selected_cb, font)
self._font_palette.menu.append(menu_item)
menu_item.show()
return False
def _setup_font_palette(self):
self._init_font_list()
context = self._main_area.pango_context
for family in context.list_families():
name = Pango.FontDescription(family.get_name()).to_string()
if name not in self._font_list and \
name in self._font_white_list:
self._font_list.append(name)
self._font_palette = self.font_button.get_palette()
for font in sorted(self._font_list):
menu_item = MyMenuItem(image=FontImage(font.replace(' ', '-')),
text_label=font)
menu_item.connect('activate', self.__font_selected_cb, font)
self._font_palette.menu.append(menu_item)
menu_item.show()
def __font_selected_cb(self, widget, font_name):
if not hasattr(self._main_area, 'font_name'):
return
if len(self._main_area.selected) > 0:
font_size = self._main_area.font_size
else:
font_size = utils.default_font_size
self._main_area.set_font(font_name, font_size)
self._main_area.font_name = font_name
self._main_area.font_size = font_size
def __attribute_values(self):
thought = self._main_area.selected[0]
return thought.attributes.copy()
def __font_sizes_cb(self, button, increase):
if not hasattr(self._main_area, 'font_size'):
return
if len(self._main_area.selected) < 1:
return
font_size = self._main_area.font_size
if font_size in self._font_sizes:
i = self._font_sizes.index(font_size)
if increase:
if i < len(self._font_sizes) - 2:
i += 1
else:
if i > 0:
i -= 1
else:
i = self._font_sizes.index(utils.default_font_size)
font_size = self._font_sizes[i]
self.size_label.set_text(str(font_size))
self.font_size_down.set_sensitive(i != 0)
self.font_size_up.set_sensitive(i < len(self._font_sizes) - 2)
self._main_area.set_font(self._main_area.font_name, font_size)
def __bold_cb(self, button):
if len(self._main_area.selected) < 1:
return
value = not self.__attribute_values()["bold"]
self._main_area.set_bold(value)
def __italics_cb(self, button):
if len(self._main_area.selected) < 1:
return
value = not self.__attribute_values()["italic"]
self._main_area.set_italics(value)
def __underline_cb(self, button):
if len(self._main_area.selected) < 1:
return
value = not self.__attribute_values()["underline"]
self._main_area.set_underline(value)
def __foreground_color_cb(self, button):
color = button.get_color()
self._main_area.set_foreground_color(color)
def __background_color_cb(self, button):
color = button.get_color()
        self._main_area.set_background_color(color)
def change_active_font(self):
# TODO: update the toolbar
return
class ThoughtsToolbar(Gtk.Toolbar):
def __init__(self, parent):
Gtk.Toolbar.__init__(self)
self._parent = parent
text_mode_btn = RadioToolButton(icon_name='text-mode')
text_mode_btn.set_tooltip(_('Text mode'))
text_mode_btn.set_accelerator(_('<ctrl>t'))
text_mode_btn.connect('clicked', self._parent.mode_cb,
MMapArea.MODE_TEXT)
self.insert(text_mode_btn, -1)
image_mode_btn = RadioToolButton(icon_name='image-mode', group=text_mode_btn)
image_mode_btn.set_tooltip(_('Image add mode'))
image_mode_btn.set_accelerator(_('<ctrl>i'))
image_mode_btn.connect('clicked', self._parent.mode_cb,
MMapArea.MODE_IMAGE)
self.insert(image_mode_btn, -1)
draw_mode_btn = RadioToolButton(icon_name='draw-mode', group=text_mode_btn)
draw_mode_btn.set_tooltip(_('Drawing mode'))
draw_mode_btn.set_accelerator(_('<ctrl>d'))
draw_mode_btn.connect('clicked', self._parent.mode_cb,
MMapArea.MODE_DRAW)
self.insert(draw_mode_btn, -1)
label_mode_btn = RadioToolButton(icon_name='label-mode', group=text_mode_btn)
label_mode_btn.set_tooltip(_('Label mode'))
label_mode_btn.set_accelerator(_('<ctrl>a'))
label_mode_btn.connect('clicked', self._parent.mode_cb,
MMapArea.MODE_LABEL)
self.insert(label_mode_btn, -1)
self.show_all()
class ActionButtons():
''' This class manages the action buttons that move among toolsbars '''
def __init__(self, parent):
self._main_toolbar = parent.get_toolbar_box().toolbar
self._main_area = parent._main_area
self._erase_button = parent.edit_toolbar.erase_button
self._sw = parent._sw
if HASTOOLBARBOX:
target_toolbar = self._main_toolbar
else:
target_toolbar = self.parent.edit_toolbar
self._mods = RadioToolButton(icon_name='select-mode')
self._mods.set_tooltip(_('Select thoughts'))
self._mods.set_accelerator(_('<ctrl>e'))
self._mods.connect('clicked', parent.mode_cb, MMapArea.MODE_NULL)
target_toolbar.insert(self._mods, -1)
self._link_button = RadioToolButton(icon_name='link', group=self._mods)
self._link_button.set_tooltip(_('Link/unlink two selected thoughts'))
self._link_button.set_accelerator(_('<ctrl>l'))
self._link_button.connect('clicked', self.__link_cb)
target_toolbar.insert(self._link_button, -1)
self.move_button = RadioToolButton(icon_name='move', group=self._mods)
self.move_button.set_tooltip(_('Move selected thoughs'))
self.move_button.set_accelerator(_('<ctrl>m'))
self.move_button.connect('clicked', self.__move_cb)
target_toolbar.insert(self.move_button, -1)
self.drag_button = RadioToolButton(icon_name='drag', group=self._mods)
self.drag_button.set_tooltip(_('Scroll the screen'))
self.drag_button.connect('clicked', self.__drag_cb)
target_toolbar.insert(self.drag_button, -1)
if HASTOOLBARBOX:
self._separator_2 = Gtk.SeparatorToolItem()
self._separator_2.props.draw = False
#self._separator_2.set_size_request(0, -1)
self._separator_2.set_expand(True)
self._separator_2.show()
target_toolbar.insert(self._separator_2, -1)
self._stop_button = StopButton(parent)
target_toolbar.insert(self._stop_button, -1)
def stop_dragging(self):
if self._main_area.is_dragging():
self._main_area.drag_menu_cb(self._sw, False)
def _stop_moving(self):
self._main_area.move_mode = False
def __link_cb(self, widget):
self._stop_moving()
self.stop_dragging()
self._main_area.link_menu_cb()
def __move_cb(self, widget):
self.stop_dragging()
if self._main_area.move_mode:
self._main_area.stop_moving()
else:
self._main_area.start_moving(self.move_button)
self._erase_button.set_sensitive(False)
def __drag_cb(self, widget):
# If we were moving, stop
self._stop_moving()
if not self._main_area.is_dragging():
self._main_area.drag_menu_cb(self._sw, True)
else:
self.stop_dragging()
self._erase_button.set_sensitive(False)
def reconfigure(self):
''' If screen width has changed, we may need to reconfigure
the toolbars '''
if not HASTOOLBARBOX:
return
if hasattr(self, '_separator_2'):
if Gdk.Screen.width() / 13 > style.GRID_CELL_SIZE:
if self._separator_2.get_parent() is None:
self._main_toolbar.remove(self._stop_button)
self._main_toolbar.insert(self._separator_2, -1)
self._main_toolbar.insert(self._stop_button, -1)
else:
self._main_toolbar.remove(self._separator_2)
class LabyrinthActivity(activity.Activity):
def __init__(self, handle):
activity.Activity.__init__(self, handle)
if HASTOOLBARBOX:
self.max_participants = 1
toolbar_box = ToolbarBox()
self.set_toolbar_box(toolbar_box)
activity_button = ActivityToolbarButton(self)
toolbar_box.toolbar.insert(activity_button, 0)
separator = Gtk.SeparatorToolItem()
separator.props.draw = True
activity_button.props.page.insert(separator, -1)
separator.show()
tool = ToolButton('pdf-export')
tool.set_tooltip(_('Portable Document Format (PDF)'))
tool.connect('clicked', self.__export_pdf_cb)
activity_button.props.page.insert(tool, -1)
tool.show()
tool = ToolButton('png-export')
tool.set_tooltip(_('Portable Network Graphic (PNG)'))
tool.connect('clicked', self.__export_png_cb)
activity_button.props.page.insert(tool, -1)
tool.show()
tool = ToolbarButton()
self.edit_toolbar = EditToolbar(self)
tool.props.page = self.edit_toolbar
tool.props.icon_name = 'toolbar-edit'
##tool.props.label = _('Edit'),
toolbar_box.toolbar.insert(tool, -1)
#self._undo = UndoManager.UndoManager(self,
# self.edit_toolbar.undo.child,
# self.edit_toolbar.redo.child)
self._undo = UndoManager.UndoManager(self,
self.edit_toolbar.undo,
self.edit_toolbar.redo)
self.__build_main_canvas_area()
tool = ToolbarButton()
tool.props.page = ViewToolbar(self._main_area)
tool.props.icon_name = 'toolbar-view'
tool.props.label = _('View')
toolbar_box.toolbar.insert(tool, -1)
tool = ToolbarButton()
self.text_format_toolbar = TextAttributesToolbar(self._main_area)
tool.props.page = self.text_format_toolbar
tool.props.icon_name = 'toolbar-text'
tool.props.label = _('Text')
toolbar_box.toolbar.insert(tool, -1)
# self._main_area.set_text_attributes(self.text_format_toolbar)
self.thought_toolbar = ToolbarButton()
self.thought_toolbar.props.page = ThoughtsToolbar(self)
self.thought_toolbar.props.icon_name = 'thought'
self.thought_toolbar.props.label = _('Thought Type')
toolbar_box.toolbar.insert(self.thought_toolbar, -1)
self.action_buttons = ActionButtons(self)
toolbar_box.show_all()
else:
# Use old <= 0.84 toolbar design
toolbox = activity.ActivityToolbox(self)
self.set_toolbox(toolbox)
activity_toolbar = toolbox.get_activity_toolbar()
keep_palette = activity_toolbar.keep.get_palette()
menu_item = MenuItem(_('Portable Document Format (PDF)'))
menu_item.connect('activate', self.__export_pdf_cb)
keep_palette.menu.append(menu_item)
menu_item.show()
menu_item = MenuItem(_('Portable Network Graphic (PNG)'))
menu_item.connect('activate', self.__export_png_cb)
keep_palette.menu.append(menu_item)
menu_item.show()
self.edit_toolbar = EditToolbar(self)
toolbox.add_toolbar(_('Edit'), self.edit_toolbar)
separator = Gtk.SeparatorToolItem()
self.edit_toolbar.insert(separator, 0)
self.edit_toolbar.show()
self._undo = UndoManager.UndoManager(self,
self.edit_toolbar.undo.child,
self.edit_toolbar.redo.child)
self.__build_main_canvas_area()
view_toolbar = ViewToolbar(self._main_area)
toolbox.add_toolbar(_('View'), view_toolbar)
activity_toolbar = toolbox.get_activity_toolbar()
activity_toolbar.share.props.visible = False
toolbox.set_current_toolbar(1)
self.show_all()
self.__configure_cb(None)
self._mode = MMapArea.MODE_TEXT
self._main_area.set_mode(self._mode)
self.set_focus_child(self._main_area)
def __build_main_canvas_area(self):
self.fixed = Gtk.Fixed()
self.fixed.show()
self.set_canvas(self.fixed)
self._vbox = Gtk.VBox()
self._vbox.set_size_request(
Gdk.Screen.width(),
Gdk.Screen.height() - style.GRID_CELL_SIZE)
self._main_area = MMapArea.MMapArea(self._undo)
self._undo.block()
self._main_area.set_size_request(
max(Gdk.Screen.width(), Gdk.Screen.height()),
max(Gdk.Screen.width(), Gdk.Screen.height()))
self._main_area.show()
self._main_area.connect("set_focus", self.__main_area_focus_cb)
self._main_area.connect("button-press-event",
self.__main_area_focus_cb)
self._main_area.connect("draw", self.__expose)
self._main_area.connect("text_selection_changed",
self.__text_selection_cb)
self._main_area.connect("thought_selection_changed",
self.__thought_selected_cb)
Gdk.Screen.get_default().connect('size-changed',
self.__configure_cb)
self._sw = Gtk.ScrolledWindow()
self._sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
self._sw.add_with_viewport(self._main_area)
self._vbox.pack_end(self._sw, True, True, 0)
self._sw.show()
self._main_area.show()
self._vbox.show()
self.fixed.put(self._vbox, 0, 0)
self.hadj = self._sw.get_hadjustment()
self.hadj.connect("value_changed", self._hadj_adjusted_cb,
self.hadj)
self.vadj = self._sw.get_vadjustment()
self.vadj.connect("value_changed", self._vadj_adjusted_cb,
self.vadj)
self._main_area.drag_menu_cb(self._sw, True)
self._main_area.drag_menu_cb(self._sw, False)
self._undo.unblock()
def _hadj_adjusted_cb(self, adj, data=None):
self._main_area.hadj = adj.get_value()
stop_editing(self._main_area)
def _vadj_adjusted_cb(self, adj, data=None):
self._main_area.vadj = adj.get_value()
stop_editing(self._main_area)
def __configure_cb(self, event):
''' Screen size has changed '''
self._vbox.set_size_request(
Gdk.Screen.width(),
Gdk.Screen.height() - style.GRID_CELL_SIZE)
self._vbox.show()
self.action_buttons.reconfigure()
self.show_all()
def __text_selection_cb(self, thought, start, end, text):
"""Update state of edit buttons based on text selection
"""
self.__change_erase_state(True)
if start != end:
self.__change_copy_state(True)
self.text_format_toolbar.change_active_font()
else:
self.__change_copy_state(False)
        if (self._mode == MMapArea.MODE_TEXT and
                len(self._main_area.selected)):
# With textview, we are always editing
# and self._main_area.selected[0].editing):
self.__change_paste_state(True)
else:
self.__change_paste_state(False)
# TODO: implement copy/paste for a whole thought or thoughts
def __thought_selected_cb(self, arg, background_color, foreground_color):
"""Disable copy button if whole thought object is selected
"""
self.__change_erase_state(True)
self.__change_copy_state(False)
self.__change_paste_state(False)
def __change_erase_state(self, state):
self.edit_toolbar.erase_button.set_sensitive(state)
def __change_copy_state(self, state):
self.edit_toolbar.copy.set_sensitive(state)
def __change_paste_state(self, state):
self.edit_toolbar.paste.set_sensitive(state)
def __expose(self, widget, context):
"""Create canvas hint message at start
"""
thought_count = len(self._main_area.thoughts)
if thought_count > 0:
return False
pango_context = self._main_area.pango_context
layout = Pango.Layout(pango_context)
context.set_source_rgb(0.6, 0.6, 0.6)
context.set_line_width(4.0)
context.set_dash([10.0, 5.0], 0.0)
geom = list(self._main_area.window.get_geometry())
geom[3] = geom[3] - ((self.get_window().get_geometry()[3] - geom[3]) / 2)
# Make sure initial thought is "above the fold"
if geom[2] < geom[3]:
xf = 2
yf = 4
else:
xf = 4
yf = 2
layout.set_alignment(Pango.Alignment.CENTER)
text = _('Click to add\ncentral thought')
layout.set_text(text, len(text))
width, height = layout.get_pixel_size()
context.rectangle(geom[2] / xf - (width / 2), geom[3] / yf - (height / 2), layout.get_width(), layout.get_height())
PangoCairo.show_layout(context, layout)
round = 40
ul = (geom[2] / xf - (width / 2) - round,
geom[3] / yf - (height / 2) - round)
lr = (geom[2] / xf + (width / 2) + round,
geom[3] / yf + (height / 2) + round)
context.move_to(ul[0], ul[1] + round)
context.line_to(ul[0], lr[1] - round)
context.curve_to(ul[0], lr[1], ul[0], lr[1], ul[0] + round, lr[1])
context.line_to(lr[0] - round, lr[1])
context.curve_to(lr[0], lr[1], lr[0], lr[1], lr[0], lr[1] - round)
context.line_to(lr[0], ul[1] + round)
context.curve_to(lr[0], ul[1], lr[0], ul[1], lr[0] - round, ul[1])
context.line_to(ul[0] + round, ul[1])
context.curve_to(ul[0], ul[1], ul[0], ul[1], ul[0], ul[1] + round)
context.stroke()
return False
def __centre(self):
bounds = self.__get_thought_bounds()
self._main_area.translation[0] = bounds['x']
self._main_area.translation[1] = bounds['y']
self._main_area.invalidate()
return False
def mode_cb(self, button, mode):
self.action_buttons.stop_dragging()
if self._mode == MMapArea.MODE_TEXT:
if len(self._main_area.selected) > 0:
self._main_area.selected[0].leave()
self._mode = mode
self._main_area.set_mode(self._mode)
# self.edit_toolbar.erase_button.set_sensitive(True)
def __export_pdf_cb(self, event):
maxx, maxy = self._main_area.get_max_area()
true_width = int(maxx)
true_height = int(maxy)
# Create the new journal entry
fileObject = datastore.create()
act_meta = self.metadata
fileObject.metadata['title'] = act_meta['title'] + ' (PDF)'
fileObject.metadata['title_set_by_user'] = \
act_meta['title_set_by_user']
fileObject.metadata['mime_type'] = 'application/pdf'
# TODO: add text thoughts into fulltext metadata
# fileObject.metadata['fulltext'] = ...
fileObject.metadata['icon-color'] = act_meta['icon-color']
fileObject.file_path = os.path.join(self.get_activity_root(),
'instance', '%i' % time.time())
filename = fileObject.file_path
surface = cairo.PDFSurface(filename, true_width, true_height)
cairo_context = cairo.Context(surface)
context = Pango.create_context(cairo_context)
self._main_area.export(context, true_width, true_height, False)
surface.finish()
datastore.write(fileObject, transfer_ownership=True)
fileObject.destroy()
del fileObject
def __export_png_cb(self, event):
maxx, maxy = self._main_area.get_max_area()
true_width = int(maxx)
true_height = int(maxy)
# Create the new journal entry
fileObject = datastore.create()
act_meta = self.metadata
fileObject.metadata['title'] = act_meta['title'] + ' (PNG)'
fileObject.metadata['title_set_by_user'] = \
act_meta['title_set_by_user']
fileObject.metadata['mime_type'] = 'image/png'
fileObject.metadata['icon-color'] = act_meta['icon-color']
fileObject.file_path = os.path.join(self.get_activity_root(),
'instance', '%i' % time.time())
filename = fileObject.file_path
        # The old GTK2 Pixmap/Pixbuf capture that used to live here is not
        # available under GTK3; render through cairo instead, mirroring the
        # code path used by __export_pdf_cb.
        surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, true_width,
                                     true_height)
        cairo_context = cairo.Context(surface)
        context = Pango.create_context(cairo_context)
        self._main_area.export(context, true_width, true_height, False)
        surface.flush()
        surface.write_to_png(filename)
datastore.write(fileObject, transfer_ownership=True)
fileObject.destroy()
del fileObject
def __main_area_focus_cb(self, arg, event, extended=False):
# Don't steal focus from textview
# self._main_area.grab_focus()
pass
def read_file(self, file_path):
tar = Tarball(file_path)
doc = dom.parseString(tar.read(tar.getnames()[0]))
top_element = doc.documentElement
self.set_title(top_element.getAttribute("title"))
self._mode = int(top_element.getAttribute("mode"))
self._main_area.set_mode(self._mode)
self._main_area.load_thyself(top_element, doc, tar)
if top_element.hasAttribute("scale_factor"):
fac = float(top_element.getAttribute("scale_factor"))
self._main_area.scale_fac = fac
if top_element.hasAttribute("translation"):
tmp = top_element.getAttribute("translation")
x, y = utils.parse_coords(tmp)
self._main_area.translation = [x, y]
tar.close()
def write_file(self, file_path):
tar = Tarball(file_path, 'w')
self._main_area.update_save()
manifest = self.serialize_to_xml(self._main_area.save,
self._main_area.element)
tar.write('MANIFEST', manifest)
self._main_area.save_thyself(tar)
tar.close()
def serialize_to_xml(self, doc, top_element):
top_element.setAttribute("title", self.props.title)
top_element.setAttribute("mode", str(self._mode))
top_element.setAttribute("size", str((400, 400)))
top_element.setAttribute("position", str((0, 0)))
top_element.setAttribute("maximised", str(True))
top_element.setAttribute("view_type", str(0))
top_element.setAttribute("pane_position", str(500))
top_element.setAttribute("scale_factor",
str(self._main_area.scale_fac))
top_element.setAttribute("translation",
str(self._main_area.translation))
string = doc.toxml()
return string.encode("utf-8")
|
gpl-2.0
| -8,125,047,139,919,657,000 | 36.151174 | 123 | 0.580397 | false |
wkschwartz/django
|
django/utils/http.py
|
1
|
17700
|
import base64
import calendar
import datetime
import re
import unicodedata
import warnings
from binascii import Error as BinasciiError
from email.utils import formatdate
from urllib.parse import (
ParseResult, SplitResult, _coerce_args, _splitnetloc, _splitparams, quote,
quote_plus, scheme_chars, unquote, unquote_plus,
urlencode as original_urlencode, uses_params,
)
from django.utils.datastructures import MultiValueDict
from django.utils.deprecation import RemovedInDjango40Warning
from django.utils.functional import keep_lazy_text
from django.utils.regex_helper import _lazy_re_compile
# based on RFC 7232, Appendix C
ETAG_MATCH = _lazy_re_compile(r'''
\A( # start of string and capture group
(?:W/)? # optional weak indicator
" # opening quote
[^"]* # any sequence of non-quote characters
" # end quote
)\Z # end of string and capture group
''', re.X)
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = _lazy_re_compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = _lazy_re_compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = _lazy_re_compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
RFC3986_GENDELIMS = ":/?#[]@"
RFC3986_SUBDELIMS = "!$&'()*+,;="
@keep_lazy_text
def urlquote(url, safe='/'):
"""
A legacy compatibility wrapper to Python's urllib.parse.quote() function.
(was used for unicode handling on Python 2)
"""
warnings.warn(
'django.utils.http.urlquote() is deprecated in favor of '
'urllib.parse.quote().',
RemovedInDjango40Warning, stacklevel=2,
)
return quote(url, safe)
@keep_lazy_text
def urlquote_plus(url, safe=''):
"""
A legacy compatibility wrapper to Python's urllib.parse.quote_plus()
function. (was used for unicode handling on Python 2)
"""
warnings.warn(
'django.utils.http.urlquote_plus() is deprecated in favor of '
'urllib.parse.quote_plus(),',
RemovedInDjango40Warning, stacklevel=2,
)
return quote_plus(url, safe)
@keep_lazy_text
def urlunquote(quoted_url):
"""
A legacy compatibility wrapper to Python's urllib.parse.unquote() function.
(was used for unicode handling on Python 2)
"""
warnings.warn(
'django.utils.http.urlunquote() is deprecated in favor of '
'urllib.parse.unquote().',
RemovedInDjango40Warning, stacklevel=2,
)
return unquote(quoted_url)
@keep_lazy_text
def urlunquote_plus(quoted_url):
"""
A legacy compatibility wrapper to Python's urllib.parse.unquote_plus()
function. (was used for unicode handling on Python 2)
"""
warnings.warn(
'django.utils.http.urlunquote_plus() is deprecated in favor of '
'urllib.parse.unquote_plus().',
RemovedInDjango40Warning, stacklevel=2,
)
return unquote_plus(quoted_url)
def urlencode(query, doseq=False):
"""
A version of Python's urllib.parse.urlencode() function that can operate on
MultiValueDict and non-string values.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
query_params = []
for key, value in query:
if value is None:
raise TypeError(
"Cannot encode None for key '%s' in a query string. Did you "
"mean to pass an empty string or omit the value?" % key
)
elif not doseq or isinstance(value, (str, bytes)):
query_val = value
else:
try:
itr = iter(value)
except TypeError:
query_val = value
else:
# Consume generators and iterators, when doseq=True, to
# work around https://bugs.python.org/issue31706.
query_val = []
for item in itr:
if item is None:
raise TypeError(
"Cannot encode None for key '%s' in a query "
"string. Did you mean to pass an empty string or "
"omit the value?" % key
)
elif not isinstance(item, bytes):
item = str(item)
query_val.append(item)
query_params.append((key, query_val))
return original_urlencode(query_params, doseq)
def http_date(epoch_seconds=None):
"""
Format the time to match the RFC1123 date format as specified by HTTP
RFC7231 section 7.1.1.1.
`epoch_seconds` is a floating point number expressed in seconds since the
epoch, in UTC - such as that outputted by time.time(). If set to None, it
defaults to the current time.
Output a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
return formatdate(epoch_seconds, usegmt=True)
def parse_http_date(date):
"""
Parse a date format as specified by HTTP RFC7231 section 7.1.1.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Return an integer expressed in seconds since the epoch, in UTC.
"""
# email.utils.parsedate() does the job for RFC1123 dates; unfortunately
# RFC7231 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m['year'])
if year < 100:
current_year = datetime.datetime.utcnow().year
current_century = current_year - (current_year % 100)
if year - (current_year % 100) > 50:
# year that appears to be more than 50 years in the future are
# interpreted as representing the past.
year += current_century - 100
else:
year += current_century
month = MONTHS.index(m['mon'].lower()) + 1
day = int(m['day'])
hour = int(m['hour'])
min = int(m['min'])
sec = int(m['sec'])
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception as exc:
raise ValueError("%r is not a valid date" % date) from exc
def parse_http_date_safe(date):
"""
Same as parse_http_date, but return None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
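# Editor's illustrative sketch (not part of the original module): http_date()
# and parse_http_date() round-trip an epoch timestamp through the RFC 1123 form.
#
#   >>> http_date(0)
#   'Thu, 01 Jan 1970 00:00:00 GMT'
#   >>> parse_http_date('Thu, 01 Jan 1970 00:00:00 GMT')
#   0
#   >>> parse_http_date_safe('not a date') is None
#   True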
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
Convert a base 36 string to an int. Raise ValueError if the input won't fit
into an int.
"""
# To prevent overconsumption of server resources, reject any
# base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
return int(s, 36)
def int_to_base36(i):
"""Convert an integer to a base36 string."""
char_set = '0123456789abcdefghijklmnopqrstuvwxyz'
if i < 0:
raise ValueError("Negative base36 conversion input.")
if i < 36:
return char_set[i]
b36 = ''
while i != 0:
i, n = divmod(i, 36)
b36 = char_set[n] + b36
return b36
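# Editor's illustrative sketch (not part of the original module):
#
#   >>> int_to_base36(35)
#   'z'
#   >>> int_to_base36(36)
#   '10'
#   >>> base36_to_int('10')
#   36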
def urlsafe_base64_encode(s):
"""
Encode a bytestring to a base64 string for use in URLs. Strip any trailing
equal signs.
"""
return base64.urlsafe_b64encode(s).rstrip(b'\n=').decode('ascii')
def urlsafe_base64_decode(s):
"""
Decode a base64 encoded string. Add back any trailing equal signs that
might have been stripped.
"""
s = s.encode()
try:
return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
except (LookupError, BinasciiError) as e:
raise ValueError(e)
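# Editor's illustrative sketch (not part of the original module): the two
# helpers are inverses of each other.
#
#   >>> urlsafe_base64_encode(b'abc')
#   'YWJj'
#   >>> urlsafe_base64_decode('YWJj')
#   b'abc'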
def parse_etags(etag_str):
"""
Parse a string of ETags given in an If-None-Match or If-Match header as
defined by RFC 7232. Return a list of quoted ETags, or ['*'] if all ETags
should be matched.
"""
if etag_str.strip() == '*':
return ['*']
else:
# Parse each ETag individually, and return any that are valid.
etag_matches = (ETAG_MATCH.match(etag.strip()) for etag in etag_str.split(','))
return [match[1] for match in etag_matches if match]
def quote_etag(etag_str):
"""
If the provided string is already a quoted ETag, return it. Otherwise, wrap
the string in quotes, making it a strong ETag.
"""
if ETAG_MATCH.match(etag_str):
return etag_str
else:
return '"%s"' % etag_str
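# Editor's illustrative sketch (not part of the original module); relies on the
# ETAG_MATCH pattern defined earlier in this file.
#
#   >>> parse_etags('"abc", W/"def", unquoted')
#   ['"abc"', 'W/"def"']
#   >>> quote_etag('abc')
#   '"abc"'
#   >>> quote_etag('"abc"')
#   '"abc"'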
def is_same_domain(host, pattern):
"""
Return ``True`` if the host is either an exact match or a match
to the wildcard pattern.
Any pattern beginning with a period matches a domain and all of its
subdomains. (e.g. ``.example.com`` matches ``example.com`` and
``foo.example.com``). Anything else is an exact string match.
"""
if not pattern:
return False
pattern = pattern.lower()
return (
pattern[0] == '.' and (host.endswith(pattern) or host == pattern[1:]) or
pattern == host
)
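# Editor's illustrative sketch (not part of the original module):
#
#   >>> is_same_domain('foo.example.com', '.example.com')
#   True
#   >>> is_same_domain('example.com', '.example.com')
#   True
#   >>> is_same_domain('badexample.com', '.example.com')
#   False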
def url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False):
"""
Return ``True`` if the url uses an allowed host and a safe scheme.
Always return ``False`` on an empty url.
If ``require_https`` is ``True``, only 'https' will be considered a valid
scheme, as opposed to 'http' and 'https' with the default, ``False``.
Note: "True" doesn't entail that a URL is "safe". It may still be e.g.
    quoted incorrectly. Be sure to also use django.utils.encoding.iri_to_uri()
on the path component of untrusted URLs.
"""
if url is not None:
url = url.strip()
if not url:
return False
if allowed_hosts is None:
allowed_hosts = set()
elif isinstance(allowed_hosts, str):
allowed_hosts = {allowed_hosts}
# Chrome treats \ completely as / in paths but it could be part of some
# basic auth credentials so we need to check both URLs.
return (
_url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=require_https) and
_url_has_allowed_host_and_scheme(url.replace('\\', '/'), allowed_hosts, require_https=require_https)
)
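# Editor's illustrative sketch (not part of the original module): relative paths
# are allowed, while foreign hosts and (with require_https) non-HTTPS schemes
# are rejected.
#
#   >>> url_has_allowed_host_and_scheme('/next/page', allowed_hosts=None)
#   True
#   >>> url_has_allowed_host_and_scheme('https://evil.com/', allowed_hosts={'example.com'})
#   False
#   >>> url_has_allowed_host_and_scheme('http://example.com/', allowed_hosts={'example.com'},
#   ...                                 require_https=True)
#   False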
def is_safe_url(url, allowed_hosts, require_https=False):
warnings.warn(
'django.utils.http.is_safe_url() is deprecated in favor of '
'url_has_allowed_host_and_scheme().',
RemovedInDjango40Warning, stacklevel=2,
)
return url_has_allowed_host_and_scheme(url, allowed_hosts, require_https)
# Copied from urllib.parse.urlparse() but uses fixed urlsplit() function.
def _urlparse(url, scheme='', allow_fragments=True):
"""Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
splitresult = _urlsplit(url, scheme, allow_fragments)
scheme, netloc, url, query, fragment = splitresult
if scheme in uses_params and ';' in url:
url, params = _splitparams(url)
else:
params = ''
result = ParseResult(scheme, netloc, url, params, query, fragment)
return _coerce_result(result)
# Copied from urllib.parse.urlsplit() with
# https://github.com/python/cpython/pull/661 applied.
def _urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
netloc = query = fragment = ''
i = url.find(':')
if i > 0:
for c in url[:i]:
if c not in scheme_chars:
break
else:
scheme, url = url[:i].lower(), url[i + 1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
return _coerce_result(v)
def _url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False):
# Chrome considers any URL with more than two slashes to be absolute, but
# urlparse is not so flexible. Treat any url with three slashes as unsafe.
if url.startswith('///'):
return False
try:
url_info = _urlparse(url)
except ValueError: # e.g. invalid IPv6 addresses
return False
# Forbid URLs like http:///example.com - with a scheme, but without a hostname.
    # In that URL, example.com is not the hostname but a path component. However,
# Chrome will still consider example.com to be the hostname, so we must not
# allow this syntax.
if not url_info.netloc and url_info.scheme:
return False
# Forbid URLs that start with control characters. Some browsers (like
# Chrome) ignore quite a few control characters at the start of a
# URL and might consider the URL as scheme relative.
if unicodedata.category(url[0])[0] == 'C':
return False
scheme = url_info.scheme
# Consider URLs without a scheme (e.g. //example.com/p) to be http.
if not url_info.scheme and url_info.netloc:
scheme = 'http'
valid_schemes = ['https'] if require_https else ['http', 'https']
return ((not url_info.netloc or url_info.netloc in allowed_hosts) and
(not scheme or scheme in valid_schemes))
# TODO: Remove when dropping support for PY37.
def parse_qsl(
qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8',
errors='replace', max_num_fields=None, separator='&',
):
"""
Return a list of key/value tuples parsed from query string.
Backport of urllib.parse.parse_qsl() from Python 3.8.8.
Copyright (C) 2021 Python Software Foundation (see LICENSE.python).
----
Parse a query given as a string argument.
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
strict_parsing: flag indicating what to do with parsing errors. If false
(the default), errors are silently ignored. If true, errors raise a
ValueError exception.
encoding and errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
max_num_fields: int. If set, then throws a ValueError if there are more
than n fields read by parse_qsl().
separator: str. The symbol to use for separating the query arguments.
Defaults to &.
Returns a list, as G-d intended.
"""
qs, _coerce_result = _coerce_args(qs)
if not separator or not isinstance(separator, (str, bytes)):
raise ValueError('Separator must be of type string or bytes.')
# If max_num_fields is defined then check that the number of fields is less
# than max_num_fields. This prevents a memory exhaustion DOS attack via
# post bodies with many fields.
if max_num_fields is not None:
num_fields = 1 + qs.count(separator)
if max_num_fields < num_fields:
raise ValueError('Max number of fields exceeded')
pairs = [s1 for s1 in qs.split(separator)]
r = []
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError("bad query field: %r" % (name_value,))
# Handle case of a control-name with no equal sign.
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = unquote(name, encoding=encoding, errors=errors)
name = _coerce_result(name)
value = nv[1].replace('+', ' ')
value = unquote(value, encoding=encoding, errors=errors)
value = _coerce_result(value)
r.append((name, value))
return r
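# Editor's illustrative sketch (not part of the original module):
#
#   >>> parse_qsl('a=1&b=&b=2', keep_blank_values=True)
#   [('a', '1'), ('b', ''), ('b', '2')]
#   >>> parse_qsl('a=1;b=2', separator=';')
#   [('a', '1'), ('b', '2')]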
def escape_leading_slashes(url):
"""
If redirecting to an absolute path (two leading slashes), a slash must be
escaped to prevent browsers from handling the path as schemaless and
redirecting to another host.
"""
if url.startswith('//'):
url = '/%2F{}'.format(url[2:])
return url
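# Editor's illustrative sketch (not part of the original module):
#
#   >>> escape_leading_slashes('//evil.example.com/path')
#   '/%2Fevil.example.com/path'
#   >>> escape_leading_slashes('/safe/path')
#   '/safe/path'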
|
bsd-3-clause
| -1,610,993,750,533,662,700 | 34.329341 | 108 | 0.621356 | false |
Ignalion/bookshelf
|
app/endpoints/booksauthors.py
|
1
|
5914
|
"""
All book-related views are defined below:
BookList
AuthorList
AddEditBook
AddEditAuthor
"""
from flask import (
render_template,
request,
redirect,
url_for,
views,
)
from flask_login import login_required, current_user
from app.lib.abstract import BookAbstraction, AuthorAbstraction
from app.forms import (
AddEditBookForm,
AddEditAuthorForm,
BookListForm,
AuthorListForm
)
class BookList(views.View):
    """ Lists all the books for the currently logged-in user """
methods = ('GET', 'POST')
@login_required
def dispatch_request(self, t="booklist.html"):
book_mgr = BookAbstraction()
books = book_mgr.get_book_list(current_user)
form = BookListForm()
if request.method == 'GET':
for book_obj in books:
form.books.append_entry(book_obj)
                # Dirty hack: stash the model id on the entry so POST can identify it.
form.books[-1].book_id.data = book_obj.id
if request.method == 'POST':
            # A list comprehension keeps this working on Python 3 as well, where
            # filter() returns an iterator and would not support indexing.
            target = [
                book for book in form.books
                if any((book.data['edit'], book.data['delete']))][0]
if target.edit.data:
return redirect(url_for('editbook',
book_id=target.data['book_id']))
elif target.delete.data:
book_mgr.delete(id=int(target.data['book_id']))
return redirect(url_for('booklist'))
return render_template(t,
form=form,
page_title='List of books',
user=current_user)
class AuthorList(views.View):
    """ Lists all the authors for the currently logged-in user """
methods = ('GET', 'POST')
@login_required
def dispatch_request(self, author_id=None, t="authorlist.html"):
author_mgr = AuthorAbstraction()
authors = author_mgr.get_author_list(current_user)
form = AuthorListForm()
if request.method == 'GET':
for author_obj in authors:
form.authors.append_entry(author_obj)
                # Same hack as in BookList: stash the model id on the entry.
form.authors[-1].author_id.data = author_obj.id
if request.method == 'POST':
            # Same Python 2/3-safe selection as in BookList above.
            target = [
                author for author in form.authors
                if any((author.data['edit'], author.data['delete']))][0]
if target.edit.data:
return redirect(url_for('editauthor',
author_id=target.data['author_id']))
elif target.delete.data:
author_mgr.delete(id=int(target.data['author_id']))
return redirect(url_for('authorlist'))
return render_template(t,
form=form,
page_title='List of authors',
user=current_user)
class AddEditBook(views.View):
methods = ('GET', 'POST')
@login_required
def dispatch_request(self, book_id=None, t="addbook.html"):
book_mgr = BookAbstraction()
form = AddEditBookForm(request.form, current_user)
form.authors.choices = [
(str(a.id), a.name) for a in current_user.authors
]
page_title = 'Add book'
if book_id is not None and request.method == 'GET':
book = current_user.books.filter(
book_mgr.model.id == book_id).one()
form.authors.default = [str(a.id) for a in book.authors]
form.process()
form.new_book.data = book.title
form.submit.label.text = 'Edit book'
page_title = 'Edit book'
if request.method == 'POST':
if book_id is not None:
                # If validation fails, set the title and label again because
                # Flask re-renders the whole form.
form.submit.label.text = 'Edit book'
page_title = 'Edit book'
if form.validate_on_submit():
book = {
'title': form.new_book.data,
'authors': form.authors.data,
'id': book_id
}
book_mgr.add_edit_book(current_user, book)
return redirect(url_for('booklist'))
return render_template(t,
form=form,
page_title=page_title,
user=current_user)
class AddEditAuthor(views.View):
methods = ('GET', 'POST')
@login_required
def dispatch_request(self, author_id=None, t="addauthor.html"):
author_mgr = AuthorAbstraction()
form = AddEditAuthorForm(request.form, current_user)
page_title = 'Add author'
if author_id is not None and request.method == 'GET':
author = current_user.authors.filter(
author_mgr.model.id == author_id).one()
form.new_author.data = author.name
form.submit.label.text = 'Edit author'
page_title = 'Edit author'
if request.method == 'POST':
if author_id is not None:
                # If validation fails, set the title and label again because
                # Flask re-renders the whole form.
form.submit.label.text = 'Edit author'
page_title = 'Edit author'
if form.validate_on_submit():
author = {
'name': form.new_author.data,
'id': author_id,
}
author_mgr.add_edit_author(current_user, author)
return redirect(url_for('authorlist'))
return render_template(t,
form=form,
page_title=page_title,
user=current_user)
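# Editor's illustrative registration sketch (not part of the original project):
# class-based views like the ones above are normally wired up with Flask's
# add_url_rule(). The `app` object and the URL paths below are assumptions; the
# endpoint names mirror the url_for() calls used in this module.
#
# app.add_url_rule('/books', view_func=BookList.as_view('booklist'))
# app.add_url_rule('/books/<int:book_id>', view_func=AddEditBook.as_view('editbook'))
# app.add_url_rule('/authors', view_func=AuthorList.as_view('authorlist'))
# app.add_url_rule('/authors/<int:author_id>', view_func=AddEditAuthor.as_view('editauthor'))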
|
gpl-2.0
| 2,764,888,172,949,385,000 | 32.794286 | 79 | 0.521644 | false |
mollie/mollie-api-python
|
mollie/api/objects/capture.py
|
1
|
1497
|
from .base import Base
class Capture(Base):
@classmethod
def get_resource_class(cls, client):
from ..resources.captures import Captures
return Captures(client)
@property
def id(self):
return self._get_property("id")
@property
def mode(self):
return self._get_property("mode")
@property
def amount(self):
return self._get_property("amount")
@property
def settlement_amount(self):
return self._get_property("settlementAmount")
@property
def payment_id(self):
return self._get_property("paymentId")
@property
def shipment_id(self):
return self._get_property("shipmentId")
@property
def settlement_id(self):
return self._get_property("settlementId")
@property
def created_at(self):
return self._get_property("createdAt")
@property
def payment(self):
"""Return the payment for this capture."""
return self.client.payments.get(self.payment_id)
@property
def shipment(self):
"""Return the shipment for this capture."""
from .shipment import Shipment
url = self._get_link("shipment")
if url:
resp = self.client.orders.perform_api_call(self.client.orders.REST_READ, url)
return Shipment(resp)
@property
def settlement(self):
"""Return the settlement for this capture."""
return self.client.settlements.get(self.settlement_id)
|
bsd-2-clause
| -2,216,945,610,880,563,500 | 23.145161 | 89 | 0.623914 | false |
mitodl/open-discussions
|
sites/migrations/0001_add_authenticated_site.py
|
1
|
2689
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2018-02-09 19:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="AuthenticatedSite",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("created_on", models.DateTimeField(auto_now_add=True)),
("updated_on", models.DateTimeField(auto_now=True)),
(
"key",
models.CharField(
help_text="Key to lookup site in JWT token, must match exactly the key set by the authenticating site",
max_length=20,
),
),
(
"title",
models.CharField(
help_text="Name of site to display in discussions",
max_length=50,
),
),
(
"base_url",
models.URLField(
help_text="Base url / home page for the site (e.g. http://my.site.domain/)",
verbose_name="External Base URL",
),
),
(
"login_url",
models.URLField(
                        help_text="This URL should require a user to log in and then redirect back to discussions (e.g. http://my.site.domain/discussions)",
verbose_name="External Login URL",
),
),
(
"session_url",
models.URLField(
help_text="The URL where discussions can request a new session (e.g. http://my.site.domain/discussionsToken)",
verbose_name="External Session URL",
),
),
(
"tos_url",
models.URLField(
                        help_text="The URL where discussions can link the user to view the site's TOS (e.g. http://my.site.domain/terms-of-service)",
verbose_name="External TOS URL",
),
),
],
options={"abstract": False},
)
]
|
bsd-3-clause
| -8,328,817,798,779,035,000 | 35.337838 | 155 | 0.410933 | false |
kevgliss/historical
|
historical/s3/poller.py
|
1
|
3444
|
"""
.. module: historical.s3.poller
:platform: Unix
:copyright: (c) 2017 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. author:: Mike Grima <mgrima@netflix.com>
"""
import os
import uuid
import logging
import boto3
from botocore.exceptions import ClientError
from cloudaux.aws.s3 import list_buckets
from raven_python_lambda import RavenLambdaWrapper
from historical.constants import CURRENT_REGION, HISTORICAL_ROLE
from historical.s3.models import s3_polling_schema
from historical.common.accounts import get_historical_accounts
logging.basicConfig()
log = logging.getLogger("historical")
log.setLevel(logging.INFO)
def get_record(all_buckets, index, account):
return {
"Data": bytes(s3_polling_schema.serialize_me(account, {
"bucket_name": all_buckets[index]["Name"],
"creation_date": all_buckets[index]["CreationDate"].replace(tzinfo=None, microsecond=0).isoformat() + "Z"
}), "utf-8"),
"PartitionKey": uuid.uuid4().hex
}
def create_polling_event(account, stream):
# Place onto the S3 Kinesis stream each S3 bucket for each account...
# This should probably fan out on an account-by-account basis (we'll need to examine if this is an issue)
all_buckets = list_buckets(account_number=account,
assume_role=HISTORICAL_ROLE,
session_name="historical-cloudwatch-s3list",
region=CURRENT_REGION)["Buckets"]
client = boto3.client("kinesis", region_name=CURRENT_REGION)
# Need to add all buckets into the stream:
limiter = int(os.environ.get("MAX_BUCKET_BATCH", 50))
current_batch = 1
total_batch = int(len(all_buckets) / limiter)
remainder = len(all_buckets) % limiter
offset = 0
while current_batch <= total_batch:
records = []
while offset < (limiter * current_batch):
records.append(get_record(all_buckets, offset, account))
offset += 1
client.put_records(Records=records, StreamName=stream)
current_batch += 1
# Process remainder:
if remainder:
records = []
while offset < len(all_buckets):
records.append(get_record(all_buckets, offset, account))
offset += 1
client.put_records(Records=records, StreamName=stream)
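# Editor's illustrative sketch (not part of the original module): the batching
# arithmetic above, shown standalone. With the default limit of 50, a list of
# 120 buckets is sent as batches of 50, 50 and 20 records.
def _example_batch_sizes(total, limit=50):
    full, remainder = divmod(total, limit)
    return [limit] * full + ([remainder] if remainder else [])
# _example_batch_sizes(120) == [50, 50, 20]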
@RavenLambdaWrapper()
def handler(event, context):
"""
Historical S3 event poller.
This poller is run at a set interval in order to ensure that changes do not go undetected by historical.
Historical pollers generate `polling events` which simulate changes. These polling events contain configuration
data such as the account/region defining where the collector should attempt to gather data from.
"""
log.debug('Running poller. Configuration: {}'.format(event))
for account in get_historical_accounts():
# Skip accounts that have role assumption errors:
try:
create_polling_event(account['id'], os.environ.get("HISTORICAL_STREAM", "HistoricalS3PollerStream"))
except ClientError as e:
log.warning('Unable to generate events for account. AccountId: {account_id} Reason: {reason}'.format(
account_id=account['id'],
reason=e
))
log.debug('Finished generating polling events. Events Created: {}'.format(len(account['id'])))
|
apache-2.0
| 8,197,142,188,748,092,000 | 35.638298 | 117 | 0.664634 | false |
cysuncn/python
|
spark/crm/PROC_M_R_RET_CUST_ASSETS.py
|
1
|
4509
|
#coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
st = datetime.now()
conf = SparkConf().setAppName('PROC_M_R_RET_CUST_ASSETS').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5:
if sys.argv[5] == "hive":
sqlContext = HiveContext(sc)
else:
sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]
# Dates used in the processing below
etl_date = sys.argv[1]
# ETL date
V_DT = etl_date
# Previous day's date
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
# First day of the current month
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
# Last day of the previous month
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
# 10-character date (YYYY-MM-DD)
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
V_STEP = 0
#---------------------------------------------- Business logic begins ----------------------------------------------------------
# Source tables
OCRM_F_CI_PER_CUST_INFO = sqlContext.read.parquet(hdfs+'/OCRM_F_CI_PER_CUST_INFO/*')
OCRM_F_CI_PER_CUST_INFO.registerTempTable("OCRM_F_CI_PER_CUST_INFO")
OCRM_F_CUST_ORG_MGR = sqlContext.read.parquet(hdfs+'/OCRM_F_CUST_ORG_MGR/*')
OCRM_F_CUST_ORG_MGR.registerTempTable("OCRM_F_CUST_ORG_MGR")
TMP_PER_ASSETS_SUM = sqlContext.read.parquet(hdfs+'/TMP_PER_ASSETS_SUM/*')
TMP_PER_ASSETS_SUM.registerTempTable("TMP_PER_ASSETS_SUM")
# Target table
# MCRM_RET_CUST_ASSETS: full snapshot table
# Task [21] 001-01::
V_STEP = V_STEP + 1
sql="""
SELECT
A.CUST_ID as CUST_ID
,A.CUST_NAME as CUST_ZH_NAME
,C.MGR_ID as CUST_MANAGER
,C.MGR_NAME as CUST_MANAGER_NAME
,C.ORG_ID as ORG_ID
,C.ORG_NAME as ORG_NAME
,A.CUST_SEX as SEX
,cast(case when A.CUST_BIR = '' or A.CUST_BIR is null then 0
when SUBSTR (A.CUST_BIR, 3, 1) = '-' then (SUBSTR(V_DT,1,4) - SUBSTR(A.CUST_BIR, 7, 4))
when SUBSTR (A.CUST_BIR, 5, 1) = '-' then (SUBSTR(V_DT,1,4) - SUBSTR(A.CUST_BIR, 1, 4))
else 0 end as INTEGER) AS AGE
,A.CUST_EDU_LVL_COD as EDUCATION
,C.OBJ_RATING as CUST_LEVEL
,cast(COALESCE(D.MONTH_BAL,0) as DECIMAL(24,6)) as MONTH_BAL
,cast(COALESCE(D.MONTH_AVG_BAL,0) as DECIMAL(24,6)) as MONTH_AVG_BAL
,cast(COALESCE(D.THREE_MONTH_AVG_BAL,0) as DECIMAL(24,6)) as THREE_MONTH_AVG_BAL
,cast(COALESCE(D.LAST_MONTH_BAL,0) as DECIMAL(24,6)) as LAST_MONTH_BAL
,cast(COALESCE(D.LAST_MONTH_AVG_BAL,0) as DECIMAL(24,6)) as LAST_MONTH_AVG_BAL
,cast(COALESCE(D.LTHREE_MONTH_AVG_BAL,0) as DECIMAL(24,6)) as LTHREE_MONTH_AVG_BAL
,cast(COALESCE(D.YEAR_BAL,0) as DECIMAL(24,6)) as YEAR_BAL
,cast(COALESCE(D.YEAR_AVG_BAL,0) as DECIMAL(24,6)) as YEAR_AVG_BAL
,cast(COALESCE(D.YEAR_THREE_AVG_BAL,0) as DECIMAL(24,6)) as YEAR_THREE_AVG_BAL
,V_DT as ST_DATE
,C.M_MAIN_TYPE as MAIN_TYPE
,C.O_MAIN_TYPE as O_MAIN_TYPE
,C.OBJ_DATE as GRADE_DATE
,C.OLD_OBJ_RATING as OLD_CUST_LEVEL
,A.FR_ID as FR_ID
FROM OCRM_F_CI_PER_CUST_INFO A
INNER JOIN OCRM_F_CUST_ORG_MGR C ON A.CUST_ID = C.CUST_ID AND CUST_TYP='1' AND O_MAIN_TYPE ='1' AND C.FR_ID =A.FR_ID
LEFT JOIN TMP_PER_ASSETS_SUM D ON A.CUST_ID = D.CUST_ID AND D.FR_ID =A.FR_ID
-- WHERE A.FR_ID=V_FR_ID
"""
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
MCRM_RET_CUST_ASSETS = sqlContext.sql(sql)
dfn="MCRM_RET_CUST_ASSETS/"+V_DT+".parquet"
MCRM_RET_CUST_ASSETS.write.save(path=hdfs + '/' + dfn, mode='overwrite')
# Full snapshot table: after saving, delete the previous day's data
ret = os.system("hdfs dfs -rm -r /"+dbname+"/MCRM_RET_CUST_ASSETS/"+V_DT_LD+".parquet")
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds)
|
gpl-3.0
| -4,341,465,601,094,905,300 | 45.691489 | 127 | 0.548189 | false |
rsmmr/hilti
|
doc/scripts/hlt.py
|
1
|
5732
|
# -*- coding: utf-8 -*-
"""
The Hlt domain for Sphinx.
"""
def setup(Sphinx):
Sphinx.add_domain(HltDomain)
from sphinx import addnodes
from sphinx.domains import Domain, ObjType
from sphinx.locale import l_, _
from sphinx.directives import ObjectDescription
from sphinx.roles import XRefRole
from sphinx.util.nodes import make_refnode
from sphinx import version_info
from docutils import nodes
# Wrapper for creating a tuple for index nodes, staying backwards
# compatible to Sphinx < 1.4:
def make_index_tuple(indextype, indexentry, targetname, targetname2):
if version_info >= (1, 4, 0, '', 0):
return (indextype, indexentry, targetname, targetname2, None)
else:
return (indextype, indexentry, targetname, targetname2)
class HltGeneric(ObjectDescription):
def add_target_and_index(self, name, sig, signode):
targetname = self.objtype + '-' + name
if targetname not in self.state.document.ids:
signode['names'].append(targetname)
signode['ids'].append(targetname)
signode['first'] = (not self.names)
self.state.document.note_explicit_target(signode)
objects = self.env.domaindata['hlt']['objects']
key = (self.objtype, name)
if key in objects:
self.env.warn(self.env.docname,
'duplicate description of %s %s, ' %
(self.objtype, name) +
'other instance in ' +
self.env.doc2path(objects[key]),
self.lineno)
objects[key] = self.env.docname
indextext = self.get_index_text(self.objtype, name)
if indextext:
self.indexnode['entries'].append(make_index_tuple('single', indextext,
targetname, targetname))
def get_index_text(self, objectname, name):
return _('%s (%s)') % (name, self.objtype)
def handle_signature(self, sig, signode):
signode += addnodes.desc_name("", sig)
return sig
class HltInstruction(HltGeneric):
def handle_signature(self, sig, signode):
m = sig.split()
cls = m[0]
name = m[1]
args = m[2:] if len(m) > 2 else []
if len(args) > 1 and args[0] == "target":
args = args[1:] if len(args) > 1 else []
signode += addnodes.desc_addname("", "target = ")
args = " ".join(args)
args = " " + args
desc_name = name
signode += addnodes.desc_name("", desc_name)
if len(args) > 0:
signode += addnodes.desc_addname("", args)
signode += addnodes.desc_addname("", " [%s]" % cls)
return name
class HltType(HltGeneric):
def handle_signature(self, sig, signode):
# Do nothing, we just want an anchor for xrefing.
m = sig.split()
name = m[0]
return name
class HltTypeDef(HltGeneric):
def handle_signature(self, sig, signode):
m = sig.split()
full = m[0]
short = m[1]
ty = m[2]
signode += addnodes.desc_addname("", ty + " ")
signode += addnodes.desc_name("", short)
return full
class HltGlobal(HltGeneric):
def handle_signature(self, sig, signode):
m = sig.split()
full = m[0]
short = m[1]
signode += addnodes.desc_name("", short)
return full
class HltModule(HltGeneric):
def handle_signature(self, sig, signode):
# Do nothing, we just want an anchor for xrefing.
m = sig.split()
name = m[0]
return name
class HltDomain(Domain):
"""Hlt domain."""
name = 'hlt'
label = 'HILTI'
object_types = {
'instruction': ObjType(l_('instruction'), 'ins'),
'operator': ObjType(l_('operator'), 'op'),
'overload': ObjType(l_('overload'), 'ovl'),
'type': ObjType(l_('type'), 'type'),
'typedef': ObjType(l_('typedef'), 'typedef'),
'function': ObjType(l_('function'), 'func'),
'global': ObjType(l_('global'), 'glob'),
'module': ObjType(l_('module'), 'mod'),
}
directives = {
'instruction': HltInstruction,
'operator': HltInstruction,
'overload': HltInstruction,
'type': HltType,
'typedef': HltTypeDef,
'function': HltGeneric,
'global': HltGlobal,
'module': HltModule,
}
roles = {
'ins': XRefRole(),
'op': XRefRole(),
'ovl': XRefRole(),
'type': XRefRole(),
'func': XRefRole(),
'glob': XRefRole(),
'mod': XRefRole(),
}
initial_data = {
'objects': {}, # fullname -> docname, objtype
}
def clear_doc(self, docname):
for (typ, name), doc in self.data['objects'].items():
if doc == docname:
del self.data['objects'][typ, name]
def resolve_xref(self, env, fromdocname, builder, typ, target, node,
contnode):
objects = self.data['objects']
objtypes = self.objtypes_for_role(typ)
for objtype in objtypes:
if (objtype, target) in objects:
return make_refnode(builder, fromdocname,
objects[objtype, target],
objtype + '-' + target,
contnode, target + ' ' + objtype)
def get_objects(self):
for (typ, name), docname in self.data['objects'].items():
yield name, name, typ, docname, typ + '-' + name, 1
|
bsd-3-clause
| -4,610,790,904,439,784,000 | 32.325581 | 82 | 0.532798 | false |
theno/fabsetup
|
fabsetup/fabfile-data/presetting-fabsetup-custom/fabfile_/custom/__init__.py
|
1
|
2213
|
from fabsetup.fabutils import checkup_git_repo_legacy, checkup_git_repos_legacy
from fabsetup.fabutils import install_package, install_packages, flo
from fabsetup.fabutils import install_user_command_legacy, run, suggest_localhost
from fabsetup.fabutils import custom_task as task # here, every task is custom
import config
def users_bin_dir():
'''Put custom commands at '~/bin/'
For the conversion of diagrams into the pdf format:
* dia2pdf, ep2svg, svg2pdf
    * alldia2pdf, allep2svg, allsvg2pdf
'''
# circumvent circular import, cf. http://stackoverflow.com/a/18486863
from fabfile.setup import pencil2
pencil2() # used by ~/bin/ep2svg
install_packages([
'dia',
'inkscape', # used by ~/bin/svg2pdf
'xsltproc', # used by ~/bin/ep2svg
])
commands = [
'alldia2pdf',
'allep2svg',
'allepgz2ep',
'allsvg2pdf',
'dia2pdf',
'ep2svg',
'epgz2ep',
'greypdf',
'svg2pdf'
]
for command in commands:
install_user_command_legacy(command)
@task
@suggest_localhost
def latex():
'''Install all packages and tools required to compile my latex documents.
* Install or update a lot of latex packages.
* Install or update pencil, dia, inkscape, xsltproc for diagrams and
images.
* Install or update util commands for conversion of dia, ep, svg into pdf
files.
* Checkout or update a haw-thesis template git repository which uses all of
the upper mentioned tools.
'''
users_bin_dir()
# circumvent circular import, cf. http://stackoverflow.com/a/18486863
from fabfile.setup import latex
latex()
checkup_git_repo_legacy(
'https://github.com/theno/haw-inf-thesis-template.git')
@task
@suggest_localhost
def repos():
'''Checkout or update (git) repositories, mostly from github.
The repositories are defined in list 'git_repos' in config.py.
'''
checkup_git_repos_legacy(config.git_repos)
@task
@suggest_localhost
def vim():
'''Set up my vim environment.'''
from fabfile.setup import vim
vim()
checkup_git_repos_legacy(config.vim_package_repos, base_dir='~/.vim/bundle')
|
mit
| 7,198,231,537,723,629,000 | 28.118421 | 81 | 0.66742 | false |
salimfadhley/jenkinsapi
|
jenkinsapi_tests/unittests/test_requester.py
|
2
|
12127
|
from __future__ import print_function
import pytest
import requests
from jenkinsapi.jenkins import Requester
from jenkinsapi.custom_exceptions import JenkinsAPIException
from mock import patch
def test_no_parameters_uses_default_values():
req = Requester()
assert isinstance(req, Requester)
assert req.username is None
assert req.password is None
assert req.ssl_verify
assert req.cert is None
assert req.base_scheme is None
assert req.timeout == 10
def test_all_named_parameters():
req = Requester(username='foo', password='bar', ssl_verify=False,
cert='foobar', baseurl='http://dummy', timeout=5)
assert isinstance(req, Requester)
assert req.username == 'foo'
assert req.password == 'bar'
assert not req.ssl_verify
assert req.cert == 'foobar'
assert req.base_scheme == 'http', 'dummy'
assert req.timeout == 5
def test_mix_one_unnamed_named_parameters():
req = Requester('foo', password='bar', ssl_verify=False, cert='foobar',
baseurl='http://dummy', timeout=5)
assert isinstance(req, Requester)
assert req.username == 'foo'
assert req.password == 'bar'
assert not req.ssl_verify
assert req.cert == 'foobar'
assert req.base_scheme == 'http', 'dummy'
assert req.timeout == 5
def test_mix_two_unnamed_named_parameters():
req = Requester('foo', 'bar', ssl_verify=False, cert='foobar',
baseurl='http://dummy', timeout=5)
assert isinstance(req, Requester)
assert req.username == 'foo'
assert req.password == 'bar'
assert not req.ssl_verify
assert req.cert == 'foobar'
assert req.base_scheme == 'http', 'dummy'
assert req.timeout == 5
def test_mix_three_unnamed_named_parameters():
req = Requester('foo', 'bar', False, cert='foobar', baseurl='http://dummy',
timeout=5)
assert isinstance(req, Requester)
assert req.username == 'foo'
assert req.password == 'bar'
assert not req.ssl_verify
assert req.cert == 'foobar'
assert req.base_scheme == 'http', 'dummy'
assert req.timeout == 5
def test_mix_four_unnamed_named_parameters():
req = Requester('foo', 'bar', False, 'foobar', baseurl='http://dummy',
timeout=5)
assert isinstance(req, Requester)
assert req.username == 'foo'
assert req.password == 'bar'
assert not req.ssl_verify
assert req.cert == 'foobar'
assert req.base_scheme == 'http', 'dummy'
assert req.timeout == 5
def test_mix_five_unnamed_named_parameters():
req = Requester('foo', 'bar', False, 'foobar', 'http://dummy', timeout=5)
assert isinstance(req, Requester)
assert req.username == 'foo'
assert req.password == 'bar'
assert not req.ssl_verify
assert req.cert == 'foobar'
assert req.base_scheme == 'http', 'dummy'
assert req.timeout == 5
def test_all_unnamed_parameters():
req = Requester('foo', 'bar', False, 'foobar', 'http://dummy', 5)
assert isinstance(req, Requester)
assert req.username == 'foo'
assert req.password == 'bar'
assert not req.ssl_verify
assert req.cert == 'foobar'
assert req.base_scheme == 'http', 'dummy'
assert req.timeout == 5
def test_too_many_unnamed_parameters_raises_error():
with pytest.raises(Exception):
Requester('foo', 'bar', False, 'foobar', 'http://dummy', 5, 'test')
def test_username_without_password_raises_error():
with pytest.raises(Exception):
Requester(username='foo')
Requester('foo')
def test_password_without_username_raises_error():
with pytest.raises(AssertionError):
Requester(password='bar')
def test_get_request_dict_auth():
req = Requester('foo', 'bar')
req_return = req.get_request_dict(
params={},
data=None,
headers=None
)
assert isinstance(req_return, dict)
assert req_return.get('auth')
assert req_return['auth'] == ('foo', 'bar')
@patch('jenkinsapi.jenkins.Requester.AUTH_COOKIE', 'FAKE')
def test_get_request_dict_cookie():
req = Requester('foo', 'bar')
req_return = req.get_request_dict(
params={},
data=None,
headers=None
)
assert isinstance(req_return, dict)
assert req_return.get('headers')
assert req_return.get('headers').get('Cookie')
assert req_return.get('headers').get('Cookie') == 'FAKE'
@patch('jenkinsapi.jenkins.Requester.AUTH_COOKIE', 'FAKE')
def test_get_request_dict_updatecookie():
req = Requester('foo', 'bar')
req_return = req.get_request_dict(
params={},
data=None,
headers={'key': 'value'}
)
assert isinstance(req_return, dict)
assert req_return.get('headers')
assert req_return.get('headers').get('key')
assert req_return.get('headers').get('key') == 'value'
assert req_return.get('headers').get('Cookie')
assert req_return.get('headers').get('Cookie') == 'FAKE'
def test_get_request_dict_nocookie():
req = Requester('foo', 'bar')
req_return = req.get_request_dict(
params={},
data=None,
headers=None
)
assert isinstance(req_return, dict)
assert not req_return.get('headers')
def test_get_request_dict_wrong_params():
req = Requester('foo', 'bar')
with pytest.raises(AssertionError) as na:
req.get_request_dict(
params='wrong',
data=None,
headers=None
)
assert "Params must be a dict, got 'wrong'" in str(na.value)
def test_get_request_dict_correct_params():
req = Requester('foo', 'bar')
req_return = req.get_request_dict(
params={'param': 'value'},
data=None,
headers=None
)
assert isinstance(req_return, dict)
assert req_return.get('params')
assert req_return['params'] == {'param': 'value'}
def test_get_request_dict_wrong_headers():
req = Requester('foo', 'bar')
with pytest.raises(AssertionError) as na:
req.get_request_dict(
params={},
data=None,
headers='wrong'
)
assert "headers must be a dict, got 'wrong'" in str(na.value)
def test_get_request_dict_correct_headers():
req = Requester('foo', 'bar')
req_return = req.get_request_dict(
params={'param': 'value'},
data=None,
headers={'header': 'value'}
)
assert isinstance(req_return, dict)
assert req_return.get('headers')
assert req_return['headers'] == {'header': 'value'}
def test_get_request_dict_data_passed():
req = Requester('foo', 'bar')
req_return = req.get_request_dict(
params={'param': 'value'},
data='some data',
headers={'header': 'value'}
)
assert isinstance(req_return, dict)
assert req_return.get('data')
assert req_return['data'] == 'some data'
def test_get_request_dict_data_not_passed():
req = Requester('foo', 'bar')
req_return = req.get_request_dict(
params={'param': 'value'},
data=None,
headers={'header': 'value'}
)
assert isinstance(req_return, dict)
assert req_return.get('data') is None
def test_get_url_get(monkeypatch):
def fake_get(*args, **kwargs): # pylint: disable=unused-argument
return 'SUCCESS'
monkeypatch.setattr(requests.Session, 'get', fake_get)
req = Requester('foo', 'bar')
response = req.get_url(
'http://dummy',
params={'param': 'value'},
headers=None)
assert response == 'SUCCESS'
def test_get_url_post(monkeypatch):
def fake_post(*args, **kwargs): # pylint: disable=unused-argument
return 'SUCCESS'
monkeypatch.setattr(requests.Session, 'post', fake_post)
req = Requester('foo', 'bar')
response = req.post_url(
'http://dummy',
params={'param': 'value'},
headers=None)
assert response == 'SUCCESS'
def test_post_xml_empty_xml(monkeypatch):
def fake_post(*args, **kwargs): # pylint: disable=unused-argument
return 'SUCCESS'
monkeypatch.setattr(requests.Session, 'post', fake_post)
req = Requester('foo', 'bar')
with pytest.raises(AssertionError):
req.post_xml_and_confirm_status(
url='http://dummy',
params={'param': 'value'},
data=None
)
def test_post_xml_and_confirm_status_some_xml(monkeypatch):
class FakeResponse(requests.Response):
def __init__(self, *args, **kwargs): # pylint: disable=unused-argument
self.status_code = 200
def fake_post(*args, **kwargs): # pylint: disable=unused-argument
return FakeResponse()
monkeypatch.setattr(requests.Session, 'post', fake_post)
req = Requester('foo', 'bar')
ret = req.post_xml_and_confirm_status(
url='http://dummy',
params={'param': 'value'},
data='<xml/>'
)
assert isinstance(ret, requests.Response)
def test_post_and_confirm_status_empty_data(monkeypatch):
def fake_post(*args, **kwargs): # pylint: disable=unused-argument
return 'SUCCESS'
monkeypatch.setattr(requests.Session, 'post', fake_post)
req = Requester('foo', 'bar')
with pytest.raises(AssertionError):
req.post_and_confirm_status(
url='http://dummy',
params={'param': 'value'},
data=None
)
def test_post_and_confirm_status_some_data(monkeypatch):
class FakeResponse(requests.Response):
def __init__(self, *args, **kwargs): # pylint: disable=unused-argument
self.status_code = 200
def fake_post(*args, **kwargs): # pylint: disable=unused-argument
return FakeResponse()
monkeypatch.setattr(requests.Session, 'post', fake_post)
req = Requester('foo', 'bar')
ret = req.post_and_confirm_status(
url='http://dummy',
params={'param': 'value'},
data='some data'
)
assert isinstance(ret, requests.Response)
def test_post_and_confirm_status_bad_result(monkeypatch):
class FakeResponse(object):
def __init__(self, *args, **kwargs): # pylint: disable=unused-argument
self.status_code = 500
self.url = 'http://dummy'
self.text = 'something'
def fake_post(*args, **kwargs): # pylint: disable=unused-argument
return FakeResponse()
monkeypatch.setattr(requests.Session, 'post', fake_post)
req = Requester('foo', 'bar')
with pytest.raises(JenkinsAPIException) as error:
req.post_and_confirm_status(
url='http://dummy',
params={'param': 'value'},
data='some data'
)
assert 'status=500' in str(error)
def test_get_and_confirm_status(monkeypatch):
class FakeResponse(requests.Response):
def __init__(self, *args, **kwargs): # pylint: disable=unused-argument
self.status_code = 200
def fake_get(*args, **kwargs): # pylint: disable=unused-argument
return FakeResponse()
monkeypatch.setattr(requests.Session, 'get', fake_get)
req = Requester('foo', 'bar')
ret = req.get_and_confirm_status(
url='http://dummy',
params={'param': 'value'}
)
assert isinstance(ret, requests.Response)
def test_get_and_confirm_status_bad_result(monkeypatch):
class FakeResponse(object):
def __init__(self, *args, **kwargs): # pylint: disable=unused-argument
self.status_code = 500
self.url = 'http://dummy'
self.text = 'something'
def fake_get(*args, **kwargs): # pylint: disable=unused-argument
return FakeResponse()
monkeypatch.setattr(requests.Session, 'get', fake_get)
req = Requester('foo', 'bar', baseurl='http://dummy')
with pytest.raises(JenkinsAPIException) as error:
req.get_and_confirm_status(
url='http://dummy',
params={'param': 'value'}
)
assert 'status=500' in str(error)
def test_configure_max_retries():
req = Requester('username', 'password', baseurl='http://dummy', max_retries=3)
for adapter in req.session.adapters.values():
assert adapter.max_retries.total == 3
|
mit
| -207,026,566,923,361,760 | 28.151442 | 82 | 0.61829 | false |
facelessuser/subclrschm
|
setup.py
|
1
|
2691
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Setup package."""
from setuptools import setup, find_packages
import os
import imp
import traceback
def get_version():
"""Get version and version_info without importing the entire module."""
devstatus = {
'alpha': '3 - Alpha',
'beta': '4 - Beta',
'candidate': '4 - Beta',
'final': '5 - Production/Stable'
}
path = os.path.join(os.path.dirname(__file__), 'subclrschm', 'lib')
fp, pathname, desc = imp.find_module('__version__', [path])
try:
v = imp.load_module('__version__', fp, pathname, desc)
return v.version, devstatus[v.version_info[3]]
except Exception:
print(traceback.format_exc())
finally:
fp.close()
VER, DEVSTATUS = get_version()
LONG_DESC = '''
Sublime Color Scheme Editor (subclrschm) is a color scheme editor for Sublime Text 3.
It is built with wxPython 4.0.0+ and requires Python 2.7 or 3.4+.
You can learn more about using subclrschm by `reading the docs`_.
.. _`reading the docs`: http://facelessuser.github.io/subclrschm/
Support
=======
Help and support is available here at the repository's `bug tracker`_.
Please read about `support and contributing`_ before creating issues.
.. _`bug tracker`: https://github.com/facelessuser/subclrschm/issues
.. _`support and contributing`: http://facelessuser.github.io/subclrschm/contributing/
'''
setup(
name='subclrschm',
version=VER,
keywords='Sublime color scheme',
description='GUI for editing Sublime Text color schemes.',
long_description=LONG_DESC,
author='Isaac Muse',
author_email='Isaac.Muse@gmail.com',
url='https://github.com/facelessuser/subclrschm',
packages=find_packages(exclude=['tests', 'tools']),
install_requires=[
"wxpython>=4.0.0a3"
],
zip_safe=False,
entry_points={
'gui_scripts': [
'subclrschm=subclrschm.__main__:main'
]
},
package_data={
'subclrschm.lib.gui.data': ['*.png', '*.ico', '*.icns']
},
license='MIT License',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
mit
| -6,795,916,051,965,609,000 | 29.931034 | 86 | 0.621702 | false |
sam-m888/gprime
|
gprime/db/test/test_where.py
|
1
|
3409
|
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2016 Gramps Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
from gprime.db.where import eval_where
from gprime.lib import Person
import unittest
##########
# Tests:
def make_closure(surname):
"""
Test closure.
"""
from gprime.lib import Person
return (lambda person:
(person.primary_name.surname_list[0].surname == surname and
person.gender == Person.MALE))
class Thing:
def __init__(self):
self.list = ["I0", "I1", "I2"]
def where(self):
return lambda person: person.gid == self.list[1]
def apply(self, db, person):
return person.gender == Person.MALE
class ClosureTest(unittest.TestCase):
def check(self, test):
result = eval_where(test[0])
self.assertTrue(result == test[1], "%s is not %s" % (result, test[1]))
def test_01(self):
self.check(
(lambda family: (family.private and
family.mother_handle.gid != "I0001"),
['AND', [['private', '==', True],
['mother_handle.gid', '!=', 'I0001']]]))
def test_02(self):
self.check(
(lambda person: LIKE(person.gid, "I0001"),
['gid', 'LIKE', 'I0001']))
def test_03(self):
self.check(
(lambda note: note.gid == "N0001",
['gid', '==', 'N0001']))
def test_04(self):
self.check(
(lambda person: person.event_ref_list.ref.gid == "E0001",
['event_ref_list.ref.gid', '==', 'E0001']))
def test_05(self):
self.check(
(lambda person: LIKE(person.gid, "I0001") or person.private,
["OR", [['gid', 'LIKE', 'I0001'],
["private", "==", True]]]))
def test_06(self):
self.check(
(lambda person: person.event_ref_list <= 0,
["event_ref_list", "<=", 0]))
def test_07(self):
self.check(
(lambda person: person.primary_name.surname_list[0].surname == "Smith",
["primary_name.surname_list.0.surname", "==", "Smith"]))
def test_08(self):
self.check(
(make_closure("Smith"),
["AND", [["primary_name.surname_list.0.surname", "==", "Smith"],
["gender", "==", 1]]]))
def test_09(self):
self.check(
[Thing().where(), ["gid", "==", "I1"]])
def test_10(self):
self.check(
(lambda person: LIKE(person.gid, "I000%"),
["gid", "LIKE", "I000%"]))
def test_11(self):
self.check(
[Thing().apply, ["gender", "==", 1]])
if __name__ == "__main__":
unittest.main()
|
gpl-2.0
| -2,685,789,864,473,225,000 | 29.990909 | 83 | 0.555295 | false |
Reality9/spiderfoot
|
modules/sfp_whois.py
|
1
|
2855
|
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_whois
# Purpose: SpiderFoot plug-in for searching Whois servers for domain names
# and netblocks identified.
#
# Author: Steve Micallef <steve@binarypool.com>
#
# Created: 06/04/2015
# Copyright: (c) Steve Micallef 2012
# Licence: GPL
# -------------------------------------------------------------------------------
import pythonwhois
from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent
class sfp_whois(SpiderFootPlugin):
"""Whois:Footprint,Investigate:Perform a WHOIS look-up on domain names and owned netblocks."""
# Default options
opts = {
}
# Option descriptions
optdescs = {
}
results = list()
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.results = list()
for opt in userOpts.keys():
self.opts[opt] = userOpts[opt]
# What events is this module interested in for input
def watchedEvents(self):
return ["DOMAIN_NAME", "OWNED_NETBLOCK"]
# What events this module produces
# This is to support the end user in selecting modules based on events
# produced.
def producedEvents(self):
return ["DOMAIN_WHOIS", "NETBLOCK_WHOIS", "DOMAIN_REGISTRAR"]
# Handle events sent to this module
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
parentEvent = event.sourceEvent
if eventData in self.results:
return None
else:
self.results.append(eventData)
self.sf.debug("Received event, " + eventName + ", from " + srcModuleName)
try:
data = pythonwhois.net.get_whois_raw(eventData)
except BaseException as e:
self.sf.error("Unable to perform WHOIS on " + eventData + ": " + str(e), False)
return None
if eventName == "DOMAIN_NAME":
typ = "DOMAIN_WHOIS"
else:
typ = "NETBLOCK_WHOIS"
evt = SpiderFootEvent(typ, '\n'.join(data), self.__name__, event)
self.notifyListeners(evt)
try:
info = pythonwhois.parse.parse_raw_whois(data, True)
except BaseException as e:
self.sf.debug("Error parsing whois data for " + eventData)
return None
        if 'registrar' in info:
if eventName == "DOMAIN_NAME" and info['registrar'] is not None:
evt = SpiderFootEvent("DOMAIN_REGISTRAR", info['registrar'][0],
self.__name__, event)
self.notifyListeners(evt)
# End of sfp_whois class
|
gpl-2.0
| -5,858,934,050,261,415,000 | 30.078652 | 98 | 0.544658 | false |
lago-project/lago-ost-plugin
|
ovirtlago/utils.py
|
1
|
2164
|
#
# Copyright 2014-2017 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
import functools
import pkg_resources
import sys
def get_data_file(basename):
"""
Load a data as a string from the data directory
Args:
basename(str): filename
Returns:
str: string representation of the file
"""
return pkg_resources.resource_string(
__name__, '/'.join(['data', basename])
)
def available_sdks(modules=None):
modules = modules or sys.modules
res = []
if 'ovirtsdk' in modules:
res.append('3')
if 'ovirtsdk4' in modules:
res.append('4')
return res
def require_sdk(version, modules=None):
modules = modules or sys.modules
def wrap(func):
@functools.wraps(func)
def wrapped_func(*args, **kwargs):
sdks = available_sdks(modules)
if version not in sdks:
raise RuntimeError(
(
'{0} requires oVirt Python SDK v{1}, '
'available SDKs: {2}'
).format(func.__name__, version, ','.join(sdks))
)
else:
return func(*args, **kwargs)
return wrapped_func
return wrap
def partial(func, *args, **kwargs):
partial_func = functools.partial(func, *args, **kwargs)
functools.update_wrapper(partial_func, func)
return partial_func
|
gpl-2.0
| -7,402,835,338,542,384,000 | 27.853333 | 79 | 0.634473 | false |
mikar/60-days-of-python
|
gui/filebrowser/fileops.py
|
1
|
24240
|
# -*- coding: utf-8 -*-
from operator import itemgetter
from unicodedata import normalize
import fnmatch
import logging
import os
import re
import string
import helpers
log = logging.getLogger("fileops")
class FileOps(object):
def __init__(self, casemode=0, countpos=0, dirsonly=False, exclude="",
filesonly=False, hidden=False, ignorecase=False,
interactive=False, keepext=False, mediamode=False,
noclobber=False, recursive=False, regex=False, remdups=False,
remext=False, remnonwords=False, remsymbols=False,
simulate=False, spacemode=0, quiet=False, verbosity=1,
matchpattern="", replacepattern="", recursivedepth=0):
# Universal options:
try:
self._casemode = int(casemode) # 0=lc, 1=uc, 2=flfw, 3=flew
except TypeError:
self._casemode = 0
try:
self._countpos = int(countpos) # Adds numerical index at position.
except TypeError:
self._countpos = 0
try:
self._spacemode = int(spacemode) # 0=su, 1=sh, 2=sd, 3=ds, 4=hs, 5=us
except TypeError:
self.spacemode = 0
self._dirsonly = dirsonly # Only edit directory names.
self._filesonly = False if dirsonly else filesonly # Only file names.
self._hidden = hidden # Look at hidden files and directories, too.
self._ignorecase = ignorecase # Case sensitivity.
self._interactive = interactive # Confirm before overwriting.
self._keepext = keepext # Don't modify remext.
self._mediamode = mediamode # Mode to sanitize NTFS-filenames/dirnames.
self._noclobber = noclobber # Don't overwrite anything.
self._recursive = recursive # Look for files recursively
self._regex = regex # Use regular expressions instead of glob/fnmatch.
self._remdups = remdups # Remove remdups.
self._remext = remext # Remove all remext.
self._remnonwords = remnonwords # Only allow wordchars (\w)
self._remsymbols = remsymbols # Normalize remsymbols (ñé becomes ne).
self._simulate = simulate # Simulate renaming and dump result to stdout.
# Initialize GUI options.
self._recursivedepth = recursivedepth
self._excludeedit = "" if not exclude else exclude
self._matchedit = "" if not matchpattern else matchpattern
self._replaceedit = "" if not replacepattern else replacepattern
self._autostop = False # Automatically stop execution on rename error.
self._countbase = 1 # Base to start counting from.
self._countfill = True # 9->10: 9 becomes 09. 99->100: 99 becomes 099.
self._countpreedit = "" # String that is prepended to the counter.
self._countstep = 1 # Increment per count iteration.
self._countsufedit = "" # String that is appended to the counter.
self._deletecheck = False # Whether to delete a specified range.
self._deleteend = 1 # End index of deletion sequence.
self._deletestart = 0 # Start index of deletion sequence.
self._filteredit = ""
self._insertcheck = False # Whether to apply an insertion.
self._insertedit = "" # The inserted text/string.
self._insertpos = 0 # Position/Index to insert at.
self._manualmirror = False # Mirror manual rename to all targets.
self._matchcheck = True # Whether to apply source/target patterns.
self._matchexcludecheck = False
self._matchfiltercheck = False
self._matchreplacecheck = True
self._casecheck = True if isinstance(casemode, str) else False
self._countcheck = True if isinstance(countpos, str) else False
removelist = [remdups, remext, remnonwords, remsymbols]
self._removecheck = True if any(removelist) else False
self._spacecheck = True if isinstance(spacemode, str) else False
self.stopupdate = False
self.stopcommit = False
self.includes = set()
self.excludes = set()
self.recursiveincludes = set()
self.recursiveexcludes = set()
self.configdir = helpers.get_configdir()
# Create the logger.
helpers.configure_logger(verbosity, quiet, self.configdir)
self.history = [] # History of commited operations, used to undo them.
# Match everything inside one set of braces:
self.bracerx = re.compile("(?<=\{)(.*?)(?=\})")
def match_filter(self, target):
if not self.filteredit:
return True
if "/" in self.filteredit:
patterns = self.filteredit.split("/")
else:
patterns = [self.filteredit]
if self.regex:
for pattern in patterns:
try:
if re.search(pattern, target, flags=self.ignorecase):
return True
except:
pass
else:
for pattern in patterns:
if fnmatch.fnmatch(target, pattern):
return True
return False
def match_exclude(self, target):
if not self.excludeedit:
return
if "/" in self.excludeedit:
patterns = self.excludeedit.split("/")
else:
patterns = [self.excludeedit]
if self.regex:
for pattern in patterns:
try:
if re.search(pattern, target, flags=self.ignorecase):
return False
except:
pass
else:
for pattern in patterns:
if fnmatch.fnmatch(target, pattern):
return False
def match(self, target):
"""Searches target for pattern and returns a bool."""
if not self.hidden and target.startswith("."):
return False
if self.matchexcludecheck:
if self.match_exclude(target) is False:
return False
if self.excludes and target in self.excludes:
return False
if self.includes and target in self.includes:
return True
if self.matchfiltercheck:
if self.match_filter(target) is False:
return False
return True
def get_dirs(self, root, dirs):
"""Sort, match and decode a list of dirs."""
return [(root, d.decode("utf-8"), u"") for d in dirs if self.match(d)]
def get_files(self, root, files):
"""Sort, match and decode a list of files."""
return [(root,) + os.path.splitext(f.decode("utf-8")) for f in
files if self.match(f)]
def get_targets(self, path=None):
"""Return a list of files and/or dirs in path."""
if not path:
path = os.getcwd()
# Determine recursion depth.
levels = 0
if self.recursive:
levels = self.recursivedepth
targets = []
for root, dirs, files in helpers.walklevels(path, levels):
# To unicode.
root = root.decode("utf-8") + "/"
if self.dirsonly:
target = self.get_dirs(root, dirs)
elif self.filesonly:
target = self.get_files(root, files)
else:
target = self.get_dirs(root, dirs) + self.get_files(root, files)
targets.extend(target)
if self.stopupdate:
return targets
return targets
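    # Shape of the returned items (inferred from get_dirs/get_files above, not
    # an authoritative spec): directories yield (root, name, u"") and files
    # yield (root, basename, extension), e.g.
    #   (u"/music/", u"cover", u".jpg")   # a file
    #   (u"/music/", u"albums", u"")      # a directory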
def get_previews(self, targets, matchpat=None, replacepat=None):
"""Simulate rename operation on targets and return results as list."""
if matchpat:
self.matchedit = matchpat
if replacepat:
self.replaceedit = replacepat
if self.mediamode:
self.set_mediaoptions()
return self.modify_previews(targets)
def set_mediaoptions(self):
self.casecheck = True
self.spacecheck = True
self.removecheck = True
self.casemode = 0
self.spacemode = 6
self.remdups = True
self.keepext = True
self.remsymbols = True
def commit(self, previews):
# The sorted generator comprehension of (unicode)doom:
# Reverse sort the paths so that the longest paths are changed first.
# This should minimize rename errors for recursive operations, for now.
actions = sorted((("".join(i[0]).encode("utf-8"), i[0][0].encode("utf-8")
+ i[1].encode("utf-8")) for i in previews),
key=lambda i: i[0].count("/"), reverse=True)
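        # Example of the ordering (sketch, not from the original source): with
        # the key counting "/" in the old path and reverse=True, "/a/b/c" is
        # renamed before "/a/b", so children are moved before their (possibly
        # renamed) parent directories.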
for i in actions:
if self.simulate:
log.debug("{} -> {}.".format(i[0], i[1]))
continue
            if self.stopcommit:
                # Only the renames performed so far belong in the history.
                idx = actions.index(i)
                log.warn("Stopping commit after {} renames.".format(idx))
                if idx:
                    log.warn("Use undo to revert the rename actions.")
                    self.history.append(actions[:idx])
                return
try:
os.rename(i[0], i[1])
except Exception as e:
log.debug("Rename Error: {} -> {} ({}).".format(i[0], i[1], e))
if self.autostop:
break
self.history.append(actions)
log.info("Renaming complete.")
def undo(self, actions=None):
if actions is None:
try:
actions = self.history.pop()
except IndexError:
log.error("History list is empty.")
return
for i in actions:
if self.simulate:
log.debug("{} -> {}.".format(i[1], i[0]))
continue
try:
os.rename(i[1], i[0])
except Exception as e:
log.error("Rename Error: {} -> {} ({}).".format(i[1], i[0], e))
if self.autostop:
break
log.info("Undo complete.")
def modify_previews(self, previews):
if self.countcheck:
lenp, base, step = len(previews), self.countbase, self.countstep
countlen = len(str(lenp))
            # Produce exactly one count per preview, starting at base.
            countrange = xrange(base, base + lenp * step, step)
if self.countfill:
count = (str(i).rjust(countlen, "0") for i in countrange)
else:
count = (str(i) for i in countrange)
modified = []
for preview in previews:
name = preview[1]
if not self.remext and not self.keepext:
name += preview[2]
if self.casecheck:
name = self.apply_case(name)
if self.spacecheck:
name = self.apply_space(name)
if self.deletecheck:
name = self.apply_delete(name)
if self.removecheck:
name = self.apply_remove(name)
if self.insertcheck:
name = self.apply_insert(name)
if self.matchcheck:
name = self.apply_replace(name)
if self.countcheck:
try:
name = self.apply_count(name, count.next())
except StopIteration:
pass
if self.keepext:
name += preview[2]
preview = ((preview[0], preview[1] + preview[2]), name)
modified.append(preview)
return modified
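    # Sketch of the preview structure (inferred, not authoritative): each input
    # preview is a (root, name, extension) tuple from get_targets(), and each
    # returned item is ((root, original_name_with_ext), new_name), e.g.
    #   ((u"/music/", u"Old Song.mp3"), u"old_song.mp3")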
def apply_space(self, s):
if not self.spacecheck:
return s
if self.spacemode == 0:
s = s.replace(" ", "_")
elif self.spacemode == 1:
s = s.replace(" ", "-")
elif self.spacemode == 2:
s = s.replace(" ", ".")
elif self.spacemode == 3:
s = s.replace(".", " ")
elif self.spacemode == 4:
s = s.replace("-", " ")
elif self.spacemode == 5:
s = s.replace("_", " ")
elif self.spacemode == 6:
s = re.sub("[.\s]", "_", s)
return s
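    # Worked example (illustrative, not from the original code): with
    # spacemode == 6 both dots and whitespace become underscores, so
    # apply_space(u"My.Track 01") -> u"My_Track_01"; with spacemode == 3 only
    # dots become spaces: apply_space(u"My.Track") -> u"My Track".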
def apply_case(self, s):
if not self.casecheck:
return s
if self.casemode == 0:
s = s.lower()
elif self.casemode == 1:
s = s.upper()
elif self.casemode == 2:
s = s.capitalize()
elif self.casemode == 3:
s = " ".join([c.capitalize() for c in s.split()])
return s
def apply_insert(self, s):
if not self.insertcheck or not self.insertedit:
return s
s = list(s)
s.insert(self.insertpos, self.insertedit)
return "".join(s)
def apply_count(self, s, count):
if not self.countcheck:
return s
s = list(s)
if self.countpreedit:
count = self.countpreedit + count
if self.countsufedit:
count += self.countsufedit
s.insert(self.countpos, count)
return "".join(s)
def apply_delete(self, s):
if not self.deletecheck:
return s
return s[:self.deletestart] + s[self.deleteend:]
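    # Example of the slicing above: deletestart == 0 and deleteend == 3 drop
    # the first three characters, e.g. apply_delete(u"01 - Track") -> u"- Track".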
def apply_remove(self, s):
if not self.removecheck:
return s
if self.remnonwords:
            s = re.sub(r"\W", "", s, flags=self.ignorecase)
if self.remsymbols:
allowed = string.ascii_letters + string.digits + " .-_+" # []()
s = "".join(c for c in normalize("NFKD", s) if c in allowed)
if self.remdups:
s = re.sub(r"([-_ .])\1+", r"\1", s, flags=self.ignorecase)
return s
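    # Sketch of the duplicate-separator collapse (remdups): the pattern
    # r"([-_ .])\1+" shrinks runs of the same separator to a single one, e.g.
    #   >>> re.sub(r"([-_ .])\1+", r"\1", u"a__b--c")
    #   u'a_b-c'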
def apply_replace(self, s):
if not self.matchreplacecheck or not self.matchedit:
return s
if not self.regex:
matchpat = fnmatch.translate(self.matchedit)
replacepat = helpers.translate(self.replaceedit)
else:
matchpat = self.matchedit
replacepat = self.replaceedit
try:
s = re.sub(matchpat, replacepat, s, flags=self.ignorecase)
        except re.error:
            # Ignore invalid match/replace patterns and keep the name as-is.
            pass
return s
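    # Hedged example of the glob-to-regex path above: when self.regex is off,
    # the match pattern goes through fnmatch.translate first, e.g.
    #   >>> import fnmatch
    #   >>> fnmatch.translate("*.mp3")
    #   '.*\\.mp3\\Z(?ms)'   # exact form differs between Python versions
    # helpers.translate() is assumed to do the analogous conversion for the
    # replacement string.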
@property
def dirsonly(self):
return self._dirsonly
@dirsonly.setter
def dirsonly(self, boolean):
log.debug("dirsonly: {}".format(boolean))
self._dirsonly = boolean
if boolean:
self.filesonly = False
@property
def filesonly(self):
return self._filesonly
@filesonly.setter
def filesonly(self, boolean):
log.debug("filesonly: {}".format(boolean))
self._filesonly = boolean
if boolean:
self.dirsonly = False
@property
def recursive(self):
return self._recursive
@recursive.setter
def recursive(self, boolean):
log.debug("recursive: {}".format(boolean))
self._recursive = boolean
@property
def recursivedepth(self):
return self._recursivedepth
@recursivedepth.setter
def recursivedepth(self, num):
log.debug("recursivedepth: {}".format(num))
self._recursivedepth = num
@property
def hidden(self):
return self._hidden
@hidden.setter
def hidden(self, boolean):
log.debug("hidden: {}".format(boolean))
self._hidden = boolean
@property
def simulate(self):
return self._simulate
@simulate.setter
def simulate(self, boolean):
log.debug("simulate: {}".format(boolean))
self._simulate = boolean
@property
def interactive(self):
return self._interactive
@interactive.setter
def interactive(self, boolean):
log.debug("interactive: {}".format(boolean))
self._interactive = boolean
@property
def noclobber(self):
return self._noclobber
@noclobber.setter
def noclobber(self, boolean):
log.debug("noclobber: {}".format(boolean))
self._noclobber = boolean
@property
def keepext(self):
return self._keepext
@keepext.setter
def keepext(self, boolean):
log.debug("keepext: {}.".format(boolean))
self._keepext = boolean
@property
def regex(self):
return self._regex
@regex.setter
def regex(self, boolean):
log.debug("regex: {}.".format(boolean))
self._regex = boolean
@property
def varcheck(self):
return self._varcheck
@varcheck.setter
def varcheck(self, boolean):
log.debug("varcheck: {}".format(boolean))
self._varcheck = boolean
@property
def matchcheck(self):
return self._matchcheck
@matchcheck.setter
def matchcheck(self, boolean):
log.debug("matchcheck: {}".format(boolean))
self._matchcheck = boolean
@property
def matchexcludecheck(self):
return self._matchexcludecheck
@matchexcludecheck.setter
def matchexcludecheck(self, boolean):
log.debug("matchexcludecheck: {}".format(boolean))
self._matchexcludecheck = boolean
@property
def matchfiltercheck(self):
return self._matchfiltercheck
@matchfiltercheck.setter
def matchfiltercheck(self, boolean):
log.debug("matchfiltercheck: {}".format(boolean))
self._matchfiltercheck = boolean
@property
def matchreplacecheck(self):
return self._matchreplacecheck
@matchreplacecheck.setter
def matchreplacecheck(self, boolean):
log.debug("matchreplacecheck: {}".format(boolean))
self._matchreplacecheck = boolean
@property
def countpreedit(self):
return self._countpreedit
@countpreedit.setter
def countpreedit(self, text):
log.debug("countpreedit: {}".format(text))
self._countpreedit = text.decode("utf-8")
@property
def countsufedit(self):
return self._countsufedit
@countsufedit.setter
def countsufedit(self, text):
log.debug("countsufedit: {}".format(text))
self._countsufedit = text.decode("utf-8")
@property
def insertedit(self):
return self._insertedit
@insertedit.setter
def insertedit(self, text):
log.debug("insertedit: {}.".format(text))
self._insertedit = text.decode("utf-8")
@property
def matchedit(self):
return self._matchedit
@matchedit.setter
def matchedit(self, text):
log.debug("matchedit: {}.".format(text))
self._matchedit = text.decode("utf-8")
@property
def replaceedit(self):
return self._replaceedit
@replaceedit.setter
def replaceedit(self, text):
log.debug("replaceedit: {}.".format(text))
self._replaceedit = text.decode("utf-8")
@property
def filteredit(self):
return self._filteredit
@filteredit.setter
def filteredit(self, text):
log.debug("filteredit: {}.".format(text))
self._filteredit = text.decode("utf-8")
@property
def excludeedit(self):
return self._excludeedit
@excludeedit.setter
def excludeedit(self, text):
log.debug("excludeedit: {}.".format(text))
self._excludeedit = text.decode("utf-8")
@property
def remsymbols(self):
return self._remsymbols
@remsymbols.setter
def remsymbols(self, boolean):
log.debug("remsymbols: {}".format(boolean))
self._remsymbols = boolean
@property
def autostop(self):
return self._autostop
@autostop.setter
def autostop(self, boolean):
log.debug("autostop: {}".format(boolean))
self._autostop = boolean
@property
def manualmirror(self):
return self._manualmirror
@manualmirror.setter
def manualmirror(self, boolean):
log.debug("manualmirror: {}".format(boolean))
self._manualmirror = boolean
@property
def removecheck(self):
return self._removecheck
@removecheck.setter
def removecheck(self, boolean):
log.debug("removecheck: {}".format(boolean))
self._removecheck = boolean
@property
def remdups(self):
return self._remdups
@remdups.setter
def remdups(self, boolean):
log.debug("remdups: {}".format(boolean))
self._remdups = boolean
@property
def remext(self):
return self._remext
@remext.setter
def remext(self, boolean):
log.debug("remext: {}".format(boolean))
self._remext = boolean
@property
def remnonwords(self):
return self._remnonwords
@remnonwords.setter
def remnonwords(self, boolean):
log.debug("remnonwords: {}".format(boolean))
self._remnonwords = boolean
@property
def ignorecase(self):
return self._ignorecase
@ignorecase.setter
def ignorecase(self, boolean):
flag = 0
if boolean:
flag = re.I
log.debug("ignorecase: {}".format(boolean))
self._ignorecase = flag
@property
def mediamode(self):
return self._mediamode
@mediamode.setter
def mediamode(self, boolean):
log.debug("mediamode: {}".format(boolean))
self._mediamode = boolean
@property
def countcheck(self):
return self._countcheck
@countcheck.setter
def countcheck(self, boolean):
log.debug("countcheck: {}".format(boolean))
self._countcheck = boolean
@property
def countfill(self):
return self._countfill
@countfill.setter
def countfill(self, boolean):
log.debug("countfill: {}".format(boolean))
self._countfill = boolean
@property
def countpos(self):
return self._countpos
@countpos.setter
def countpos(self, index):
log.debug("countpos: {}".format(index))
self._countpos = index
@property
def countbase(self):
return self._countbase
@countbase.setter
def countbase(self, num):
log.debug("countbase: {}".format(num))
self._countbase = num
@property
def countstep(self):
return self._countstep
@countstep.setter
def countstep(self, num):
log.debug("countstep: {}".format(num))
self._countstep = num
@property
def insertcheck(self):
return self._insertcheck
@insertcheck.setter
def insertcheck(self, boolean):
log.debug("insertcheck: {}".format(boolean))
self._insertcheck = boolean
@property
def insertpos(self):
return self._insertpos
@insertpos.setter
def insertpos(self, index):
log.debug("insertpos: {}".format(index))
self._insertpos = index
@property
def deletecheck(self):
return self._deletecheck
@deletecheck.setter
def deletecheck(self, boolean):
log.debug("deletecheck: {}".format(boolean))
self._deletecheck = boolean
@property
def deletestart(self):
return self._deletestart
@deletestart.setter
def deletestart(self, index):
log.debug("deletestart: {}".format(index))
self._deletestart = index
@property
def deleteend(self):
return self._deleteend
@deleteend.setter
def deleteend(self, index):
log.debug("deleteend: {}".format(index))
self._deleteend = index
@property
def casecheck(self):
return self._casecheck
@casecheck.setter
def casecheck(self, boolean):
log.debug("casecheck: {}".format(boolean))
self._casecheck = boolean
@property
def casemode(self):
return self._casemode
@casemode.setter
def casemode(self, num):
log.debug("casemode: {}".format(num))
self._casemode = num
@property
def spacecheck(self):
return self._spacecheck
@spacecheck.setter
def spacecheck(self, boolean):
log.debug("spacecheck: {}".format(boolean))
self._spacecheck = boolean
@property
def spacemode(self):
return self._spacemode
@spacemode.setter
def spacemode(self, num):
log.debug("spacemode: {}".format(num))
self._spacemode = num
if __name__ == "__main__":
fileops = FileOps(hidden=True, recursive=True, casemode="1")
fileops.get_previews(fileops.get_targets(), "*", "asdf")
|
mit
| 6,557,784,114,560,238,000 | 29.221945 | 82 | 0.573768 | false |
agendaTCC/AgendaTCC
|
tccweb/apps/usuarios/admin.py
|
1
|
2272
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.utils.translation import ugettext_lazy as _
from models import _User, CSVUsuario
from forms import _UserChangeForm, _UserCreationForm
def deactivate(modeladmin, request, queryset):
queryset.update(is_active=False)
deactivate.short_description = "Desativar Usuarios selecionados"
def activate(modeladmin, request, queryset):
queryset.update(is_active=True)
activate.short_description = "Ativar Usuarios selecionados"
class _UserAdmin(UserAdmin):
fieldsets = (
(None, {'fields': ('cpf', 'password')}),
(_(u'Informações Pessoais'), {'fields': ('nome_completo','email','numero_usp','curso','endereco',
'numero','complemento','cidade','uf',
'bairro','tel','cep',)}),
(_(u'Permissões do Sistema'), {'fields': ('is_active', 'is_staff', 'is_superuser',
# 'groups',
'user_permissions'
)}),
(_(u'Funções'), {'fields': ('docente','doutorando', 'mestrando','aluno', 'funcionario','monitor','pae','supervisor','secretario')}),
(_('Datas Importantes'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('cpf', 'password1', 'password2')}
),
)
form = _UserChangeForm
add_form = _UserCreationForm
list_display = ('nome_completo', 'email', 'cpf', 'is_staff',)
search_fields = ('nome_completo', 'email', 'cpf','numero_usp')
ordering = ('nome_completo',)
actions = [deactivate,activate]
list_filter = ['docente','doutorando','mestrando','aluno','funcionario','supervisor','monitor','pae','secretario' , 'is_staff', 'is_superuser', 'is_active']
class CsvUsuarioAdmin(admin.ModelAdmin):
# save_on_top = True
# list_display = (['criada_em'])
# list_display_links = (['criada_em'])
# search_fields = ['criada_em']
# date_hierarchy = 'criada_em'
readonly_fields=('log',)
admin.site.register(CSVUsuario, CsvUsuarioAdmin)
admin.site.register(_User, _UserAdmin)
|
gpl-2.0
| -1,889,795,591,340,552,000 | 40.218182 | 160 | 0.588884 | false |
datamade/represent-boundaries
|
boundaries/tests/test_boundary.py
|
1
|
5043
|
# coding: utf-8
from __future__ import unicode_literals
import datetime
from django.test import TestCase
from django.contrib.gis.gdal import OGRGeometry
from django.contrib.gis.geos import Point, GEOSGeometry
from boundaries.models import BoundarySet, Boundary, Geometry
class BoundaryTestCase(TestCase):
maxDiff = None
def test___str__(self):
self.assertEqual(str(Boundary(set_name='Foo', name='Bar')), 'Bar (Foo)')
def test_get_absolute_url(self):
self.assertEqual(Boundary(set_id='foo', slug='bar').get_absolute_url(), '/boundaries/foo/bar/')
def test_boundary_set(self):
self.assertEqual(Boundary(set=BoundarySet(slug='foo')).boundary_set, 'foo')
def test_boundary_set_name(self):
self.assertEqual(Boundary(set_name='Foo').boundary_set_name, 'Foo')
def test_get_dicts(self):
boundaries = [
('bar', 'foo', 'Bar', 'Foo', 1),
('bzz', 'baz', 'Bzz', 'Baz', 2),
]
self.assertEqual(Boundary.get_dicts(boundaries), [
{
'url': '/boundaries/foo/bar/',
'name': 'Bar',
'related': {
'boundary_set_url': '/boundary-sets/foo/',
},
'boundary_set_name': 'Foo',
'external_id': 1,
},
{
'url': '/boundaries/baz/bzz/',
'name': 'Bzz',
'related': {
'boundary_set_url': '/boundary-sets/baz/',
},
'boundary_set_name': 'Baz',
'external_id': 2,
},
])
def test_as_dict(self):
self.assertEqual(Boundary(
set_id='foo',
slug='bar',
set_name='Foo',
name='Bar',
metadata={
'baz': 'bzz',
},
external_id=1,
extent=[0, 0, 1, 1],
centroid=Point(0, 1),
start_date=datetime.date(2000, 1, 1),
end_date=datetime.date(2010, 1, 1),
).as_dict(), {
'related': {
'boundary_set_url': '/boundary-sets/foo/',
'shape_url': '/boundaries/foo/bar/shape',
'simple_shape_url': '/boundaries/foo/bar/simple_shape',
'centroid_url': '/boundaries/foo/bar/centroid',
'boundaries_url': '/boundaries/foo/',
},
'boundary_set_name': 'Foo',
'name': 'Bar',
'metadata': {
'baz': 'bzz',
},
'external_id': 1,
'extent': [0, 0, 1, 1],
'centroid': {
'type': 'Point',
'coordinates': (0.0, 1.0),
},
'start_date': '2000-01-01',
'end_date': '2010-01-01',
})
self.assertEqual(Boundary(
set_id='foo',
slug='bar',
).as_dict(), {
'related': {
'boundary_set_url': '/boundary-sets/foo/',
'shape_url': '/boundaries/foo/bar/shape',
'simple_shape_url': '/boundaries/foo/bar/simple_shape',
'centroid_url': '/boundaries/foo/bar/centroid',
'boundaries_url': '/boundaries/foo/',
},
'boundary_set_name': '',
'name': '',
'metadata': {},
'external_id': '',
'extent': None,
'centroid': None,
'start_date': None,
'end_date': None,
})
def test_prepare_queryset_for_get_dicts(self):
geom = GEOSGeometry('MULTIPOLYGON(((0 0,0 5,5 5,0 0)))')
Boundary.objects.create(
slug='bar',
set=BoundarySet(slug='foo'),
name='Bar',
set_name='Foo',
external_id=1,
shape=geom,
simple_shape=geom,
)
# Coerce the django.contrib.gis.db.models.query.GeoValuesListQuerySet.
self.assertEqual(list(Boundary.prepare_queryset_for_get_dicts(Boundary.objects)), [
('bar', 'foo', 'Bar', 'Foo', '1'),
])
def test_merge(self):
boundary = Boundary(shape='MULTIPOLYGON (((0 0,0 5,2.5 5.0001,5 5,0 0)))', simple_shape='MULTIPOLYGON (((0 0,0 5,5 5,0 0)))')
boundary.merge(Geometry(OGRGeometry('MULTIPOLYGON (((0 0,5 0,5.0001 2.5,5 5,0 0)))')))
self.assertEqual(boundary.shape.ogr.wkt, 'MULTIPOLYGON (((0 0,0 5,2.5 5.0001,5 5,0 0)),((0 0,5 0,5.0001 2.5,5 5,0 0)))')
self.assertEqual(boundary.simple_shape.ogr.wkt, 'MULTIPOLYGON (((0 0,0 5,5 5,0 0)),((0 0,5 0,5 5,0 0)))')
def test_cascaded_union(self):
boundary = Boundary(shape='MULTIPOLYGON (((0 0,0 5,2.5 5.0001,5 5,0 0)))')
boundary.cascaded_union(Geometry(OGRGeometry('MULTIPOLYGON (((0 0,5 0,5 5,0 0)))')))
self.assertEqual(boundary.shape.ogr.wkt, 'MULTIPOLYGON (((5 5,5 0,0 0,0 5,2.5 5.0001,5 5)))')
self.assertEqual(boundary.simple_shape.ogr.wkt, 'MULTIPOLYGON (((5 5,5 0,0 0,0 5,5 5)))')
|
mit
| -7,161,883,801,366,320,000 | 35.280576 | 133 | 0.493555 | false |
vladimir-ipatov/ganeti
|
lib/ssh.py
|
1
|
10583
|
#
#
# Copyright (C) 2006, 2007, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Module encapsulating ssh functionality.
"""
import os
import logging
from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import netutils
from ganeti import pathutils
from ganeti import vcluster
from ganeti import compat
def GetUserFiles(user, mkdir=False, dircheck=True, kind=constants.SSHK_DSA,
_homedir_fn=None):
"""Return the paths of a user's SSH files.
@type user: string
@param user: Username
@type mkdir: bool
@param mkdir: Whether to create ".ssh" directory if it doesn't exist
@type dircheck: bool
@param dircheck: Whether to check if ".ssh" directory exists
@type kind: string
@param kind: One of L{constants.SSHK_ALL}
@rtype: tuple; (string, string, string)
@return: Tuple containing three file system paths; the private SSH key file,
the public SSH key file and the user's C{authorized_keys} file
@raise errors.OpExecError: When home directory of the user can not be
determined
@raise errors.OpExecError: Regardless of the C{mkdir} parameters, this
exception is raised if C{~$user/.ssh} is not a directory and C{dircheck}
is set to C{True}
"""
if _homedir_fn is None:
_homedir_fn = utils.GetHomeDir
user_dir = _homedir_fn(user)
if not user_dir:
raise errors.OpExecError("Cannot resolve home of user '%s'" % user)
if kind == constants.SSHK_DSA:
suffix = "dsa"
elif kind == constants.SSHK_RSA:
suffix = "rsa"
else:
raise errors.ProgrammerError("Unknown SSH key kind '%s'" % kind)
ssh_dir = utils.PathJoin(user_dir, ".ssh")
if mkdir:
utils.EnsureDirs([(ssh_dir, constants.SECURE_DIR_MODE)])
elif dircheck and not os.path.isdir(ssh_dir):
raise errors.OpExecError("Path %s is not a directory" % ssh_dir)
return [utils.PathJoin(ssh_dir, base)
for base in ["id_%s" % suffix, "id_%s.pub" % suffix,
"authorized_keys"]]
def GetAllUserFiles(user, mkdir=False, dircheck=True, _homedir_fn=None):
"""Wrapper over L{GetUserFiles} to retrieve files for all SSH key types.
See L{GetUserFiles} for details.
@rtype: tuple; (string, dict with string as key, tuple of (string, string) as
value)
"""
helper = compat.partial(GetUserFiles, user, mkdir=mkdir, dircheck=dircheck,
_homedir_fn=_homedir_fn)
result = [(kind, helper(kind=kind)) for kind in constants.SSHK_ALL]
authorized_keys = [i for (_, (_, _, i)) in result]
assert len(frozenset(authorized_keys)) == 1, \
"Different paths for authorized_keys were returned"
return (authorized_keys[0],
dict((kind, (privkey, pubkey))
for (kind, (privkey, pubkey, _)) in result))
class SshRunner:
"""Wrapper for SSH commands.
"""
def __init__(self, cluster_name, ipv6=False):
"""Initializes this class.
@type cluster_name: str
@param cluster_name: name of the cluster
@type ipv6: bool
@param ipv6: If true, force ssh to use IPv6 addresses only
"""
self.cluster_name = cluster_name
self.ipv6 = ipv6
def _BuildSshOptions(self, batch, ask_key, use_cluster_key,
strict_host_check, private_key=None, quiet=True):
"""Builds a list with needed SSH options.
@param batch: same as ssh's batch option
@param ask_key: allows ssh to ask for key confirmation; this
parameter conflicts with the batch one
@param use_cluster_key: if True, use the cluster name as the
HostKeyAlias name
@param strict_host_check: this makes the host key checking strict
@param private_key: use this private key instead of the default
@param quiet: whether to enable -q to ssh
@rtype: list
@return: the list of options ready to use in L{utils.process.RunCmd}
"""
options = [
"-oEscapeChar=none",
"-oHashKnownHosts=no",
"-oGlobalKnownHostsFile=%s" % pathutils.SSH_KNOWN_HOSTS_FILE,
"-oUserKnownHostsFile=/dev/null",
"-oCheckHostIp=no",
]
if use_cluster_key:
options.append("-oHostKeyAlias=%s" % self.cluster_name)
if quiet:
options.append("-q")
if private_key:
options.append("-i%s" % private_key)
# TODO: Too many boolean options, maybe convert them to more descriptive
# constants.
# Note: ask_key conflicts with batch mode
if batch:
if ask_key:
raise errors.ProgrammerError("SSH call requested conflicting options")
options.append("-oBatchMode=yes")
if strict_host_check:
options.append("-oStrictHostKeyChecking=yes")
else:
options.append("-oStrictHostKeyChecking=no")
else:
# non-batch mode
if ask_key:
options.append("-oStrictHostKeyChecking=ask")
elif strict_host_check:
options.append("-oStrictHostKeyChecking=yes")
else:
options.append("-oStrictHostKeyChecking=no")
if self.ipv6:
options.append("-6")
else:
options.append("-4")
return options
def BuildCmd(self, hostname, user, command, batch=True, ask_key=False,
tty=False, use_cluster_key=True, strict_host_check=True,
private_key=None, quiet=True):
"""Build an ssh command to execute a command on a remote node.
@param hostname: the target host, string
@param user: user to auth as
@param command: the command
@param batch: if true, ssh will run in batch mode with no prompting
@param ask_key: if true, ssh will run with
StrictHostKeyChecking=ask, so that we can connect to an
unknown host (not valid in batch mode)
@param use_cluster_key: whether to expect and use the
cluster-global SSH key
@param strict_host_check: whether to check the host's SSH key at all
@param private_key: use this private key instead of the default
@param quiet: whether to enable -q to ssh
@return: the ssh call to run 'command' on the remote host.
"""
argv = [constants.SSH]
argv.extend(self._BuildSshOptions(batch, ask_key, use_cluster_key,
strict_host_check, private_key,
quiet=quiet))
if tty:
argv.extend(["-t", "-t"])
argv.append("%s@%s" % (user, hostname))
# Insert variables for virtual nodes
argv.extend("export %s=%s;" %
(utils.ShellQuote(name), utils.ShellQuote(value))
for (name, value) in
vcluster.EnvironmentForHost(hostname).items())
argv.append(command)
return argv
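  # Rough shape of the produced command (illustrative only): for
  # BuildCmd("node1", "root", "hostname") the list begins with constants.SSH,
  # then the -o options from _BuildSshOptions, then "root@node1", any exported
  # environment assignments for virtual nodes, and finally the command string.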
def Run(self, *args, **kwargs):
"""Runs a command on a remote node.
This method has the same return value as `utils.RunCmd()`, which it
uses to launch ssh.
Args: see SshRunner.BuildCmd.
@rtype: L{utils.process.RunResult}
@return: the result as from L{utils.process.RunCmd()}
"""
return utils.RunCmd(self.BuildCmd(*args, **kwargs))
def CopyFileToNode(self, node, filename):
"""Copy a file to another node with scp.
@param node: node in the cluster
@param filename: absolute pathname of a local file
@rtype: boolean
@return: the success of the operation
"""
if not os.path.isabs(filename):
logging.error("File %s must be an absolute path", filename)
return False
if not os.path.isfile(filename):
logging.error("File %s does not exist", filename)
return False
command = [constants.SCP, "-p"]
command.extend(self._BuildSshOptions(True, False, True, True))
command.append(filename)
if netutils.IP6Address.IsValid(node):
node = netutils.FormatAddress((node, None))
command.append("%s:%s" % (node, vcluster.ExchangeNodeRoot(node, filename)))
result = utils.RunCmd(command)
if result.failed:
logging.error("Copy to node %s failed (%s) error '%s',"
" command was '%s'",
node, result.fail_reason, result.output, result.cmd)
return not result.failed
def VerifyNodeHostname(self, node):
"""Verify hostname consistency via SSH.
This functions connects via ssh to a node and compares the hostname
reported by the node to the name with have (the one that we
connected to).
This is used to detect problems in ssh known_hosts files
(conflicting known hosts) and inconsistencies between dns/hosts
entries and local machine names
@param node: nodename of a host to check; can be short or
full qualified hostname
@return: (success, detail), where:
- success: True/False
- detail: string with details
"""
cmd = ("if test -z \"$GANETI_HOSTNAME\"; then"
" hostname --fqdn;"
"else"
" echo \"$GANETI_HOSTNAME\";"
"fi")
retval = self.Run(node, constants.SSH_LOGIN_USER, cmd, quiet=False)
if retval.failed:
msg = "ssh problem"
output = retval.output
if output:
msg += ": %s" % output
else:
msg += ": %s (no output)" % retval.fail_reason
logging.error("Command %s failed: %s", retval.cmd, msg)
return False, msg
remotehostname = retval.stdout.strip()
if not remotehostname or remotehostname != node:
if node.startswith(remotehostname + "."):
msg = "hostname not FQDN"
else:
msg = "hostname mismatch"
return False, ("%s: expected %s but got %s" %
(msg, node, remotehostname))
return True, "host matches"
def WriteKnownHostsFile(cfg, file_name):
"""Writes the cluster-wide equally known_hosts file.
"""
data = ""
if cfg.GetRsaHostKey():
data += "%s ssh-rsa %s\n" % (cfg.GetClusterName(), cfg.GetRsaHostKey())
if cfg.GetDsaHostKey():
data += "%s ssh-dss %s\n" % (cfg.GetClusterName(), cfg.GetDsaHostKey())
utils.WriteFile(file_name, mode=0600, data=data)
|
gpl-2.0
| 2,119,428,609,877,526,000 | 30.218289 | 79 | 0.651422 | false |
alphagov/backdrop
|
tests/read/test_validators.py
|
1
|
3336
|
import unittest
from hamcrest import *
from backdrop.read.validation import ParameterMustBeOneOfTheseValidator, MondayValidator, FirstOfMonthValidator, ParamDependencyValidator
# TODO: looked around and couldn't see any other validator tests
class TestValidators(unittest.TestCase):
def test_value_must_be_one_of_these_validator(self):
request = {
"foo": "not_allowed"
}
validator = ParameterMustBeOneOfTheseValidator(
request_args=request,
param_name="foo",
must_be_one_of_these=["bar", "zap"]
)
assert_that(validator.invalid(), is_(True))
def test_monday_validator_only_validates_when_period_is_week(self):
period_week_request = {
"period": "week",
"_start_at": "2013-01-02T00:00:00+00:00"
}
period_month_request = {
"period": "month",
"_start_at": "2013-01-02T00:00:00+00:00"
}
i_should_be_invalid = MondayValidator(
request_args=period_week_request,
param_name="_start_at"
)
i_should_be_valid = MondayValidator(
request_args=period_month_request,
param_name="_start_at"
)
assert_that(i_should_be_invalid.invalid(), is_(True))
assert_that(i_should_be_valid.invalid(), is_(False))
def test_first_of_month_validator_only_validates_for_period_month(self):
period_week_request = {
"period": "week",
"_start_at": "2013-01-02T00:00:00+00:00"
}
period_month_request = {
"period": "month",
"_start_at": "2013-01-02T00:00:00+00:00"
}
i_should_be_invalid = FirstOfMonthValidator(
request_args=period_month_request,
param_name="_start_at"
)
i_should_be_valid = FirstOfMonthValidator(
request_args=period_week_request,
param_name="_start_at"
)
assert_that(i_should_be_invalid.invalid(), is_(True))
assert_that(i_should_be_valid.invalid(), is_(False))
def test_param_dependency_validator(self):
query = {
"collect": "foo",
"group_by": "test"
}
validator = ParamDependencyValidator(request_args=query,
param_name="collect",
depends_on=["group_by"])
assert_that(validator.invalid(), is_(False))
def test_param_dependency_validator_invalidates_correctly(self):
query = {
"collect": "foo",
"group_by": "test"
}
validator = ParamDependencyValidator(request_args=query,
param_name="collect",
depends_on=["wibble"])
assert_that(validator.invalid(), is_(True))
def test_that_a_parameter_can_have_multiple_dependencies(self):
query = {
"collect": "foo",
"period": "week"
}
validator = ParamDependencyValidator(request_args=query,
param_name="collect",
depends_on=["group_by", "period"])
assert_that(validator.invalid(), is_(False))
|
mit
| -7,594,048,280,196,841,000 | 32.029703 | 137 | 0.536871 | false |
NeCTAR-RC/nova
|
nova/tests/unit/compute/test_tracker.py
|
1
|
76576
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_utils import units
from nova.compute import arch
from nova.compute import claims
from nova.compute import hv_type
from nova.compute import power_state
from nova.compute import resource_tracker
from nova.compute import task_states
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import exception as exc
from nova import objects
from nova.objects import base as obj_base
from nova.pci import manager as pci_manager
from nova import test
_VIRT_DRIVER_AVAIL_RESOURCES = {
'vcpus': 4,
'memory_mb': 512,
'local_gb': 6,
'vcpus_used': 0,
'memory_mb_used': 0,
'local_gb_used': 0,
'hypervisor_type': 'fake',
'hypervisor_version': 0,
'hypervisor_hostname': 'fakehost',
'cpu_info': '',
'numa_topology': None,
}
_COMPUTE_NODE_FIXTURES = [
objects.ComputeNode(
id=1,
host='fake-host',
vcpus=_VIRT_DRIVER_AVAIL_RESOURCES['vcpus'],
memory_mb=_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb'],
local_gb=_VIRT_DRIVER_AVAIL_RESOURCES['local_gb'],
vcpus_used=_VIRT_DRIVER_AVAIL_RESOURCES['vcpus_used'],
memory_mb_used=_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb_used'],
local_gb_used=_VIRT_DRIVER_AVAIL_RESOURCES['local_gb_used'],
hypervisor_type='fake',
hypervisor_version=0,
hypervisor_hostname='fake-host',
free_ram_mb=(_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb'] -
_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb_used']),
free_disk_gb=(_VIRT_DRIVER_AVAIL_RESOURCES['local_gb'] -
_VIRT_DRIVER_AVAIL_RESOURCES['local_gb_used']),
current_workload=0,
running_vms=0,
cpu_info='{}',
disk_available_least=0,
host_ip='1.1.1.1',
supported_hv_specs=[
objects.HVSpec.from_list([arch.I686, hv_type.KVM, vm_mode.HVM])
],
metrics=None,
pci_device_pools=None,
extra_resources=None,
stats={},
numa_topology=None,
cpu_allocation_ratio=16.0,
ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0,
),
]
_INSTANCE_TYPE_FIXTURES = {
1: {
'id': 1,
'flavorid': 'fakeid-1',
'name': 'fake1.small',
'memory_mb': 128,
'vcpus': 1,
'root_gb': 1,
'ephemeral_gb': 0,
'swap': 0,
'rxtx_factor': 0,
'vcpu_weight': 1,
'extra_specs': {},
},
2: {
'id': 2,
'flavorid': 'fakeid-2',
'name': 'fake1.medium',
'memory_mb': 256,
'vcpus': 2,
'root_gb': 5,
'ephemeral_gb': 0,
'swap': 0,
'rxtx_factor': 0,
'vcpu_weight': 1,
'extra_specs': {},
},
}
_INSTANCE_TYPE_OBJ_FIXTURES = {
1: objects.Flavor(id=1, flavorid='fakeid-1', name='fake1.small',
memory_mb=128, vcpus=1, root_gb=1,
ephemeral_gb=0, swap=0, rxtx_factor=0,
vcpu_weight=1, extra_specs={}),
2: objects.Flavor(id=2, flavorid='fakeid-2', name='fake1.medium',
memory_mb=256, vcpus=2, root_gb=5,
ephemeral_gb=0, swap=0, rxtx_factor=0,
vcpu_weight=1, extra_specs={}),
}
_2MB = 2 * units.Mi / units.Ki
_INSTANCE_NUMA_TOPOLOGIES = {
'2mb': objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([1]), memory=_2MB, pagesize=0),
objects.InstanceNUMACell(
id=1, cpuset=set([3]), memory=_2MB, pagesize=0)]),
}
_NUMA_LIMIT_TOPOLOGIES = {
'2mb': objects.NUMATopologyLimits(id=0,
cpu_allocation_ratio=1.0,
ram_allocation_ratio=1.0),
}
_NUMA_PAGE_TOPOLOGIES = {
'2kb*8': objects.NUMAPagesTopology(size_kb=2, total=8, used=0)
}
_NUMA_HOST_TOPOLOGIES = {
'2mb': objects.NUMATopology(cells=[
objects.NUMACell(id=0, cpuset=set([1, 2]), memory=_2MB,
cpu_usage=0, memory_usage=0,
mempages=[_NUMA_PAGE_TOPOLOGIES['2kb*8']],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=1, cpuset=set([3, 4]), memory=_2MB,
cpu_usage=0, memory_usage=0,
mempages=[_NUMA_PAGE_TOPOLOGIES['2kb*8']],
siblings=[], pinned_cpus=set([]))]),
}
_INSTANCE_FIXTURES = [
objects.Instance(
id=1,
host=None, # prevent RT trying to lazy-load this
node=None,
uuid='c17741a5-6f3d-44a8-ade8-773dc8c29124',
memory_mb=_INSTANCE_TYPE_FIXTURES[1]['memory_mb'],
vcpus=_INSTANCE_TYPE_FIXTURES[1]['vcpus'],
root_gb=_INSTANCE_TYPE_FIXTURES[1]['root_gb'],
ephemeral_gb=_INSTANCE_TYPE_FIXTURES[1]['ephemeral_gb'],
numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb'],
instance_type_id=1,
vm_state=vm_states.ACTIVE,
power_state=power_state.RUNNING,
task_state=None,
os_type='fake-os', # Used by the stats collector.
project_id='fake-project', # Used by the stats collector.
        flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
        old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
        new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
),
objects.Instance(
id=2,
host=None,
node=None,
uuid='33805b54-dea6-47b8-acb2-22aeb1b57919',
memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
numa_topology=None,
instance_type_id=2,
vm_state=vm_states.DELETED,
power_state=power_state.SHUTDOWN,
task_state=None,
os_type='fake-os',
project_id='fake-project-2',
flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2],
old_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2],
new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2],
),
]
_MIGRATION_FIXTURES = {
# A migration that has only this compute node as the source host
'source-only': objects.Migration(
id=1,
instance_uuid='f15ecfb0-9bf6-42db-9837-706eb2c4bf08',
source_compute='fake-host',
dest_compute='other-host',
source_node='fake-node',
dest_node='other-node',
old_instance_type_id=1,
new_instance_type_id=2,
migration_type='resize',
status='migrating'
),
# A migration that has only this compute node as the dest host
'dest-only': objects.Migration(
id=2,
instance_uuid='f6ed631a-8645-4b12-8e1e-2fff55795765',
source_compute='other-host',
dest_compute='fake-host',
source_node='other-node',
dest_node='fake-node',
old_instance_type_id=1,
new_instance_type_id=2,
migration_type='resize',
status='migrating'
),
# A migration that has this compute node as both the source and dest host
'source-and-dest': objects.Migration(
id=3,
instance_uuid='f4f0bfea-fe7e-4264-b598-01cb13ef1997',
source_compute='fake-host',
dest_compute='fake-host',
source_node='fake-node',
dest_node='fake-node',
old_instance_type_id=1,
new_instance_type_id=2,
migration_type='resize',
status='migrating'
),
# A migration that has this compute node as destination and is an evac
'dest-only-evac': objects.Migration(
id=4,
instance_uuid='077fb63a-bdc8-4330-90ef-f012082703dc',
source_compute='other-host',
dest_compute='fake-host',
source_node='other-node',
dest_node='fake-node',
old_instance_type_id=2,
new_instance_type_id=None,
migration_type='evacuation',
status='pre-migrating'
),
}
_MIGRATION_INSTANCE_FIXTURES = {
# source-only
'f15ecfb0-9bf6-42db-9837-706eb2c4bf08': objects.Instance(
id=101,
host=None, # prevent RT trying to lazy-load this
node=None,
uuid='f15ecfb0-9bf6-42db-9837-706eb2c4bf08',
memory_mb=_INSTANCE_TYPE_FIXTURES[1]['memory_mb'],
vcpus=_INSTANCE_TYPE_FIXTURES[1]['vcpus'],
root_gb=_INSTANCE_TYPE_FIXTURES[1]['root_gb'],
ephemeral_gb=_INSTANCE_TYPE_FIXTURES[1]['ephemeral_gb'],
numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb'],
instance_type_id=1,
vm_state=vm_states.ACTIVE,
power_state=power_state.RUNNING,
task_state=task_states.RESIZE_MIGRATING,
system_metadata={},
os_type='fake-os',
project_id='fake-project',
flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
),
# dest-only
'f6ed631a-8645-4b12-8e1e-2fff55795765': objects.Instance(
id=102,
host=None, # prevent RT trying to lazy-load this
node=None,
uuid='f6ed631a-8645-4b12-8e1e-2fff55795765',
memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
numa_topology=None,
instance_type_id=2,
vm_state=vm_states.ACTIVE,
power_state=power_state.RUNNING,
task_state=task_states.RESIZE_MIGRATING,
system_metadata={},
os_type='fake-os',
project_id='fake-project',
flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
),
# source-and-dest
'f4f0bfea-fe7e-4264-b598-01cb13ef1997': objects.Instance(
id=3,
host=None, # prevent RT trying to lazy-load this
node=None,
uuid='f4f0bfea-fe7e-4264-b598-01cb13ef1997',
memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
numa_topology=None,
instance_type_id=2,
vm_state=vm_states.ACTIVE,
power_state=power_state.RUNNING,
task_state=task_states.RESIZE_MIGRATING,
system_metadata={},
os_type='fake-os',
project_id='fake-project',
flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
),
# dest-only-evac
'077fb63a-bdc8-4330-90ef-f012082703dc': objects.Instance(
id=102,
host=None, # prevent RT trying to lazy-load this
node=None,
uuid='077fb63a-bdc8-4330-90ef-f012082703dc',
memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
numa_topology=None,
instance_type_id=2,
vm_state=vm_states.ACTIVE,
power_state=power_state.RUNNING,
task_state=task_states.REBUILDING,
system_metadata={},
os_type='fake-os',
project_id='fake-project',
flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
),
}
_MIGRATION_CONTEXT_FIXTURES = {
'f4f0bfea-fe7e-4264-b598-01cb13ef1997': objects.MigrationContext(
instance_uuid='f4f0bfea-fe7e-4264-b598-01cb13ef1997',
migration_id=3,
new_numa_topology=None,
old_numa_topology=None),
'c17741a5-6f3d-44a8-ade8-773dc8c29124': objects.MigrationContext(
instance_uuid='c17741a5-6f3d-44a8-ade8-773dc8c29124',
migration_id=3,
new_numa_topology=None,
old_numa_topology=None),
'f15ecfb0-9bf6-42db-9837-706eb2c4bf08': objects.MigrationContext(
instance_uuid='f15ecfb0-9bf6-42db-9837-706eb2c4bf08',
migration_id=1,
new_numa_topology=None,
old_numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb']),
'f6ed631a-8645-4b12-8e1e-2fff55795765': objects.MigrationContext(
instance_uuid='f6ed631a-8645-4b12-8e1e-2fff55795765',
migration_id=2,
new_numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb'],
old_numa_topology=None),
'077fb63a-bdc8-4330-90ef-f012082703dc': objects.MigrationContext(
instance_uuid='077fb63a-bdc8-4330-90ef-f012082703dc',
migration_id=2,
new_numa_topology=None,
old_numa_topology=None),
}
def overhead_zero(instance):
# Emulate that the driver does not adjust the memory
# of the instance...
return {
'memory_mb': 0
}
def setup_rt(hostname, nodename, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES,
estimate_overhead=overhead_zero):
"""Sets up the resource tracker instance with mock fixtures.
:param virt_resources: Optional override of the resource representation
returned by the virt driver's
`get_available_resource()` method.
:param estimate_overhead: Optional override of a function that should
return overhead of memory given an instance
object. Defaults to returning zero overhead.
"""
sched_client_mock = mock.MagicMock()
notifier_mock = mock.MagicMock()
vd = mock.MagicMock()
# Make sure we don't change any global fixtures during tests
virt_resources = copy.deepcopy(virt_resources)
vd.get_available_resource.return_value = virt_resources
vd.estimate_instance_overhead.side_effect = estimate_overhead
with test.nested(
mock.patch('nova.scheduler.client.SchedulerClient',
return_value=sched_client_mock),
mock.patch('nova.rpc.get_notifier', return_value=notifier_mock)):
rt = resource_tracker.ResourceTracker(hostname, vd, nodename)
return (rt, sched_client_mock, vd)
class BaseTestCase(test.NoDBTestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.rt = None
self.flags(my_ip='1.1.1.1')
def _setup_rt(self, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES,
estimate_overhead=overhead_zero):
(self.rt, self.sched_client_mock,
self.driver_mock) = setup_rt(
'fake-host', 'fake-node', virt_resources, estimate_overhead)
class TestUpdateAvailableResources(BaseTestCase):
def _update_available_resources(self):
# We test RT._update separately, since the complexity
# of the update_available_resource() function is high enough as
# it is, we just want to focus here on testing the resources
# parameter that update_available_resource() eventually passes
# to _update().
with mock.patch.object(self.rt, '_update') as update_mock:
self.rt.update_available_resource(mock.sentinel.ctx)
return update_mock
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_no_migrations_no_reserved(self, get_mock, migr_mock,
get_cn_mock):
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
self._setup_rt()
get_mock.return_value = []
migr_mock.return_value = []
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
update_mock = self._update_available_resources()
vd = self.driver_mock
vd.get_available_resource.assert_called_once_with('fake-node')
get_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
'fake-node',
expected_attrs=[
'system_metadata',
'numa_topology',
'flavor',
'migration_context'])
get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
'fake-node')
migr_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
'fake-node')
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
expected_resources.update({
# host is added in update_available_resources()
# before calling _update()
'host': 'fake-host',
'host_ip': '1.1.1.1',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
'hypervisor_hostname': 'fakehost',
'free_disk_gb': 6,
'hypervisor_version': 0,
'local_gb': 6,
'free_ram_mb': 512,
'memory_mb_used': 0,
'pci_device_pools': objects.PciDevicePoolList(),
'vcpus_used': 0,
'hypervisor_type': 'fake',
'local_gb_used': 0,
'memory_mb': 512,
'current_workload': 0,
'vcpus': 4,
'running_vms': 0
})
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_no_migrations_reserved_disk_and_ram(
self, get_mock, migr_mock, get_cn_mock):
self.flags(reserved_host_disk_mb=1024,
reserved_host_memory_mb=512)
self._setup_rt()
get_mock.return_value = []
migr_mock.return_value = []
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
update_mock = self._update_available_resources()
get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
'fake-node')
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
expected_resources.update({
# host is added in update_available_resources()
# before calling _update()
'host': 'fake-host',
'host_ip': '1.1.1.1',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
'hypervisor_hostname': 'fakehost',
'free_disk_gb': 5, # 6GB avail - 1 GB reserved
'hypervisor_version': 0,
'local_gb': 6,
'free_ram_mb': 0, # 512MB avail - 512MB reserved
'memory_mb_used': 512, # 0MB used + 512MB reserved
'pci_device_pools': objects.PciDevicePoolList(),
'vcpus_used': 0,
'hypervisor_type': 'fake',
'local_gb_used': 1, # 0GB used + 1 GB reserved
'memory_mb': 512,
'current_workload': 0,
'vcpus': 4,
'running_vms': 0
})
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_some_instances_no_migrations(self, get_mock, migr_mock,
get_cn_mock):
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
# Setup virt resources to match used resources to number
# of defined instances on the hypervisor
virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
virt_resources.update(vcpus_used=1,
memory_mb_used=128,
local_gb_used=1)
self._setup_rt(virt_resources=virt_resources)
get_mock.return_value = _INSTANCE_FIXTURES
migr_mock.return_value = []
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
update_mock = self._update_available_resources()
get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
'fake-node')
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
expected_resources.update({
# host is added in update_available_resources()
# before calling _update()
'host': 'fake-host',
'host_ip': '1.1.1.1',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
'hypervisor_hostname': 'fakehost',
'free_disk_gb': 5, # 6 - 1 used
'hypervisor_version': 0,
'local_gb': 6,
'free_ram_mb': 384, # 512 - 128 used
'memory_mb_used': 128,
'pci_device_pools': objects.PciDevicePoolList(),
'vcpus_used': 1,
'hypervisor_type': 'fake',
'local_gb_used': 1,
'memory_mb': 512,
'current_workload': 0,
'vcpus': 4,
'running_vms': 1 # One active instance
})
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_orphaned_instances_no_migrations(self, get_mock, migr_mock,
get_cn_mock):
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
# Setup virt resources to match used resources to number
# of defined instances on the hypervisor
virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
virt_resources.update(memory_mb_used=64)
self._setup_rt(virt_resources=virt_resources)
get_mock.return_value = []
migr_mock.return_value = []
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
# Orphaned instances are those that the virt driver has on
# record as consuming resources on the compute node, but the
# Nova database has no record of the instance being active
# on the host. For some reason, the resource tracker only
# considers orphaned instance's memory usage in its calculations
# of free resources...
orphaned_usages = {
'71ed7ef6-9d2e-4c65-9f4e-90bb6b76261d': {
# Yes, the return result format of get_per_instance_usage
# is indeed this stupid and redundant. Also note that the
# libvirt driver just returns an empty dict always for this
# method and so who the heck knows whether this stuff
# actually works.
'uuid': '71ed7ef6-9d2e-4c65-9f4e-90bb6b76261d',
'memory_mb': 64
}
}
vd = self.driver_mock
vd.get_per_instance_usage.return_value = orphaned_usages
update_mock = self._update_available_resources()
get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
'fake-node')
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
expected_resources.update({
# host is added in update_available_resources()
# before calling _update()
'host': 'fake-host',
'host_ip': '1.1.1.1',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
'hypervisor_hostname': 'fakehost',
'free_disk_gb': 6,
'hypervisor_version': 0,
'local_gb': 6,
'free_ram_mb': 448, # 512 - 64 orphaned usage
'memory_mb_used': 64,
'pci_device_pools': objects.PciDevicePoolList(),
'vcpus_used': 0,
'hypervisor_type': 'fake',
'local_gb_used': 0,
'memory_mb': 512,
'current_workload': 0,
'vcpus': 4,
# Yep, for some reason, orphaned instances are not counted
# as running VMs...
'running_vms': 0
})
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_source_migration(self, get_mock, get_inst_mock,
migr_mock, get_cn_mock):
# We test the behavior of update_available_resource() when
# there is an active migration that involves this compute node
# as the source host not the destination host, and the resource
# tracker does not have any instances assigned to it. This is
# the case when a migration from this compute host to another
# has been completed, but the user has not confirmed the resize
# yet, so the resource tracker must continue to keep the resources
# for the original instance type available on the source compute
# node in case of a revert of the resize.
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
# Setup virt resources to match used resources to number
# of defined instances on the hypervisor
virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
virt_resources.update(vcpus_used=4,
memory_mb_used=128,
local_gb_used=1)
self._setup_rt(virt_resources=virt_resources)
get_mock.return_value = []
migr_obj = _MIGRATION_FIXTURES['source-only']
migr_mock.return_value = [migr_obj]
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
# Migration.instance property is accessed in the migration
# processing code, and this property calls
# objects.Instance.get_by_uuid, so we have the migration return
inst_uuid = migr_obj.instance_uuid
instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone()
get_inst_mock.return_value = instance
instance.migration_context = _MIGRATION_CONTEXT_FIXTURES[inst_uuid]
update_mock = self._update_available_resources()
get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
'fake-node')
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
expected_resources.update({
# host is added in update_available_resources()
# before calling _update()
'host': 'fake-host',
'host_ip': '1.1.1.1',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
'hypervisor_hostname': 'fakehost',
'free_disk_gb': 5,
'hypervisor_version': 0,
'local_gb': 6,
'free_ram_mb': 384, # 512 total - 128 for possible revert of orig
'memory_mb_used': 128, # 128 possible revert amount
'pci_device_pools': objects.PciDevicePoolList(),
'vcpus_used': 1,
'hypervisor_type': 'fake',
'local_gb_used': 1,
'memory_mb': 512,
'current_workload': 0,
'vcpus': 4,
'running_vms': 0
})
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_dest_migration(self, get_mock, get_inst_mock,
migr_mock, get_cn_mock):
# We test the behavior of update_available_resource() when
# there is an active migration that involves this compute node
# as the destination host not the source host, and the resource
# tracker does not yet have any instances assigned to it. This is
# the case when a migration to this compute host from another host
# is in progress, but the user has not confirmed the resize
# yet, so the resource tracker must reserve the resources
# for the possibly-to-be-confirmed instance's instance type
# node in case of a confirm of the resize.
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
# Setup virt resources to match used resources to number
# of defined instances on the hypervisor
virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
virt_resources.update(vcpus_used=2,
memory_mb_used=256,
local_gb_used=5)
self._setup_rt(virt_resources=virt_resources)
get_mock.return_value = []
migr_obj = _MIGRATION_FIXTURES['dest-only']
migr_mock.return_value = [migr_obj]
inst_uuid = migr_obj.instance_uuid
instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone()
get_inst_mock.return_value = instance
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
instance.migration_context = _MIGRATION_CONTEXT_FIXTURES[inst_uuid]
update_mock = self._update_available_resources()
get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
'fake-node')
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
expected_resources.update({
# host is added in update_available_resources()
# before calling _update()
'host': 'fake-host',
'host_ip': '1.1.1.1',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
'hypervisor_hostname': 'fakehost',
'free_disk_gb': 1,
'hypervisor_version': 0,
'local_gb': 6,
'free_ram_mb': 256, # 512 total - 256 for possible confirm of new
'memory_mb_used': 256, # 256 possible confirmed amount
'pci_device_pools': objects.PciDevicePoolList(),
'vcpus_used': 2,
'hypervisor_type': 'fake',
'local_gb_used': 5,
'memory_mb': 512,
'current_workload': 0,
'vcpus': 4,
'running_vms': 0
})
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_dest_evacuation(self, get_mock, get_inst_mock,
migr_mock, get_cn_mock):
# We test the behavior of update_available_resource() when
# there is an active evacuation that involves this compute node
# as the destination host not the source host, and the resource
# tracker does not yet have any instances assigned to it. This is
# the case when a migration to this compute host from another host
# is in progress, but not finished yet.
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
# Setup virt resources to match used resources to number
# of defined instances on the hypervisor
virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
virt_resources.update(vcpus_used=2,
memory_mb_used=256,
local_gb_used=5)
self._setup_rt(virt_resources=virt_resources)
get_mock.return_value = []
migr_obj = _MIGRATION_FIXTURES['dest-only-evac']
migr_mock.return_value = [migr_obj]
inst_uuid = migr_obj.instance_uuid
instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone()
get_inst_mock.return_value = instance
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
instance.migration_context = _MIGRATION_CONTEXT_FIXTURES[inst_uuid]
update_mock = self._update_available_resources()
get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
'fake-node')
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
expected_resources.update({
# host is added in update_available_resources()
# before calling _update()
'host': 'fake-host',
'host_ip': '1.1.1.1',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
'hypervisor_hostname': 'fakehost',
'free_disk_gb': 1,
'hypervisor_version': 0,
'local_gb': 6,
'free_ram_mb': 256, # 512 total - 256 for possible confirm of new
'memory_mb_used': 256, # 256 possible confirmed amount
'pci_device_pools': objects.PciDevicePoolList(),
'vcpus_used': 2,
'hypervisor_type': 'fake',
'local_gb_used': 5,
'memory_mb': 512,
'current_workload': 0,
'vcpus': 4,
'running_vms': 0
})
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
return_value=None)
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_some_instances_source_and_dest_migration(self, get_mock,
get_inst_mock, migr_mock,
get_cn_mock,
get_mig_ctxt_mock):
# We test the behavior of update_available_resource() when
# there is an active migration that involves this compute node
# as the destination host AND the source host, and the resource
# tracker has a few instances assigned to it, including the
# instance that is resizing to this same compute node. The tracking
# of resource amounts takes into account both the old and new
# resize instance types as taking up space on the node.
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
# Setup virt resources to match used resources to number
# of defined instances on the hypervisor
virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
virt_resources.update(vcpus_used=4,
memory_mb_used=512,
local_gb_used=7)
self._setup_rt(virt_resources=virt_resources)
migr_obj = _MIGRATION_FIXTURES['source-and-dest']
migr_mock.return_value = [migr_obj]
inst_uuid = migr_obj.instance_uuid
# The resizing instance has already had its instance type
# changed to the *new* instance type (the bigger one, instance type 2)
resizing_instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone()
resizing_instance.migration_context = (
_MIGRATION_CONTEXT_FIXTURES[resizing_instance.uuid])
all_instances = _INSTANCE_FIXTURES + [resizing_instance]
get_mock.return_value = all_instances
get_inst_mock.return_value = resizing_instance
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
update_mock = self._update_available_resources()
get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
'fake-node')
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
expected_resources.update({
# host is added in update_available_resources()
# before calling _update()
'host': 'fake-host',
'host_ip': '1.1.1.1',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
'hypervisor_hostname': 'fakehost',
# 6 total - 1G existing - 5G new flav - 1G old flav
'free_disk_gb': -1,
'hypervisor_version': 0,
'local_gb': 6,
# 512 total - 128 existing - 256 new flav - 128 old flav
'free_ram_mb': 0,
'memory_mb_used': 512, # 128 exist + 256 new flav + 128 old flav
'pci_device_pools': objects.PciDevicePoolList(),
'vcpus_used': 4,
'hypervisor_type': 'fake',
            'local_gb_used': 7, # 1G existing + 5G new flav + 1G old flav
'memory_mb': 512,
'current_workload': 1, # One migrating instance...
'vcpus': 4,
'running_vms': 2
})
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
class TestInitComputeNode(BaseTestCase):
@mock.patch('nova.objects.ComputeNode.create')
@mock.patch('nova.objects.Service.get_by_compute_host')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
def test_no_op_init_compute_node(self, get_mock, service_mock,
create_mock):
self._setup_rt()
resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
self.rt.compute_node = compute_node
self.rt._init_compute_node(mock.sentinel.ctx, resources)
self.assertFalse(service_mock.called)
self.assertFalse(get_mock.called)
self.assertFalse(create_mock.called)
self.assertFalse(self.rt.disabled)
@mock.patch('nova.objects.ComputeNode.create')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
def test_compute_node_loaded(self, get_mock, create_mock):
self._setup_rt()
def fake_get_node(_ctx, host, node):
res = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
return res
get_mock.side_effect = fake_get_node
resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
self.rt._init_compute_node(mock.sentinel.ctx, resources)
get_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
'fake-node')
self.assertFalse(create_mock.called)
self.assertFalse(self.rt.disabled)
@mock.patch('nova.objects.ComputeNode.create')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
def test_compute_node_created_on_empty(self, get_mock, create_mock):
self._setup_rt()
get_mock.side_effect = exc.NotFound
cpu_alloc_ratio = 1.0
ram_alloc_ratio = 1.0
disk_alloc_ratio = 1.0
resources = {
'host_ip': '1.1.1.1',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
'hypervisor_hostname': 'fakehost',
'free_disk_gb': 6,
'hypervisor_version': 0,
'local_gb': 6,
'free_ram_mb': 512,
'memory_mb_used': 0,
'pci_device_pools': [],
'vcpus_used': 0,
'hypervisor_type': 'fake',
'local_gb_used': 0,
'memory_mb': 512,
'current_workload': 0,
'vcpus': 4,
'running_vms': 0,
'pci_passthrough_devices': '[]'
}
# The expected compute represents the initial values used
# when creating a compute node.
expected_compute = objects.ComputeNode(
host_ip=resources['host_ip'],
vcpus=resources['vcpus'],
memory_mb=resources['memory_mb'],
local_gb=resources['local_gb'],
cpu_info=resources['cpu_info'],
vcpus_used=resources['vcpus_used'],
memory_mb_used=resources['memory_mb_used'],
local_gb_used=resources['local_gb_used'],
numa_topology=resources['numa_topology'],
hypervisor_type=resources['hypervisor_type'],
hypervisor_version=resources['hypervisor_version'],
hypervisor_hostname=resources['hypervisor_hostname'],
# NOTE(sbauza): ResourceTracker adds host field
host='fake-host',
# NOTE(sbauza): ResourceTracker adds CONF allocation ratios
ram_allocation_ratio=ram_alloc_ratio,
cpu_allocation_ratio=cpu_alloc_ratio,
disk_allocation_ratio=disk_alloc_ratio,
)
# Forcing the flags to the values we know
self.rt.ram_allocation_ratio = ram_alloc_ratio
self.rt.cpu_allocation_ratio = cpu_alloc_ratio
self.rt.disk_allocation_ratio = disk_alloc_ratio
self.rt._init_compute_node(mock.sentinel.ctx, resources)
self.assertFalse(self.rt.disabled)
get_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
'fake-node')
create_mock.assert_called_once_with()
self.assertTrue(obj_base.obj_equal_prims(expected_compute,
self.rt.compute_node))
def test_copy_resources_adds_allocation_ratios(self):
self.flags(cpu_allocation_ratio=4.0, ram_allocation_ratio=3.0,
disk_allocation_ratio=2.0)
self._setup_rt()
resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
self.rt.compute_node = compute_node
self.rt._copy_resources(resources)
self.assertEqual(4.0, self.rt.compute_node.cpu_allocation_ratio)
self.assertEqual(3.0, self.rt.compute_node.ram_allocation_ratio)
self.assertEqual(2.0, self.rt.compute_node.disk_allocation_ratio)
class TestUpdateComputeNode(BaseTestCase):
@mock.patch('nova.objects.Service.get_by_compute_host')
def test_existing_compute_node_updated_same_resources(self, service_mock):
self._setup_rt()
# This is the same set of resources as the fixture, deliberately. We
# are checking below to see that update_resource_stats() is not
# needlessly called when the resources don't actually change.
compute = objects.ComputeNode(
host_ip='1.1.1.1',
numa_topology=None,
metrics='[]',
cpu_info='',
hypervisor_hostname='fakehost',
free_disk_gb=6,
hypervisor_version=0,
local_gb=6,
free_ram_mb=512,
memory_mb_used=0,
pci_device_pools=objects.PciDevicePoolList(),
vcpus_used=0,
hypervisor_type='fake',
local_gb_used=0,
memory_mb=512,
current_workload=0,
vcpus=4,
running_vms=0,
cpu_allocation_ratio=16.0,
ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0,
)
self.rt.compute_node = compute
self.rt._update(mock.sentinel.ctx)
self.assertFalse(self.rt.disabled)
self.assertFalse(service_mock.called)
# The above call to _update() will populate the
# RT.old_resources collection with the resources. Here, we check that
        # if we call _update() again with the same resources,
# the scheduler client won't be called again to update those
# (unchanged) resources for the compute node
self.sched_client_mock.reset_mock()
urs_mock = self.sched_client_mock.update_resource_stats
self.rt._update(mock.sentinel.ctx)
self.assertFalse(urs_mock.called)
@mock.patch('nova.objects.Service.get_by_compute_host')
def test_existing_compute_node_updated_new_resources(self, service_mock):
self._setup_rt()
# Deliberately changing local_gb_used, vcpus_used, and memory_mb_used
# below to be different from the compute node fixture's base usages.
# We want to check that the code paths update the stored compute node
# usage records with what is supplied to _update().
compute = objects.ComputeNode(
host='fake-host',
host_ip='1.1.1.1',
numa_topology=None,
metrics='[]',
cpu_info='',
hypervisor_hostname='fakehost',
free_disk_gb=2,
hypervisor_version=0,
local_gb=6,
free_ram_mb=384,
memory_mb_used=128,
pci_device_pools=objects.PciDevicePoolList(),
vcpus_used=2,
hypervisor_type='fake',
local_gb_used=4,
memory_mb=512,
current_workload=0,
vcpus=4,
running_vms=0,
cpu_allocation_ratio=16.0,
ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0,
)
self.rt.compute_node = compute
self.rt._update(mock.sentinel.ctx)
self.assertFalse(self.rt.disabled)
self.assertFalse(service_mock.called)
urs_mock = self.sched_client_mock.update_resource_stats
urs_mock.assert_called_once_with(self.rt.compute_node)
class TestInstanceClaim(BaseTestCase):
def setUp(self):
super(TestInstanceClaim, self).setUp()
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self._setup_rt()
self.rt.compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        # not using mock.sentinel.ctx because instance_claim calls elevated()
self.ctx = mock.MagicMock()
self.elevated = mock.MagicMock()
self.ctx.elevated.return_value = self.elevated
self.instance = _INSTANCE_FIXTURES[0].obj_clone()
def assertEqualNUMAHostTopology(self, expected, got):
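        # Compare NUMA topologies cell by cell on the attributes that matter
        # for resource tracking; raise AssertionError on any mismatch.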
attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage')
if None in (expected, got):
if expected != got:
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
else:
return
if len(expected) != len(got):
raise AssertionError("Topologies don't match due to different "
"number of cells. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
for exp_cell, got_cell in zip(expected.cells, got.cells):
for attr in attrs:
if getattr(exp_cell, attr) != getattr(got_cell, attr):
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
def test_claim_disabled(self):
self.rt.compute_node = None
self.assertTrue(self.rt.disabled)
with mock.patch.object(self.instance, 'save'):
claim = self.rt.instance_claim(mock.sentinel.ctx, self.instance,
None)
self.assertEqual(self.rt.host, self.instance.host)
self.assertEqual(self.rt.host, self.instance.launched_on)
self.assertEqual(self.rt.nodename, self.instance.node)
self.assertIsInstance(claim, claims.NopClaim)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_update_usage_with_claim(self, migr_mock, pci_mock):
# Test that RT.update_usage() only changes the compute node
# resources if there has been a claim first.
pci_mock.return_value = objects.InstancePCIRequests(requests=[])
expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
self.rt.update_usage(self.ctx, self.instance)
self.assertTrue(obj_base.obj_equal_prims(expected,
self.rt.compute_node))
disk_used = self.instance.root_gb + self.instance.ephemeral_gb
expected.update({
'local_gb_used': disk_used,
'memory_mb_used': self.instance.memory_mb,
'free_disk_gb': expected['local_gb'] - disk_used,
"free_ram_mb": expected['memory_mb'] - self.instance.memory_mb,
'running_vms': 1,
'vcpus_used': 1,
'pci_device_pools': objects.PciDevicePoolList(),
})
with mock.patch.object(self.rt, '_update') as update_mock:
with mock.patch.object(self.instance, 'save'):
self.rt.instance_claim(self.ctx, self.instance, None)
update_mock.assert_called_once_with(self.elevated)
self.assertTrue(obj_base.obj_equal_prims(expected,
self.rt.compute_node))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_update_usage_removed(self, migr_mock, pci_mock):
# Test that RT.update_usage() removes the instance when update is
# called in a removed state
pci_mock.return_value = objects.InstancePCIRequests(requests=[])
expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
disk_used = self.instance.root_gb + self.instance.ephemeral_gb
expected.update({
'local_gb_used': disk_used,
'memory_mb_used': self.instance.memory_mb,
'free_disk_gb': expected['local_gb'] - disk_used,
"free_ram_mb": expected['memory_mb'] - self.instance.memory_mb,
'running_vms': 1,
'vcpus_used': 1,
'pci_device_pools': objects.PciDevicePoolList(),
})
with mock.patch.object(self.rt, '_update') as update_mock:
with mock.patch.object(self.instance, 'save'):
self.rt.instance_claim(self.ctx, self.instance, None)
update_mock.assert_called_once_with(self.elevated)
self.assertTrue(obj_base.obj_equal_prims(expected,
self.rt.compute_node))
expected_updated = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
expected_updated['pci_device_pools'] = objects.PciDevicePoolList()
self.instance.vm_state = vm_states.SHELVED_OFFLOADED
with mock.patch.object(self.rt, '_update') as update_mock:
self.rt.update_usage(self.ctx, self.instance)
self.assertTrue(obj_base.obj_equal_prims(expected_updated,
self.rt.compute_node))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_claim(self, migr_mock, pci_mock):
self.assertFalse(self.rt.disabled)
pci_mock.return_value = objects.InstancePCIRequests(requests=[])
disk_used = self.instance.root_gb + self.instance.ephemeral_gb
expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
expected.update({
'local_gb_used': disk_used,
'memory_mb_used': self.instance.memory_mb,
'free_disk_gb': expected['local_gb'] - disk_used,
"free_ram_mb": expected['memory_mb'] - self.instance.memory_mb,
'running_vms': 1,
'vcpus_used': 1,
'pci_device_pools': objects.PciDevicePoolList(),
})
with mock.patch.object(self.rt, '_update') as update_mock:
with mock.patch.object(self.instance, 'save'):
self.rt.instance_claim(self.ctx, self.instance, None)
update_mock.assert_called_once_with(self.elevated)
self.assertTrue(obj_base.obj_equal_prims(expected,
self.rt.compute_node))
@mock.patch('nova.pci.stats.PciDeviceStats.support_requests',
return_value=True)
@mock.patch('nova.pci.manager.PciDevTracker.claim_instance')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_claim_with_pci(self, migr_mock, pci_mock,
pci_manager_mock, pci_stats_mock):
# Test that a claim involving PCI requests correctly claims
# PCI devices on the host and sends an updated pci_device_pools
# attribute of the ComputeNode object.
self.assertFalse(self.rt.disabled)
# TODO(jaypipes): Remove once the PCI tracker is always created
# upon the resource tracker being initialized...
self.rt.pci_tracker = pci_manager.PciDevTracker(mock.sentinel.ctx)
pci_pools = objects.PciDevicePoolList()
pci_manager_mock.return_value = pci_pools
request = objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': 'v', 'product_id': 'p'}])
pci_mock.return_value = objects.InstancePCIRequests(requests=[request])
disk_used = self.instance.root_gb + self.instance.ephemeral_gb
expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
expected.update({
'local_gb_used': disk_used,
'memory_mb_used': self.instance.memory_mb,
'free_disk_gb': expected['local_gb'] - disk_used,
"free_ram_mb": expected['memory_mb'] - self.instance.memory_mb,
'running_vms': 1,
'vcpus_used': 1,
'pci_device_pools': pci_pools
})
with mock.patch.object(self.rt, '_update') as update_mock:
with mock.patch.object(self.instance, 'save'):
self.rt.instance_claim(self.ctx, self.instance, None)
update_mock.assert_called_once_with(self.elevated)
pci_manager_mock.assert_called_once_with(mock.ANY, # context...
pci_mock.return_value,
None)
self.assertTrue(obj_base.obj_equal_prims(expected,
self.rt.compute_node))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_claim_abort_context_manager(self, migr_mock, pci_mock):
pci_mock.return_value = objects.InstancePCIRequests(requests=[])
self.assertEqual(0, self.rt.compute_node.local_gb_used)
self.assertEqual(0, self.rt.compute_node.memory_mb_used)
self.assertEqual(0, self.rt.compute_node.running_vms)
mock_save = mock.MagicMock()
mock_clear_numa = mock.MagicMock()
@mock.patch.object(self.instance, 'save', mock_save)
@mock.patch.object(self.instance, 'clear_numa_topology',
mock_clear_numa)
@mock.patch.object(objects.Instance, 'obj_clone',
return_value=self.instance)
def _doit(mock_clone):
with self.rt.instance_claim(self.ctx, self.instance, None):
# Raise an exception. Just make sure below that the abort()
# method of the claim object was called (and the resulting
# resources reset to the pre-claimed amounts)
raise test.TestingException()
self.assertRaises(test.TestingException, _doit)
self.assertEqual(2, mock_save.call_count)
mock_clear_numa.assert_called_once_with()
self.assertIsNone(self.instance.host)
self.assertIsNone(self.instance.node)
# Assert that the resources claimed by the Claim() constructor
# are returned to the resource tracker due to the claim's abort()
# method being called when triggered by the exception raised above.
self.assertEqual(0, self.rt.compute_node.local_gb_used)
self.assertEqual(0, self.rt.compute_node.memory_mb_used)
self.assertEqual(0, self.rt.compute_node.running_vms)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_claim_abort(self, migr_mock, pci_mock):
pci_mock.return_value = objects.InstancePCIRequests(requests=[])
disk_used = self.instance.root_gb + self.instance.ephemeral_gb
@mock.patch.object(objects.Instance, 'obj_clone',
return_value=self.instance)
@mock.patch.object(self.instance, 'save')
def _claim(mock_save, mock_clone):
return self.rt.instance_claim(self.ctx, self.instance, None)
claim = _claim()
self.assertEqual(disk_used, self.rt.compute_node.local_gb_used)
self.assertEqual(self.instance.memory_mb,
self.rt.compute_node.memory_mb_used)
self.assertEqual(1, self.rt.compute_node.running_vms)
mock_save = mock.MagicMock()
mock_clear_numa = mock.MagicMock()
@mock.patch.object(self.instance, 'save', mock_save)
@mock.patch.object(self.instance, 'clear_numa_topology',
mock_clear_numa)
def _abort():
claim.abort()
_abort()
mock_save.assert_called_once_with()
mock_clear_numa.assert_called_once_with()
self.assertIsNone(self.instance.host)
self.assertIsNone(self.instance.node)
self.assertEqual(0, self.rt.compute_node.local_gb_used)
self.assertEqual(0, self.rt.compute_node.memory_mb_used)
self.assertEqual(0, self.rt.compute_node.running_vms)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_claim_limits(self, migr_mock, pci_mock):
self.assertFalse(self.rt.disabled)
pci_mock.return_value = objects.InstancePCIRequests(requests=[])
good_limits = {
'memory_mb': _COMPUTE_NODE_FIXTURES[0]['memory_mb'],
'disk_gb': _COMPUTE_NODE_FIXTURES[0]['local_gb'],
'vcpu': _COMPUTE_NODE_FIXTURES[0]['vcpus'],
}
for key in good_limits.keys():
bad_limits = copy.deepcopy(good_limits)
bad_limits[key] = 0
self.assertRaises(exc.ComputeResourcesUnavailable,
self.rt.instance_claim,
self.ctx, self.instance, bad_limits)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_claim_numa(self, migr_mock, pci_mock):
self.assertFalse(self.rt.disabled)
pci_mock.return_value = objects.InstancePCIRequests(requests=[])
self.instance.numa_topology = _INSTANCE_NUMA_TOPOLOGIES['2mb']
host_topology = _NUMA_HOST_TOPOLOGIES['2mb']
self.rt.compute_node['numa_topology'] = host_topology._to_json()
limits = {'numa_topology': _NUMA_LIMIT_TOPOLOGIES['2mb']}
expected_numa = copy.deepcopy(host_topology)
for cell in expected_numa.cells:
cell.memory_usage += _2MB
cell.cpu_usage += 1
with mock.patch.object(self.rt, '_update') as update_mock:
with mock.patch.object(self.instance, 'save'):
self.rt.instance_claim(self.ctx, self.instance, limits)
update_mock.assert_called_once_with(self.ctx.elevated())
updated_compute_node = self.rt.compute_node
new_numa = updated_compute_node['numa_topology']
new_numa = objects.NUMATopology.obj_from_db_obj(new_numa)
self.assertEqualNUMAHostTopology(expected_numa, new_numa)
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
class TestMoveClaim(BaseTestCase):
def setUp(self):
super(TestMoveClaim, self).setUp()
self._setup_rt()
self.rt.compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
self.instance = _INSTANCE_FIXTURES[0].obj_clone()
self.flavor = _INSTANCE_TYPE_OBJ_FIXTURES[1]
self.limits = {}
        # not using mock.sentinel.ctx because resize_claim calls elevated()
self.ctx = mock.MagicMock()
self.elevated = mock.MagicMock()
self.ctx.elevated.return_value = self.elevated
# Initialise extensible resource trackers
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
with test.nested(
mock.patch('nova.objects.InstanceList.get_by_host_and_node'),
mock.patch('nova.objects.MigrationList.'
'get_in_progress_by_host_and_node')
) as (inst_list_mock, migr_mock):
inst_list_mock.return_value = objects.InstanceList(objects=[])
migr_mock.return_value = objects.MigrationList(objects=[])
self.rt.update_available_resource(self.ctx)
def register_mocks(self, pci_mock, inst_list_mock, inst_by_uuid,
migr_mock, inst_save_mock):
pci_mock.return_value = objects.InstancePCIRequests(requests=[])
self.inst_list_mock = inst_list_mock
self.inst_by_uuid = inst_by_uuid
self.migr_mock = migr_mock
self.inst_save_mock = inst_save_mock
def audit(self, rt, instances, migrations, migr_inst):
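        # Feed the mocked instance/migration lists to the tracker and run a
        # full resource audit via update_available_resource().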
self.inst_list_mock.return_value = \
objects.InstanceList(objects=instances)
self.migr_mock.return_value = \
objects.MigrationList(objects=migrations)
self.inst_by_uuid.return_value = migr_inst
rt.update_available_resource(self.ctx)
def assertEqual(self, expected, actual):
if type(expected) != dict or type(actual) != dict:
super(TestMoveClaim, self).assertEqual(expected, actual)
return
fail = False
for k, e in expected.items():
a = actual[k]
if e != a:
print("%s: %s != %s" % (k, e, a))
fail = True
if fail:
self.fail()
def adjust_expected(self, expected, flavor):
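        # A flavor's footprint (root + ephemeral disk, memory_mb and vcpus)
        # moves from the free columns to the used columns of the expected
        # compute node.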
disk_used = flavor['root_gb'] + flavor['ephemeral_gb']
expected.free_disk_gb -= disk_used
expected.local_gb_used += disk_used
expected.free_ram_mb -= flavor['memory_mb']
expected.memory_mb_used += flavor['memory_mb']
expected.vcpus_used += flavor['vcpus']
@mock.patch('nova.objects.Flavor.get_by_id')
def test_claim(self, flavor_mock, pci_mock, inst_list_mock, inst_by_uuid,
migr_mock, inst_save_mock):
"""Resize self.instance and check that the expected quantities of each
resource have been consumed.
"""
self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock,
inst_save_mock)
self.driver_mock.get_host_ip_addr.return_value = "fake-ip"
flavor_mock.return_value = objects.Flavor(**self.flavor)
mig_context_obj = _MIGRATION_CONTEXT_FIXTURES[self.instance.uuid]
self.instance.migration_context = mig_context_obj
expected = copy.deepcopy(self.rt.compute_node)
self.adjust_expected(expected, self.flavor)
create_mig_mock = mock.patch.object(self.rt, '_create_migration')
mig_ctxt_mock = mock.patch('nova.objects.MigrationContext',
return_value=mig_context_obj)
with create_mig_mock as migr_mock, mig_ctxt_mock as ctxt_mock:
migr_mock.return_value = _MIGRATION_FIXTURES['source-only']
claim = self.rt.resize_claim(
self.ctx, self.instance, self.flavor, None)
self.assertEqual(1, ctxt_mock.call_count)
self.assertIsInstance(claim, claims.MoveClaim)
inst_save_mock.assert_called_once_with()
self.assertTrue(obj_base.obj_equal_prims(expected,
self.rt.compute_node))
def test_claim_abort(self, pci_mock, inst_list_mock,
inst_by_uuid, migr_mock, inst_save_mock):
# Resize self.instance and check that the expected quantities of each
        # resource have been consumed. Then abort the resize claim and check
# that the resources have been set back to their original values.
self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock,
inst_save_mock)
self.driver_mock.get_host_ip_addr.return_value = "fake-host"
migr_obj = _MIGRATION_FIXTURES['dest-only']
self.instance = _MIGRATION_INSTANCE_FIXTURES[migr_obj['instance_uuid']]
mig_context_obj = _MIGRATION_CONTEXT_FIXTURES[self.instance.uuid]
self.instance.migration_context = mig_context_obj
self.flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2]
with mock.patch.object(self.rt, '_create_migration') as migr_mock:
migr_mock.return_value = migr_obj
claim = self.rt.resize_claim(
self.ctx, self.instance, self.flavor, None)
self.assertIsInstance(claim, claims.MoveClaim)
self.assertEqual(5, self.rt.compute_node.local_gb_used)
self.assertEqual(256, self.rt.compute_node.memory_mb_used)
self.assertEqual(1, len(self.rt.tracked_migrations))
with mock.patch('nova.objects.Instance.'
'drop_migration_context') as drop_migr_mock:
claim.abort()
drop_migr_mock.assert_called_once_with()
self.assertEqual(0, self.rt.compute_node.local_gb_used)
self.assertEqual(0, self.rt.compute_node.memory_mb_used)
self.assertEqual(0, len(self.rt.tracked_migrations))
def test_same_host(self, pci_mock, inst_list_mock, inst_by_uuid,
migr_mock, inst_save_mock):
"""Resize self.instance to the same host but with a different flavor.
Then abort the claim. Check that the same amount of resources are
available afterwards as we started with.
"""
self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock,
inst_save_mock)
migr_obj = _MIGRATION_FIXTURES['source-and-dest']
self.instance = _MIGRATION_INSTANCE_FIXTURES[migr_obj['instance_uuid']]
self.instance._context = self.ctx
mig_context_obj = _MIGRATION_CONTEXT_FIXTURES[self.instance.uuid]
self.instance.migration_context = mig_context_obj
with mock.patch.object(self.instance, 'save'):
self.rt.instance_claim(self.ctx, self.instance, None)
expected = copy.deepcopy(self.rt.compute_node)
create_mig_mock = mock.patch.object(self.rt, '_create_migration')
mig_ctxt_mock = mock.patch('nova.objects.MigrationContext',
return_value=mig_context_obj)
with create_mig_mock as migr_mock, mig_ctxt_mock as ctxt_mock:
migr_mock.return_value = migr_obj
claim = self.rt.resize_claim(self.ctx, self.instance,
_INSTANCE_TYPE_OBJ_FIXTURES[1], None)
self.assertEqual(1, ctxt_mock.call_count)
self.audit(self.rt, [self.instance], [migr_obj], self.instance)
inst_save_mock.assert_called_once_with()
self.assertNotEqual(expected, self.rt.compute_node)
claim.instance.migration_context = mig_context_obj
with mock.patch('nova.objects.MigrationContext._destroy') as destroy_m:
claim.abort()
self.assertTrue(obj_base.obj_equal_prims(expected,
self.rt.compute_node))
destroy_m.assert_called_once_with(self.ctx, claim.instance.uuid)
def test_revert_reserve_source(
self, pci_mock, inst_list_mock, inst_by_uuid, migr_mock,
inst_save_mock):
"""Check that the source node of an instance migration reserves
resources until the migration has completed, even if the migration is
reverted.
"""
self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock,
inst_save_mock)
# Get our migrations, instances and itypes in a row
src_migr = _MIGRATION_FIXTURES['source-only']
src_instance = (
_MIGRATION_INSTANCE_FIXTURES[src_migr['instance_uuid']].obj_clone()
)
src_instance.migration_context = (
_MIGRATION_CONTEXT_FIXTURES[src_instance.uuid])
old_itype = _INSTANCE_TYPE_FIXTURES[src_migr['old_instance_type_id']]
dst_migr = _MIGRATION_FIXTURES['dest-only']
dst_instance = (
_MIGRATION_INSTANCE_FIXTURES[dst_migr['instance_uuid']].obj_clone()
)
new_itype = _INSTANCE_TYPE_FIXTURES[dst_migr['new_instance_type_id']]
dst_instance.migration_context = (
_MIGRATION_CONTEXT_FIXTURES[dst_instance.uuid])
        # Set up the destination resource tracker and call
        # update_available_resource to initialise extensible resource trackers
src_rt = self.rt
(dst_rt, _, _) = setup_rt("other-host", "other-node")
dst_rt.compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
inst_list_mock.return_value = objects.InstanceList(objects=[])
dst_rt.update_available_resource(self.ctx)
# Register the instance with dst_rt
expected = copy.deepcopy(dst_rt.compute_node)
with mock.patch.object(dst_instance, 'save'):
dst_rt.instance_claim(self.ctx, dst_instance)
self.adjust_expected(expected, new_itype)
expected.stats = {'num_task_resize_migrating': 1,
'io_workload': 1,
'num_instances': 1,
'num_proj_fake-project': 1,
'num_vm_active': 1,
'num_os_type_fake-os': 1}
expected.current_workload = 1
expected.running_vms = 1
self.assertTrue(obj_base.obj_equal_prims(expected,
dst_rt.compute_node))
# Provide the migration via a mock, then audit dst_rt to check that
# the instance + migration resources are not double-counted
self.audit(dst_rt, [dst_instance], [dst_migr], dst_instance)
self.assertTrue(obj_base.obj_equal_prims(expected,
dst_rt.compute_node))
# Audit src_rt with src_migr
expected = copy.deepcopy(src_rt.compute_node)
self.adjust_expected(expected, old_itype)
self.audit(src_rt, [], [src_migr], src_instance)
self.assertTrue(obj_base.obj_equal_prims(expected,
src_rt.compute_node))
# Flag the instance as reverting and re-audit
src_instance['vm_state'] = vm_states.RESIZED
src_instance['task_state'] = task_states.RESIZE_REVERTING
self.audit(src_rt, [], [src_migr], src_instance)
self.assertTrue(obj_base.obj_equal_prims(expected,
src_rt.compute_node))
def test_update_available_resources_migration_no_context(self, pci_mock,
inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock):
"""When migrating onto older nodes - it is possible for the
migration_context record to be missing. Confirm resource audit works
regardless.
"""
self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock,
inst_save_mock)
migr_obj = _MIGRATION_FIXTURES['source-and-dest']
self.instance = _MIGRATION_INSTANCE_FIXTURES[migr_obj['instance_uuid']]
self.instance.migration_context = None
expected = copy.deepcopy(self.rt.compute_node)
self.adjust_expected(expected, self.flavor)
self.audit(self.rt, [], [migr_obj], self.instance)
self.assertTrue(obj_base.obj_equal_prims(expected,
self.rt.compute_node))
def test_dupe_filter(self, pci_mock, inst_list_mock, inst_by_uuid,
migr_mock, inst_save_mock):
self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock,
inst_save_mock)
migr_obj = _MIGRATION_FIXTURES['source-and-dest']
# This is good enough to prevent a lazy-load; value is unimportant
migr_obj['updated_at'] = None
self.instance = _MIGRATION_INSTANCE_FIXTURES[migr_obj['instance_uuid']]
self.instance.migration_context = (
_MIGRATION_CONTEXT_FIXTURES[self.instance.uuid])
self.audit(self.rt, [], [migr_obj, migr_obj], self.instance)
self.assertEqual(1, len(self.rt.tracked_migrations))
class TestInstanceInResizeState(test.NoDBTestCase):
def test_active_suspending(self):
instance = objects.Instance(vm_state=vm_states.ACTIVE,
task_state=task_states.SUSPENDING)
self.assertFalse(resource_tracker._instance_in_resize_state(instance))
def test_resized_suspending(self):
instance = objects.Instance(vm_state=vm_states.RESIZED,
task_state=task_states.SUSPENDING)
self.assertTrue(resource_tracker._instance_in_resize_state(instance))
def test_resized_resize_migrating(self):
instance = objects.Instance(vm_state=vm_states.RESIZED,
task_state=task_states.RESIZE_MIGRATING)
self.assertTrue(resource_tracker._instance_in_resize_state(instance))
def test_resized_resize_finish(self):
instance = objects.Instance(vm_state=vm_states.RESIZED,
task_state=task_states.RESIZE_FINISH)
self.assertTrue(resource_tracker._instance_in_resize_state(instance))
|
apache-2.0
| -7,270,558,305,109,929,000 | 42.757714 | 79 | 0.591008 | false |
woutdenolf/spectrocrunch
|
spectrocrunch/visualization/tests/test_scene.py
|
1
|
2667
|
# -*- coding: utf-8 -*-
import unittest
import matplotlib.pyplot as plt
import numpy as np
from .. import scene
from ...patch.pint import ureg
class test_scene(unittest.TestCase):
def test_images(self):
n0, n1 = 5, 10
img = np.arange(n0 * n1).reshape(n0, n1)
unit0 = ureg.mm
unit1 = ureg.micrometer
s1 = scene.Scene(unit0=unit0, unit1=unit1)
s2 = scene.Scene(unit0=unit0, unit1=unit1)
s2.transpose(True)
# s2.flipx(increasing=True)
s2.axlabels = ["dim0", "dim1"]
s2.cmap = plt.get_cmap("gray")
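        # Both scenes register the same items below; s2 renders them
        # transposed with its own axis labels and colormap.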
o1 = scene.Image(
img, lim0=s1.q0([8, 8 + n0 - 1]), lim1=s1.q1([10 + n1 - 1, 10])
)
s1.register(o1)
s2.register(o1)
p0 = sorted(o1.datarange(0, border=False))
p1 = sorted(o1.datarange(1, border=False))
o = scene.Polyline([p0[0], p0[1], p0[1], p0[0]], [p1[0], p1[0], p1[1], p1[1]])
s1.register(o)
s2.register(o)
o.set_setting("scatter", True)
o2 = scene.Image(
img, lim0=s1.q0([-2, -2 + n0 - 1]), lim1=s1.q1([-1, -1 + n1 - 1])
)
s1.register(o2)
s2.register(o2)
o.set_setting("scatter", True)
p0 = sorted(o2.datarange(0, border=False))
p1 = sorted(o2.datarange(1, border=False))
o = scene.Text(
[p0[0], p0[1], p0[1], p0[0]],
[p1[0], p1[0], p1[1], p1[1]],
labels=[1, 2, 3, 4],
)
s1.register(o)
s2.register(o)
f, ax = plt.subplots()
s1.setaxes(ax)
f, ax = plt.subplots()
s2.setaxes(ax)
# Update scene 1
s1.updateview()
# Shift image, axes scaling and update scene 2
o1.lim[0] = s1.q0([9, 9 + n0 - 1])
s2.setdatarange(0, s1.q0([0, 1]))
s2.setdatarange(1, s1.q1([0, 1]))
s2.updateview()
# plt.pause(0.01)
# Update scene 1
s1.updateview()
# Reset axes of scene 1
f, ax = plt.subplots()
s1.setaxes(ax)
# Shift image, axes offset, different normalization and update scene 1
o1.lim[0] = s1.q0([9, 9 + n0 - 1])
s1.set_settings({"cnorm": "power", "cnormargs": (0.1,)})
s1.updateview()
# plt.pause(0.01)
# plt.show()
def test_suite():
"""Test suite including all test suites"""
testSuite = unittest.TestSuite()
testSuite.addTest(test_scene("test_images"))
return testSuite
if __name__ == "__main__":
import sys
mysuite = test_suite()
runner = unittest.TextTestRunner()
if not runner.run(mysuite).wasSuccessful():
sys.exit(1)
|
mit
| 1,212,141,342,712,263,000 | 24.893204 | 86 | 0.522685 | false |
gurneyalex/odoo
|
addons/website_hr_recruitment/models/hr_recruitment.py
|
8
|
2050
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from werkzeug import urls
from odoo import api, fields, models
from odoo.tools.translate import html_translate
class RecruitmentSource(models.Model):
_inherit = 'hr.recruitment.source'
url = fields.Char(compute='_compute_url', string='Url Parameters')
@api.depends('source_id', 'source_id.name', 'job_id')
def _compute_url(self):
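        # Build the public job URL with UTM campaign/medium/source parameters
        # so applications can be attributed to this recruitment source.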
base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
for source in self:
source.url = urls.url_join(base_url, "%s?%s" % (source.job_id.website_url,
urls.url_encode({
'utm_campaign': self.env.ref('hr_recruitment.utm_campaign_job').name,
'utm_medium': self.env.ref('utm.utm_medium_website').name,
'utm_source': source.source_id.name
})
))
class Applicant(models.Model):
_inherit = 'hr.applicant'
def website_form_input_filter(self, request, values):
if 'partner_name' in values:
values.setdefault('name', '%s\'s Application' % values['partner_name'])
return values
class Job(models.Model):
_name = 'hr.job'
_inherit = ['hr.job', 'website.seo.metadata', 'website.published.multi.mixin']
def _get_default_website_description(self):
default_description = self.env["ir.model.data"].xmlid_to_object("website_hr_recruitment.default_website_description")
return (default_description.render() if default_description else "")
website_description = fields.Html('Website description', translate=html_translate, sanitize_attributes=False, default=_get_default_website_description, prefetch=False)
def _compute_website_url(self):
super(Job, self)._compute_website_url()
for job in self:
job.website_url = "/jobs/detail/%s" % job.id
def set_open(self):
self.write({'website_published': False})
return super(Job, self).set_open()
|
agpl-3.0
| -3,908,755,139,462,205,000 | 35.607143 | 171 | 0.639024 | false |
khertan/Wleux
|
make.py
|
1
|
3091
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2011 Benoît HERVIER <khertan@khertan.net>
# Licenced under GPLv3
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 3 only.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
import os
import sys
from glob import glob
import pypackager
sys.path.append('wleux')
from wleux import __version__
__build__ = '2'
__author__ = "Benoît HERVIER (khertan)"
__mail__ = "khertan@khertan.net"
__upgrade__ = '''1.0: First public release
1.0-2: Fix package where python-gconf dep was missing
1.1: Add a feature to center the image by scrolling it in the preview before setting it as wallpaper
'''
if __name__ == "__main__":
try:
os.chdir(os.path.dirname(sys.argv[0]))
except:
pass
p=pypackager.PyPackager("wleux")
p.display_name = 'Wleux'
p.version = __version__+'.0'
p.buildversion = __build__
p.description="Setup a wallpaper from the desktoppr.co service."
p.upgrade_description=__upgrade__
p.author=__author__
p.maintainer=__author__
p.email=__mail__
p.depends = "python, python-pyside.qtgui, python-pyside.qtdeclarative, python-pyside.qtcore, python-pyside.qtopengl, python-gconf"
p.suggests = ""
p.section="user/office"
p.arch="armel"
p.urgency="low"
p.icon='wleux.png'
p.distribution="harmattan"
p.repository="Khertan Repository"
p.bugtracker = 'http://github.com/khertan/Wleux/issues'
p.changelog = p.upgrade_description
p.maemo_flags = 'visible'
p.meego_desktop_entry_filename = '/usr/share/applications/wleux.desktop'
p.createDigsigsums = True
files = []
p.postinst = '''#!/bin/sh
echo "Giving permissions for apps to execute"
chmod +x /opt/wleux/__init__.py
echo "Pre compiling Wleux"
pycompile -O /opt/wleux/*.py
exit 0'''
p.createDigsigsums = True
    #Remove pyc
for filepath in glob(os.path.join(os.path.dirname(__file__), p.name, '*.pyc')):
os.remove(filepath)
    #Remove pyo
for filepath in glob(os.path.join(os.path.dirname(__file__), p.name, '*.pyo')):
os.remove(filepath)
#Src
for root, dirs, fs in os.walk(os.path.join(os.path.dirname(__file__), p.name)):
for f in fs:
files.append(os.path.join(root, f))
p['/usr/share/dbus-1/services'] = ['wleux.service',]
p['/usr/share/icons/blanco/80x80/apps'] = ['wleux.png',]
p['/usr/share/applications'] = ['wleux.desktop',]
p["/opt"] = files
print p.generate(build_binary=True,build_src=True)
if not os.path.exists('dists'):
os.mkdir('dists')
for filepath in glob(p.name+'_'+p.version+'-'+p.buildversion+'*'):
os.rename(filepath, os.path.join(os.path.dirname(filepath), 'dists', os.path.basename(filepath)))
|
gpl-3.0
| -7,431,925,628,490,577,000 | 33.322222 | 134 | 0.663969 | false |
joequant/sptrader
|
sptrader/spstore.py
|
1
|
14099
|
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015,2016 Daniel Rodriguez
#
# Licensed under the GPLv3+ License
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import collections
from datetime import datetime, timedelta
import time as _time
import json
import threading
import requests
import sseclient
import logging
import sys
import backtrader as bt
import random
from backtrader.metabase import MetaParams
from backtrader.utils.py3 import queue, with_metaclass
from backtrader.utils import AutoDict
from backtrader.position import Position
from copy import copy
class MetaSingleton(MetaParams):
'''Metaclass to make a metaclassed class a singleton'''
def __init__(cls, name, bases, dct):
super(MetaSingleton, cls).__init__(name, bases, dct)
cls._singleton = None
def __call__(cls, *args, **kwargs):
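        # The first call constructs and caches the instance; every later call
        # returns the same cached object.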
if cls._singleton is None:
cls._singleton = (
super(MetaSingleton, cls).__call__(*args, **kwargs))
return cls._singleton
class SharpPointStore(with_metaclass(MetaSingleton, object)):
    '''Singleton class controlling the connections to SharpPoint.
Params:
- ``token`` (default:``None``): API access token
- ``account`` (default: ``None``): account id
- ``practice`` (default: ``False``): use the test environment
- ``account_tmout`` (default: ``10.0``): refresh period for account
value/cash refresh
'''
BrokerCls = None # broker class will autoregister
DataCls = None # data class will auto register
BackTestCls = None
params = (
('gateway', 'http://localhost:5000/'),
('token', ''),
('account', ''),
('login', None),
('practice', False),
('loglevel', logging.DEBUG),
('account_tmout', 30.0),
)
_DTEPOCH = datetime(1970, 1, 1)
_ENVPRACTICE = 'practice'
_ENVLIVE = 'live'
@classmethod
def getdata(cls, *args, **kwargs):
'''Returns ``DataCls`` with args, kwargs'''
return cls.DataCls(*args, **kwargs)
@classmethod
def getbroker(cls, *args, **kwargs):
'''Returns broker with *args, **kwargs from registered ``BrokerCls``
or ``BackTestCls``
'''
backtest = kwargs.pop('backtest', False)
if backtest:
return cls.BackTestCls(*args, **kwargs)
return cls.BrokerCls(*args, **kwargs)
def __init__(self, *args, **kwargs):
super(SharpPointStore, self).__init__()
self.log = kwargs.get('log', sys.stdout)
self.positions = collections.defaultdict(Position)
self.notifs = collections.deque() # store notifications for cerebro
self._env = None # reference to cerebro for general notifications
self.broker = None # broker instance
self.datas = list() # datas that have registered over start
self._orders = collections.OrderedDict() # map order.ref to oid
self._ordersrev = collections.OrderedDict() # map oid to order.ref
self._transpend = collections.defaultdict(collections.deque)
self._oenv = self._ENVPRACTICE if self.p.practice else self._ENVLIVE
self._cash = 0.0
self._value = 0.0
self.q_account = queue.Queue()
self.q_ordercreate = queue.Queue()
self.q_orderclose = queue.Queue()
self.streaming_events()
def start(self, data=None, broker=None):
# Datas require some processing to kickstart data reception
if data is None and broker is None:
self.cash = None
return
if data is not None:
self._env = data._env
            # For datas simulate a queue with None to kickstart data reception
self.datas.append(data)
if self.broker is not None:
self.broker.data_started(data)
elif broker is not None:
self.broker = broker
self.broker_threads()
def stop(self):
# signal end of thread
if self.broker is not None:
self.q_ordercreate.put(None)
self.q_orderclose.put(None)
self.q_account.put(None)
def put_notification(self, msg, *args, **kwargs):
self.notifs.append((msg, args, kwargs))
def get_notifications(self):
'''Return the pending "store" notifications'''
self.notifs.append(None) # put a mark / threads could still append
return [x for x in iter(self.notifs.popleft, None)]
def get_positions(self):
pass
def get_granularity(self, timeframe, compression):
pass
def get_instrument(self, dataname):
pass
def _get_request(self, method, **kwargs):
if self.p.gateway is not None:
return requests.get(self.p.gateway + method,
**kwargs)
def _post_request(self, method, **kwargs):
if self.p.gateway is not None:
return requests.post(self.p.gateway + method,
**kwargs)
def streaming_events(self, tmout=None):
q = queue.Queue()
kwargs = {'q': q, 'tmout': tmout}
t = threading.Thread(target=self._t_streaming_listener, kwargs=kwargs)
t.daemon = True
t.start()
def updateposition(self, data):
"""Update position from streamer"""
try:
position = Position(data['Qty'],
abs(data['TotalAmt']/data['Qty']))
self.positions[data['ProdCode']] = position
except KeyError:
print("key-error in updateposition", data, file=self.log)
def getposition(self, data, clone=False):
position = self.positions[data]
if clone:
return copy(position)
else:
return position
def _t_streaming_listener(self, q, tmout=None):
if self.p.gateway is None:
return
response = self._get_request("log/subscribe/" + str(_time.time()),
stream=True)
if self.p.loglevel <= logging.INFO:
print("connecting to events", file=self.log)
if response.status_code != requests.codes.ok:
if self.p.loglevel <= logging.ERROR:
print("failed response code", response,
file=self.log)
raise ValueError('failed response code')
client = sseclient.SSEClient(response)
for event in client.events():
data = json.loads(event.data)
info = data.get('data', None)
oref = 0
if event.event == "AccountPositionPush":
if self.p.loglevel <= logging.DEBUG:
print(event.event, data['data'], file=self.log)
self.updateposition(data['data'])
continue
if self.broker is None:
continue
try:
oref = info['Ref2']
except:
if self.p.loglevel <= logging.DEBUG:
print("Unhandled event", file=self.log)
continue
if self.p.loglevel <= logging.DEBUG:
print(event, file=self.log)
if event.event == "OrderBeforeSendReport":
if self.p.loglevel <= logging.DEBUG:
print(data, file=self.log)
self.broker._submit(oref)
elif event.event == "OrderRequestFailed":
if self.p.loglevel <= logging.DEBUG:
print(data, file=self.log)
self.broker._reject(oref)
elif event.event == "OrderReport":
if self.p.loglevel <= logging.DEBUG:
print(data, file=self.log)
status = int(info['Status'])
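                # Order status codes, inferred from the handling below:
                # 4=accepted, 6=cancelled, 8=partial fill, 9=completed.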
if status == 4:
self.broker._accept(oref)
elif status == 6:
self.broker._cancel(oref)
elif status == 8:
order = self.order_by_ref(oref)
order.partial()
self.broker.notify(order)
elif status == 9:
order = self.order_by_ref(oref)
order.completed()
self.broker.notify(order)
elif event.event == "TradeReport":
if self.p.loglevel <= logging.DEBUG:
print(data, file=self.log)
qty = int(info['TradedQty'])
price = float(info['Price'])
pqty = int(info['Qty'])
avgprice = float(info['AvgTradedPrice'])
self.broker._fill(oref, qty, price,
pqty=pqty, avgpice=avgprice)
def streaming_prices(self, dataname, tmout=None):
q = queue.Queue()
kwargs = {'q': q, 'dataname': dataname, 'tmout': tmout}
t = threading.Thread(target=self._t_streaming_prices, kwargs=kwargs)
t.daemon = True
t.start()
return q
def _t_streaming_prices(self, dataname, q, tmout):
r = self._get_request("ticker/subscribe/" + dataname)
def get_cash(self):
return self._cash
def get_value(self):
return self._value
def broker_threads(self):
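        # Spawn the background workers: account refresh, order creation and
        # order cancellation each run in their own daemon thread fed by a
        # queue.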
self.q_account.put(True) # force an immediate update
t = threading.Thread(target=self._t_account)
t.daemon = True
t.start()
t = threading.Thread(target=self._t_order_create)
t.daemon = True
t.start()
t = threading.Thread(target=self._t_order_cancel)
t.daemon = True
t.start()
_ORDEREXECS = {
bt.Order.Market: 'market',
bt.Order.Limit: 'limit',
bt.Order.Stop: 'stop',
bt.Order.StopLimit: 'stop',
}
def isloggedin(self):
login_info = self._get_request("login-info").json()
if self.p.loglevel <= logging.DEBUG:
print("login-info", login_info, file=self.log)
return int(login_info['status']) != -1
def setlogin(self, login):
self.p.login = login
self.q_account.put(True) # force an immediate update
def _t_account(self):
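        # Re-login every account_tmout seconds if the session has dropped; a
        # message on q_account forces an immediate check, None ends the
        # thread.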
if self.p.loglevel <= logging.DEBUG:
print("t_account", file=self.log)
while True:
try:
msg = self.q_account.get(timeout=self.p.account_tmout)
if msg is None:
break # end of thread
except queue.Empty: # tmout -> time to refresh
pass
try:
if self.p.login is not None and not self.isloggedin():
if self.p.loglevel <= logging.DEBUG:
print("login", self.p.login, file=self.log)
r = self._post_request("login", json=self.p.login)
except Exception as e:
self.put_notification(e)
continue
def order_create(self, order, **kwargs):
okwargs = {"DecInPrice": 0,
"OpenClose": 0,
"CondType": 0,
"OrderType": 0,
"ValidType": 0,
"StopType": 0,
"OrderOptions": 0}
if order.isbuy():
okwargs['BuySell'] = "B"
elif order.issell():
okwargs['BuySell'] = "S"
okwargs['Price'] = order.created.price
if order.exectype == bt.Order.Stop:
okwargs['StopType'] = 'L'
okwargs['Price'] = 0
okwargs['OrderType'] = 6
okwargs['CondType'] = 1
elif order.exectype == bt.Order.StopLimit:
okwargs['StopLevel'] = order.created.price
okwargs['Price'] = order.created.pricelimit
okwargs['CondType'] = 1
okwargs['StopType'] = 'L'
okwargs['Qty'] = abs(order.created.size)
okwargs['ProdCode'] = order.data._dataname
okwargs['Ref'] = kwargs.get('Ref', '')
order.ref = \
"{:%m%d%H%M%S}".format(datetime.utcnow()) + \
"%04d" % random.randrange(10000)
self._orders[order.ref] = order
okwargs['Ref2'] = str(order.ref)
okwargs['Inactive'] = kwargs.get('Inactive', 0)
if self.p.loglevel <= logging.DEBUG:
print(okwargs, file=self.log)
self.q_ordercreate.put((order.ref, okwargs,))
return order
def order_by_ref(self, oref):
o = self._orders.get(oref, None)
if o is None and self.p.loglevel <= logging.INFO:
            print('cannot find oref %s' % oref, file=self.log)
print(self._orders, file=self.log)
return o
def _t_order_create(self):
while True:
msg = self.q_ordercreate.get()
if msg is None:
break
oref, okwargs = msg
if self.p.loglevel <= logging.DEBUG:
print(msg, file=self.log)
try:
r = self._post_request("order/add", json=okwargs)
except Exception as e:
self.put_notification(e)
                self.broker._reject(oref)
return
def order_cancel(self, order):
self.q_orderclose.put(order.ref)
return order
def _t_order_cancel(self):
while True:
oref = self.q_orderclose.get()
if oref is None:
break
oid = self._orders.get(oref, None)
if oid is None:
continue # the order is no longer there
try:
okwargs = {'Ref2': oref}
r = self._post_request("order/delete", json=okwargs)
except Exception as e:
self.put_notification(e)
continue # not cancelled - FIXME: notify
self.broker._cancel(oref)
if __name__ == '__main__':
s = SharpPointStore()
|
bsd-2-clause
| 6,111,832,492,346,460,000 | 33.471883 | 79 | 0.536634 | false |
matrixorz/ut_ali
|
ut_engine/evaluate/evaluator.py
|
1
|
1366
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
import sys
from collections import defaultdict as ddict
def load_test(path):
test = ddict(list)
with file(path) as f:
for line in f:
elements = line.strip().split(',')
if elements[2] != '1': continue
uid = int(elements[0])
bid = int(elements[1])
test[uid].append(bid)
return test
def load_predict(path):
predict = ddict(list)
with file(path) as f:
for line in f:
elements = line.strip().split(' ')
uid = int(elements[0])
predict[uid] = map(lambda x: int(x), elements[1].split(','))
return predict
def evaluate(test, predict):
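    # Precision: fraction of predicted (user, brand) pairs actually bought.
    # Recall: fraction of bought pairs that were predicted.
    # F1 is the harmonic mean of precision and recall.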
hitBrand = 0
pBrand = 0
for uid in predict:
pBrand += len(predict[uid])
hitBrand += len(set(predict[uid]) & set(test[uid]))
P = 1.0*hitBrand/pBrand
hitBrand = 0
bBrand = 0
for uid in test:
bBrand += len(test[uid])
hitBrand += len(set(predict[uid]) & set(test[uid]))
R = 1.0*hitBrand/bBrand
F1 = 2*P*R/(P+R)
print "F1=%f\nP=%f\nR=%f\n" % (F1, P, R)
if __name__ == "__main__":
if len(sys.argv) != 3:
print "usage: %s test_data predict_data\n" % sys.argv[0]
exit(0)
test = load_test(sys.argv[1])
predict = load_predict(sys.argv[2])
evaluate(test, predict)
|
mit
| -1,154,984,135,064,513,000 | 26.32 | 72 | 0.544656 | false |
dib-lab/kevlar
|
kevlar/cli/dist.py
|
1
|
2230
|
#!/usr/bin/env python
#
# -----------------------------------------------------------------------------
# Copyright (c) 2016 The Regents of the University of California
#
# This file is part of kevlar (http://github.com/dib-lab/kevlar) and is
# licensed under the MIT license: see LICENSE.
# -----------------------------------------------------------------------------
import khmer
from khmer import khmer_args
def subparser(subparsers):
"""Define the `kevlar dist` command-line interface."""
desc = 'Compute the k-mer abundance distribution for a data set.'
subparser = subparsers.add_parser('dist', description=desc)
subparser.add_argument('-o', '--out', metavar='FILE', help='output file; '
'default is terminal (stdout)')
subparser.add_argument('-k', '--ksize', metavar='K', type=int, default=31,
help='k-mer size; default is 31')
subparser.add_argument('-M', '--memory', type=khmer_args.memory_setting,
default=1e6, metavar='MEM',
help='memory to allocate for k-mer counting')
subparser.add_argument('-t', '--threads', type=int, metavar='T', default=1,
help='number of threads to use for k-mer counting; '
'default is 1')
subparser.add_argument('-p', '--plot', metavar='PNG', help='plot k-mer '
'abundance distribution to file `PNG`')
subparser.add_argument('--tsv', metavar='TSV', help='write k-mer '
'abundance distribution out to file formatted as '
'tab-separated values')
subparser.add_argument('--plot-xlim', metavar=('MIN', 'MAX'), type=int,
nargs=2, default=(0, 100), help='define the '
'minimum and maximum x values (k-mer abundance) '
'for the plot; default is `0 100`')
subparser.add_argument('mask', help='nodetable containing target k-mers '
'to count (such as single-copy exonic k-mers)')
subparser.add_argument('infiles', nargs='+', help='input files in '
'FASTA/FASTQ format')
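# Example invocation (hypothetical file names, assuming the usual
# `kevlar <subcommand>` entry point):
#     kevlar dist --ksize 31 --memory 4e9 --out dist.txt exonic.nt reads.fq.gz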
|
mit
| 883,916,755,266,280,200 | 52.095238 | 79 | 0.527354 | false |
rocktavious/DevToolsLib
|
DTL/api/daemon.py
|
1
|
6655
|
# Taken and modified from:
# http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/
import threading
import Queue
import atexit
import os
import signal
import sys
import time
import logging
import logging.handlers
from DTL.api import loggingUtils
if (hasattr(os, "devnull")):
DEVNULL = os.devnull
else:
DEVNULL = "/dev/null"
#------------------------------------------------------------
#------------------------------------------------------------
class DaemonThread(threading.Thread):
"""
Thread class that is a daemon by default. (Normal threading.Thread
objects are not daemons by default.)
"""
#------------------------------------------------------------
def __init__(self, *args, **kwargs):
threading.Thread.__init__(self, *args, **kwargs)
self.daemon = True
#------------------------------------------------------------
#------------------------------------------------------------
class Daemon(object):
"""
A generic daemon class.
    Usage: subclass the Daemon class and override the mainloop() and shutdown() methods
"""
__metaclass__ = loggingUtils.LoggingMetaclass
#------------------------------------------------------------
def __init__(self, serviceName, pidfile, stdin=DEVNULL, stdout=DEVNULL, stderr=DEVNULL):
super(Daemon, self).__init__()
self._serviceName = serviceName
self._stdin = stdin
self._stdout = stdout
self._stderr = stderr
self._pidfile = pidfile
self._continue = True
#------------------------------------------------------------
def _daemonize(self):
"""
Do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file(self._stdin, 'r')
so = file(self._stdout, 'a+')
se = file(self._stderr, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile and subsys file
pid = str(os.getpid())
file(self._pidfile,'w+').write("%s\n" % pid)
if os.path.exists('/var/lock/subsys'):
fh = open(os.path.join('/var/lock/subsys', self._serviceName), 'w')
fh.close()
#------------------------------------------------------------
def _delpid(self):
if os.path.exists(self._pidfile):
os.remove(self._pidfile)
subsysPath = os.path.join('/var/lock/subsys', self._serviceName)
if os.path.exists(subsysPath):
os.remove(subsysPath)
self.shutdown()
#------------------------------------------------------------
def _start(self, daemonize=True):
"""
Start the daemon
"""
# Check for a pidfile to see if the daemon already runs
try:
pf = file(self._pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if pid:
message = "pidfile %s already exist. Daemon already running?\n"
sys.stderr.write(message % self._pidfile)
sys.exit(1)
# Start the daemon
if daemonize:
self._daemonize()
# Cleanup handling
def termHandler(signum, frame):
self._delpid()
signal.signal(signal.SIGTERM, termHandler)
atexit.register(self._delpid)
# Run the daemon
self.mainloop()
#------------------------------------------------------------
def _stop(self):
"""
Stop the daemon
"""
# Get the pid from the pidfile
try:
pf = file(self._pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self._pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, signal.SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self._pidfile):
os.remove(self._pidfile)
else:
print str(err)
sys.exit(1)
#------------------------------------------------------------
# Begin Overrides
#------------------------------------------------------------
def start(self):
if sys.platform == 'win32':
self._start(daemonize=False)
else:
self._start()
#------------------------------------------------------------
def stop(self):
self._continue = False
self._stop()
#------------------------------------------------------------
def foreground(self):
self._start(daemonize=False)
#------------------------------------------------------------
def restart(self):
self.stop()
self.start()
#------------------------------------------------------------
def mainloop(self):
while self._continue :
self.log.info("Daemon is running!")
time.sleep(2)
#------------------------------------------------------------
def shutdown(self):
pass
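#------------------------------------------------------------
#------------------------------------------------------------
class ExampleDaemon(Daemon):
    """
    A minimal subclassing sketch (illustrative, not part of the original
    module): override mainloop() and shutdown() as the Daemon docstring
    describes. The class name and log messages are placeholders.
    """
    #------------------------------------------------------------
    def mainloop(self):
        # Do one unit of work per iteration until stop() clears _continue.
        while self._continue:
            self.log.info("ExampleDaemon heartbeat")
            time.sleep(5)
    #------------------------------------------------------------
    def shutdown(self):
        # Release any resources opened by mainloop() here.
        self.log.info("ExampleDaemon shutting down")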
if __name__ == "__main__":
myDaemon = Daemon('Testing','c:/test.pid')
myDaemon.start()
time.sleep(10)
myDaemon.stop()
|
mit
| 3,978,645,494,672,254,500 | 29.392694 | 92 | 0.433509 | false |
tony/libtmux
|
libtmux/server.py
|
1
|
15725
|
# -*- coding: utf-8 -*-
"""Pythonization of the :term:`tmux(1)` server.
libtmux.server
~~~~~~~~~~~~~~
"""
from __future__ import absolute_import, unicode_literals, with_statement
import logging
import os
from . import exc, formats
from .common import (
EnvironmentMixin,
TmuxRelationalObject,
has_gte_version,
session_check_name,
tmux_cmd,
)
from .session import Session
logger = logging.getLogger(__name__)
class Server(TmuxRelationalObject, EnvironmentMixin):
"""
The :term:`tmux(1)` :term:`server` [#]_.
- :attr:`Server._sessions` [:class:`Session`, ...]
- :attr:`Session._windows` [:class:`Window`, ...]
- :attr:`Window._panes` [:class:`Pane`, ...]
- :class:`Pane`
When instantiated stores information on live, running tmux server.
Parameters
----------
socket_name : str, optional
socket_path : str, optional
config_file : str, optional
colors : str, optional
References
----------
.. [#] CLIENTS AND SESSIONS. openbsd manpage for TMUX(1)
"The tmux server manages clients, sessions, windows and panes.
Clients are attached to sessions to interact with them, either when
they are created with the new-session command, or later with the
attach-session command. Each session has one or more windows linked
into it. Windows may be linked to multiple sessions and are made up
of one or more panes, each of which contains a pseudo terminal."
https://man.openbsd.org/tmux.1#CLIENTS_AND_SESSIONS.
Accessed April 1st, 2018.
"""
#: ``[-L socket-name]``
socket_name = None
#: ``[-S socket-path]``
socket_path = None
#: ``[-f file]``
config_file = None
#: ``-2`` or ``-8``
colors = None
#: unique child ID used by :class:`~libtmux.common.TmuxRelationalObject`
child_id_attribute = 'session_id'
#: namespace used :class:`~libtmux.common.TmuxMappingObject`
formatter_prefix = 'server_'
def __init__(
self,
socket_name=None,
socket_path=None,
config_file=None,
colors=None,
**kwargs
):
EnvironmentMixin.__init__(self, '-g')
self._windows = []
self._panes = []
if socket_name:
self.socket_name = socket_name
if socket_path:
self.socket_path = socket_path
if config_file:
self.config_file = config_file
if colors:
self.colors = colors
def cmd(self, *args, **kwargs):
"""
Execute tmux command and return output.
Returns
-------
:class:`common.tmux_cmd`
Notes
-----
.. versionchanged:: 0.8
Renamed from ``.tmux`` to ``.cmd``.
"""
args = list(args)
if self.socket_name:
args.insert(0, '-L{0}'.format(self.socket_name))
if self.socket_path:
args.insert(0, '-S{0}'.format(self.socket_path))
if self.config_file:
args.insert(0, '-f{0}'.format(self.config_file))
if self.colors:
if self.colors == 256:
args.insert(0, '-2')
elif self.colors == 88:
args.insert(0, '-8')
else:
raise ValueError('Server.colors must equal 88 or 256')
return tmux_cmd(*args, **kwargs)
def _list_sessions(self):
"""
Return list of sessions in :py:obj:`dict` form.
Retrieved from ``$ tmux(1) list-sessions`` stdout.
The :py:obj:`list` is derived from ``stdout`` in
:class:`common.tmux_cmd` which wraps :py:class:`subprocess.Popen`.
Returns
-------
list of dict
"""
sformats = formats.SESSION_FORMATS
tmux_formats = ['#{%s}' % f for f in sformats]
tmux_args = ('-F%s' % '\t'.join(tmux_formats),) # output
proc = self.cmd('list-sessions', *tmux_args)
if proc.stderr:
raise exc.LibTmuxException(proc.stderr)
sformats = formats.SESSION_FORMATS
tmux_formats = ['#{%s}' % format for format in sformats]
sessions = proc.stdout
# combine format keys with values returned from ``tmux list-sessions``
sessions = [dict(zip(sformats, session.split('\t'))) for session in sessions]
# clear up empty dict
sessions = [
dict((k, v) for k, v in session.items() if v) for session in sessions
]
return sessions
@property
def _sessions(self):
"""Property / alias to return :meth:`~._list_sessions`."""
return self._list_sessions()
def list_sessions(self):
"""
Return list of :class:`Session` from the ``tmux(1)`` session.
Returns
-------
list of :class:`Session`
"""
return [Session(server=self, **s) for s in self._sessions]
@property
def sessions(self):
"""Property / alias to return :meth:`~.list_sessions`."""
return self.list_sessions()
#: Alias :attr:`sessions` for :class:`~libtmux.common.TmuxRelationalObject`
children = sessions
def _list_windows(self):
"""
Return list of windows in :py:obj:`dict` form.
Retrieved from ``$ tmux(1) list-windows`` stdout.
The :py:obj:`list` is derived from ``stdout`` in
:class:`common.tmux_cmd` which wraps :py:class:`subprocess.Popen`.
Returns
-------
list of dict
"""
wformats = ['session_name', 'session_id'] + formats.WINDOW_FORMATS
tmux_formats = ['#{%s}' % format for format in wformats]
proc = self.cmd(
'list-windows', # ``tmux list-windows``
'-a',
'-F%s' % '\t'.join(tmux_formats), # output
)
if proc.stderr:
raise exc.LibTmuxException(proc.stderr)
windows = proc.stdout
wformats = ['session_name', 'session_id'] + formats.WINDOW_FORMATS
# combine format keys with values returned from ``tmux list-windows``
windows = [dict(zip(wformats, window.split('\t'))) for window in windows]
# clear up empty dict
windows = [dict((k, v) for k, v in window.items() if v) for window in windows]
# tmux < 1.8 doesn't have window_id, use window_name
for w in windows:
if 'window_id' not in w:
w['window_id'] = w['window_name']
if self._windows:
self._windows[:] = []
self._windows.extend(windows)
return self._windows
def _update_windows(self):
"""
Update internal window data and return ``self`` for chainability.
Returns
-------
:class:`Server`
"""
self._list_windows()
return self
def _list_panes(self):
"""
Return list of panes in :py:obj:`dict` form.
Retrieved from ``$ tmux(1) list-panes`` stdout.
The :py:obj:`list` is derived from ``stdout`` in
:class:`util.tmux_cmd` which wraps :py:class:`subprocess.Popen`.
Returns
-------
list
"""
pformats = [
'session_name',
'session_id',
'window_index',
'window_id',
'window_name',
] + formats.PANE_FORMATS
tmux_formats = ['#{%s}\t' % f for f in pformats]
proc = self.cmd('list-panes', '-a', '-F%s' % ''.join(tmux_formats)) # output
if proc.stderr:
raise exc.LibTmuxException(proc.stderr)
panes = proc.stdout
pformats = [
'session_name',
'session_id',
'window_index',
'window_id',
'window_name',
] + formats.PANE_FORMATS
# combine format keys with values returned from ``tmux list-panes``
panes = [dict(zip(pformats, window.split('\t'))) for window in panes]
# clear up empty dict
panes = [
dict(
(k, v) for k, v in window.items() if v or k == 'pane_current_path'
) # preserve pane_current_path, in case it entered a new process
# where we may not get a cwd from.
for window in panes
]
if self._panes:
self._panes[:] = []
self._panes.extend(panes)
return self._panes
def _update_panes(self):
"""
Update internal pane data and return ``self`` for chainability.
Returns
-------
:class:`Server`
"""
self._list_panes()
return self
@property
def attached_sessions(self):
"""
Return active :class:`Session` objects.
This will not work where multiple tmux sessions are attached.
Returns
-------
list of :class:`Session`
"""
sessions = self._sessions
attached_sessions = list()
        for session in sessions:
            if 'session_attached' in session:
                # for now session_attached is a unicode string
                if session.get('session_attached') == '1':
                    logger.debug('session %s attached',
                                 session.get('session_name'))
                    attached_sessions.append(session)
                else:
                    continue
return [Session(server=self, **s) for s in attached_sessions] or None
def has_session(self, target_session, exact=True):
"""
Return True if session exists. ``$ tmux has-session``.
Parameters
----------
target_session : str
session name
exact : bool
match the session name exactly. tmux uses fnmatch by default.
Internally prepends ``=`` to the session in ``$ tmux has-session``.
tmux 2.1 and up only.
Raises
------
:exc:`exc.BadSessionName`
Returns
-------
bool
"""
session_check_name(target_session)
if exact and has_gte_version('2.1'):
target_session = '={}'.format(target_session)
proc = self.cmd('has-session', '-t%s' % target_session)
if not proc.returncode:
return True
return False
def kill_server(self):
"""``$ tmux kill-server``."""
self.cmd('kill-server')
def kill_session(self, target_session=None):
"""
Kill the tmux session with ``$ tmux kill-session``, return ``self``.
Parameters
----------
target_session : str, optional
target_session: str. note this accepts ``fnmatch(3)``. 'asdf' will
kill 'asdfasd'.
Returns
-------
:class:`Server`
Raises
------
:exc:`exc.BadSessionName`
"""
session_check_name(target_session)
proc = self.cmd('kill-session', '-t%s' % target_session)
if proc.stderr:
raise exc.LibTmuxException(proc.stderr)
return self
def switch_client(self, target_session):
"""
``$ tmux switch-client``.
Parameters
----------
target_session : str
name of the session. fnmatch(3) works.
Raises
------
:exc:`exc.BadSessionName`
"""
session_check_name(target_session)
proc = self.cmd('switch-client', '-t%s' % target_session)
if proc.stderr:
raise exc.LibTmuxException(proc.stderr)
def attach_session(self, target_session=None):
"""``$ tmux attach-session`` aka alias: ``$ tmux attach``.
Parameters
----------
target_session : str
name of the session. fnmatch(3) works.
Raises
------
:exc:`exc.BadSessionName`
"""
session_check_name(target_session)
tmux_args = tuple()
if target_session:
tmux_args += ('-t%s' % target_session,)
proc = self.cmd('attach-session', *tmux_args)
if proc.stderr:
raise exc.LibTmuxException(proc.stderr)
def new_session(
self,
session_name=None,
kill_session=False,
attach=False,
start_directory=None,
window_name=None,
window_command=None,
*args,
**kwargs
):
"""
Return :class:`Session` from ``$ tmux new-session``.
Uses ``-P`` flag to print session info, ``-F`` for return formatting
returns new Session object.
``$ tmux new-session -d`` will create the session in the background
``$ tmux new-session -Ad`` will move to the session name if it already
exists. todo: make an option to handle this.
Parameters
----------
session_name : str, optional
::
$ tmux new-session -s <session_name>
attach : bool, optional
create session in the foreground. ``attach=False`` is equivalent
to::
$ tmux new-session -d
Other Parameters
----------------
kill_session : bool, optional
Kill current session if ``$ tmux has-session``.
Useful for testing workspaces.
start_directory : str, optional
specifies the working directory in which the
new session is created.
window_name : str, optional
::
$ tmux new-session -n <window_name>
window_command : str
            execute a command on starting the session. NOTE: when this
            command exits, the window will close. This is useful for
            long-running processes where the window should close on
            completion.
Returns
-------
:class:`Session`
Raises
------
:exc:`exc.BadSessionName`
"""
session_check_name(session_name)
if self.has_session(session_name):
if kill_session:
self.cmd('kill-session', '-t%s' % session_name)
logger.info('session %s exists. killed it.' % session_name)
else:
raise exc.TmuxSessionExists('Session named %s exists' % session_name)
logger.debug('creating session %s' % session_name)
sformats = formats.SESSION_FORMATS
tmux_formats = ['#{%s}' % f for f in sformats]
env = os.environ.get('TMUX')
if env:
del os.environ['TMUX']
tmux_args = (
'-s%s' % session_name,
'-P',
'-F%s' % '\t'.join(tmux_formats), # output
)
if not attach:
tmux_args += ('-d',)
if start_directory:
tmux_args += ('-c', start_directory)
if window_name:
tmux_args += ('-n', window_name)
# tmux 2.6 gives unattached sessions a tiny default area
# no need send in -x/-y if they're in a client already, though
if has_gte_version('2.6') and 'TMUX' not in os.environ:
tmux_args += ('-x', 800, '-y', 600)
if window_command:
tmux_args += (window_command,)
proc = self.cmd('new-session', *tmux_args)
if proc.stderr:
raise exc.LibTmuxException(proc.stderr)
session = proc.stdout[0]
if env:
os.environ['TMUX'] = env
# combine format keys with values returned from ``tmux list-windows``
session = dict(zip(sformats, session.split('\t')))
# clear up empty dict
session = dict((k, v) for k, v in session.items() if v)
session = Session(server=self, **session)
return session
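# A minimal usage sketch (illustrative, requires a tmux binary on $PATH); the
# socket and session names below are placeholders.
if __name__ == '__main__':
    server = Server(socket_name='libtmux_example')
    session = server.new_session(session_name='example', kill_session=True)
    print(server.has_session('example'))
    for s in server.list_sessions():
        print(s)
    server.kill_session('example')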
|
bsd-3-clause
| 1,763,537,423,157,772,000 | 26.491259 | 86 | 0.534563 | false |
davidzchen/tensorflow
|
tensorflow/python/distribute/mirrored_strategy.py
|
1
|
30492
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class MirroredStrategy implementing tf.distribute.Strategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import mirrored_run
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import numpy_dataset
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import values
from tensorflow.python.distribute.cluster_resolver import TFConfigClusterResolver
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# TODO(josh11b): Replace asserts in this file with if ...: raise ...
def _is_device_list_single_worker(devices):
"""Checks whether the devices list is for single or multi-worker.
Args:
devices: a list of device strings or tf.config.LogicalDevice objects, for
either local or for remote devices.
Returns:
a boolean indicating whether these device strings are for local or for
remote.
Raises:
ValueError: if device strings are not consistent.
"""
specs = []
for d in devices:
name = d.name if isinstance(d, context.LogicalDevice) else d
specs.append(tf_device.DeviceSpec.from_string(name))
num_workers = len({(d.job, d.task, d.replica) for d in specs})
all_local = all(d.job in (None, "localhost") for d in specs)
any_local = any(d.job in (None, "localhost") for d in specs)
if any_local and not all_local:
raise ValueError("Local device string cannot have job specified other "
"than 'localhost'")
if num_workers == 1 and not all_local:
if any(d.task is None for d in specs):
raise ValueError("Remote device string must have task specified.")
return num_workers == 1
def _cluster_spec_to_device_list(cluster_spec, num_gpus_per_worker):
"""Returns a device list given a cluster spec."""
cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec)
devices = []
for task_type in ("chief", "worker"):
for task_id in range(len(cluster_spec.as_dict().get(task_type, []))):
if num_gpus_per_worker == 0:
devices.append("/job:%s/task:%d/device:CPU:0" % (task_type, task_id))
else:
devices.extend([
"/job:%s/task:%d/device:GPU:%i" % (task_type, task_id, gpu_id)
for gpu_id in range(num_gpus_per_worker)
])
return devices
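# Illustrative expansion of the helper above: a cluster spec such as
#   {"chief": ["host0:2222"], "worker": ["host1:2222", "host2:2222"]}
# with num_gpus_per_worker=2 yields
#   ["/job:chief/task:0/device:GPU:0", "/job:chief/task:0/device:GPU:1",
#    "/job:worker/task:0/device:GPU:0", "/job:worker/task:0/device:GPU:1",
#    "/job:worker/task:1/device:GPU:0", "/job:worker/task:1/device:GPU:1"]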
def _group_device_list(devices):
"""Groups the devices list by task_type and task_id.
Args:
devices: a list of device strings for remote devices.
Returns:
a dict of list of device strings mapping from task_type to a list of devices
for the task_type in the ascending order of task_id.
"""
assert not _is_device_list_single_worker(devices)
device_dict = {}
for d in devices:
d_spec = tf_device.DeviceSpec.from_string(d)
# Create an entry for the task_type.
if d_spec.job not in device_dict:
device_dict[d_spec.job] = []
# Fill the device list for task_type until it covers the task_id.
while len(device_dict[d_spec.job]) <= d_spec.task:
device_dict[d_spec.job].append([])
device_dict[d_spec.job][d_spec.task].append(d)
return device_dict
def _is_gpu_device(device):
return tf_device.DeviceSpec.from_string(device).device_type == "GPU"
def _infer_num_gpus_per_worker(devices):
"""Infers the number of GPUs on each worker.
Currently to make multi-worker cross device ops work, we need all workers to
have the same number of GPUs.
Args:
devices: a list of device strings, can be either local devices or remote
devices.
Returns:
number of GPUs per worker.
Raises:
ValueError if workers have different number of GPUs or GPU indices are not
consecutive and starting from 0.
"""
if _is_device_list_single_worker(devices):
return sum(1 for d in devices if _is_gpu_device(d))
else:
device_dict = _group_device_list(devices)
num_gpus = None
for _, devices_in_task in device_dict.items():
for device_in_task in devices_in_task:
if num_gpus is None:
num_gpus = sum(1 for d in device_in_task if _is_gpu_device(d))
# Verify other workers have the same number of GPUs.
elif num_gpus != sum(1 for d in device_in_task if _is_gpu_device(d)):
raise ValueError("All workers should have the same number of GPUs.")
for d in device_in_task:
d_spec = tf_device.DeviceSpec.from_string(d)
if (d_spec.device_type == "GPU" and
d_spec.device_index >= num_gpus):
raise ValueError("GPU `device_index` on a worker should be "
"consecutive and start from 0.")
return num_gpus
def all_local_devices(num_gpus=None):
devices = config.list_logical_devices("GPU")
if num_gpus is not None:
devices = devices[:num_gpus]
return devices or config.list_logical_devices("CPU")
def all_devices():
devices = []
tfconfig = TFConfigClusterResolver()
if tfconfig.cluster_spec().as_dict():
devices = _cluster_spec_to_device_list(tfconfig.cluster_spec(),
context.num_gpus())
return devices if devices else all_local_devices()
@tf_export("distribute.MirroredStrategy", v1=[]) # pylint: disable=g-classes-have-attributes
class MirroredStrategy(distribute_lib.Strategy):
"""Synchronous training across multiple replicas on one machine.
This strategy is typically used for training on one
machine with multiple GPUs. For TPUs, use
`tf.distribute.TPUStrategy`. To use `MirroredStrategy` with multiple workers,
please refer to `tf.distribute.experimental.MultiWorkerMirroredStrategy`.
For example, a variable created under a `MirroredStrategy` is a
`MirroredVariable`. If no devices are specified in the constructor argument of
the strategy then it will use all the available GPUs. If no GPUs are found, it
will use the available CPUs. Note that TensorFlow treats all CPUs on a
machine as a single device, and uses threads internally for parallelism.
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> with strategy.scope():
... x = tf.Variable(1.)
>>> x
MirroredVariable:{
0: <tf.Variable ... shape=() dtype=float32, numpy=1.0>,
1: <tf.Variable ... shape=() dtype=float32, numpy=1.0>
}
While using distribution strategies, all the variable creation should be done
within the strategy's scope. This will replicate the variables across all the
replicas and keep them in sync using an all-reduce algorithm.
Variables created inside a `MirroredStrategy` which is wrapped with a
`tf.function` are still `MirroredVariables`.
>>> x = []
>>> @tf.function # Wrap the function with tf.function.
... def create_variable():
... if not x:
... x.append(tf.Variable(1.))
... return x[0]
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> with strategy.scope():
... _ = create_variable()
... print(x[0])
MirroredVariable:{
0: <tf.Variable ... shape=() dtype=float32, numpy=1.0>,
1: <tf.Variable ... shape=() dtype=float32, numpy=1.0>
}
`experimental_distribute_dataset` can be used to distribute the dataset across
the replicas when writing your own training loop. If you are using `.fit` and
`.compile` methods available in `tf.keras`, then `tf.keras` will handle the
distribution for you.
For example:
```python
my_strategy = tf.distribute.MirroredStrategy()
with my_strategy.scope():
@tf.function
def distribute_train_epoch(dataset):
def replica_fn(input):
# process input and return result
return result
total_result = 0
for x in dataset:
per_replica_result = my_strategy.run(replica_fn, args=(x,))
total_result += my_strategy.reduce(tf.distribute.ReduceOp.SUM,
per_replica_result, axis=None)
return total_result
dist_dataset = my_strategy.experimental_distribute_dataset(dataset)
for _ in range(EPOCHS):
train_result = distribute_train_epoch(dist_dataset)
```
Args:
devices: a list of device strings such as `['/gpu:0', '/gpu:1']`. If
`None`, all available GPUs are used. If no GPUs are found, CPU is used.
    cross_device_ops: optional, a descendant of `CrossDeviceOps`. If this is not
set, `NcclAllReduce()` will be used by default. One would customize this
if NCCL isn't available or if a special implementation that exploits
the particular hardware is available.
"""
def __init__(self, devices=None, cross_device_ops=None):
extended = MirroredExtended(
self, devices=devices, cross_device_ops=cross_device_ops)
super(MirroredStrategy, self).__init__(extended)
distribute_lib.distribution_strategy_gauge.get_cell("V2").set(
"MirroredStrategy")
@tf_export(v1=["distribute.MirroredStrategy"])
class MirroredStrategyV1(distribute_lib.StrategyV1): # pylint: disable=g-missing-docstring
__doc__ = MirroredStrategy.__doc__
def __init__(self, devices=None, cross_device_ops=None):
extended = MirroredExtended(
self, devices=devices, cross_device_ops=cross_device_ops)
super(MirroredStrategyV1, self).__init__(extended)
distribute_lib.distribution_strategy_gauge.get_cell("V1").set(
"MirroredStrategy")
# TODO(josh11b): Switch to V2 when we no longer need to support tf.compat.v1.
class MirroredExtended(distribute_lib.StrategyExtendedV1):
"""Implementation of MirroredStrategy."""
def __init__(self, container_strategy, devices=None, cross_device_ops=None):
super(MirroredExtended, self).__init__(container_strategy)
if context.executing_eagerly():
if devices and not _is_device_list_single_worker(devices):
raise RuntimeError("In-graph multi-worker training with "
"`MirroredStrategy` is not supported in eager mode.")
else:
if TFConfigClusterResolver().cluster_spec().as_dict():
# if you are executing in eager mode, only the single machine code
# path is supported.
logging.info("Initializing local devices since in-graph multi-worker "
"training with `MirroredStrategy` is not supported in "
"eager mode. TF_CONFIG will be ignored when "
"when initializing `MirroredStrategy`.")
devices = devices or all_local_devices()
else:
devices = devices or all_devices()
assert devices, ("Got an empty `devices` list and unable to recognize "
"any local devices.")
self._cross_device_ops = cross_device_ops
self._initialize_strategy(devices)
# TODO(b/128995245): Enable last partial batch support in graph mode.
if ops.executing_eagerly_outside_functions():
self.experimental_enable_get_next_as_optional = True
# Flag to turn on VariablePolicy.
self._use_var_policy = False
def _initialize_strategy(self, devices):
# The _initialize_strategy method is intended to be used by distribute
# coordinator as well.
assert devices, "Must specify at least one device."
devices = tuple(device_util.resolve(d) for d in devices)
assert len(set(devices)) == len(devices), (
"No duplicates allowed in `devices` argument: %s" % (devices,))
if _is_device_list_single_worker(devices):
self._initialize_single_worker(devices)
else:
self._initialize_multi_worker(devices)
def _initialize_single_worker(self, devices):
"""Initializes the object for single-worker training."""
self._devices = tuple(device_util.canonicalize(d) for d in devices)
self._input_workers_devices = (
(device_util.canonicalize("/device:CPU:0", devices[0]), devices),)
self._inferred_cross_device_ops = None if self._cross_device_ops else (
cross_device_ops_lib.choose_the_best(devices))
self._host_input_device = numpy_dataset.SingleDevice(
self._input_workers_devices[0][0])
self._is_multi_worker_training = False
logging.info("Using MirroredStrategy with devices %r", devices)
device_spec = tf_device.DeviceSpec.from_string(
self._input_workers_devices[0][0])
# Ensures when we enter strategy.scope() we use the correct default device
if device_spec.job is not None and device_spec.job != "localhost":
self._default_device = "/job:%s/replica:%d/task:%d" % (
device_spec.job, device_spec.replica, device_spec.task)
def _initialize_multi_worker(self, devices):
"""Initializes the object for multi-worker training."""
device_dict = _group_device_list(devices)
workers = []
worker_devices = []
for job in ("chief", "worker"):
for task in range(len(device_dict.get(job, []))):
worker = "/job:%s/task:%d" % (job, task)
workers.append(worker)
worker_devices.append((worker, device_dict[job][task]))
# Setting `_default_device` will add a device scope in the
# distribution.scope. We set the default device to the first worker. When
# users specify device under distribution.scope by
# with tf.device("/cpu:0"):
# ...
# their ops will end up on the cpu device of its first worker, e.g.
# "/job:worker/task:0/device:CPU:0". Note this is not used in replica mode.
self._default_device = workers[0]
self._host_input_device = numpy_dataset.SingleDevice(workers[0])
self._devices = tuple(devices)
self._input_workers_devices = worker_devices
self._is_multi_worker_training = True
if len(workers) > 1:
# Grandfather usage in the legacy tests if they're configured properly.
if (not isinstance(self._cross_device_ops,
cross_device_ops_lib.ReductionToOneDevice) or
self._cross_device_ops._num_between_graph_workers > 1): # pylint: disable=protected-access
raise ValueError(
"In-graph multi-worker training with `MirroredStrategy` is not "
"supported.")
self._inferred_cross_device_ops = self._cross_device_ops
else:
# TODO(yuefengz): make `choose_the_best` work with device strings
# containing job names.
self._inferred_cross_device_ops = cross_device_ops_lib.NcclAllReduce()
logging.info("Using MirroredStrategy with remote devices %r", devices)
def _input_workers_with_options(self, options=None):
if not options or options.experimental_prefetch_to_device:
return input_lib.InputWorkers(self._input_workers_devices)
else:
return input_lib.InputWorkers(
[(host_device, (host_device,) * len(compute_devices)) for
host_device, compute_devices in self._input_workers_devices])
@property
def _input_workers(self):
return self._input_workers_with_options()
def _get_variable_creator_initial_value(self,
replica_id,
device,
primary_var,
**kwargs):
"""Return the initial value for variables on a replica."""
if replica_id == 0:
return kwargs["initial_value"]
else:
assert primary_var is not None
assert device is not None
assert kwargs is not None
def initial_value_fn():
if context.executing_eagerly() or ops.inside_function():
init_value = primary_var.value()
return array_ops.identity(init_value)
else:
with ops.device(device):
init_value = primary_var.initial_value
return array_ops.identity(init_value)
return initial_value_fn
def _create_variable(self, next_creator, **kwargs):
"""Create a mirrored variable. See `DistributionStrategy.scope`."""
colocate_with = kwargs.pop("colocate_with", None)
if colocate_with is None:
devices = self._devices
elif isinstance(colocate_with, numpy_dataset.SingleDevice):
with ops.device(colocate_with.device):
return next_creator(**kwargs)
else:
devices = colocate_with._devices # pylint: disable=protected-access
def _real_mirrored_creator(**kwargs): # pylint: disable=g-missing-docstring
value_list = []
for i, d in enumerate(devices):
with ops.device(d):
kwargs["initial_value"] = self._get_variable_creator_initial_value(
replica_id=i,
device=d,
primary_var=value_list[0] if value_list else None,
**kwargs)
if i > 0:
# Give replicas meaningful distinct names:
var0name = value_list[0].name.split(":")[0]
# We append a / to variable names created on replicas with id > 0 to
# ensure that we ignore the name scope and instead use the given
# name as the absolute name of the variable.
kwargs["name"] = "%s/replica_%d/" % (var0name, i)
with context.device_policy(context.DEVICE_PLACEMENT_SILENT):
# Don't record operations (e.g. other variable reads) during
# variable creation.
with tape.stop_recording():
v = next_creator(**kwargs)
assert not isinstance(v, values.DistributedVariable)
value_list.append(v)
return value_list
return distribute_utils.create_mirrored_variable(
self._container_strategy(), _real_mirrored_creator,
distribute_utils.VARIABLE_CLASS_MAPPING,
distribute_utils.VARIABLE_POLICY_MAPPING, **kwargs)
def _validate_colocate_with_variable(self, colocate_with_variable):
distribute_utils.validate_colocate_distributed_variable(
colocate_with_variable, self)
def _make_dataset_iterator(self, dataset):
return input_lib.DatasetIterator(
dataset,
self._input_workers,
self._container_strategy(),
split_batch_by=self._num_replicas_in_sync)
def _make_input_fn_iterator(
self,
input_fn,
replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
input_contexts = []
num_workers = self._input_workers.num_workers
for i in range(num_workers):
input_contexts.append(distribute_lib.InputContext(
num_input_pipelines=num_workers,
input_pipeline_id=i,
num_replicas_in_sync=self._num_replicas_in_sync))
return input_lib.InputFunctionIterator(input_fn, self._input_workers,
input_contexts,
self._container_strategy())
def _experimental_distribute_dataset(self, dataset, options):
return input_lib.get_distributed_dataset(
dataset,
self._input_workers_with_options(options),
self._container_strategy(),
split_batch_by=self._num_replicas_in_sync)
def _experimental_make_numpy_dataset(self, numpy_input, session):
return numpy_dataset.one_host_numpy_dataset(
numpy_input, self._host_input_device, session)
def _distribute_datasets_from_function(self, dataset_fn, options):
input_contexts = []
input_workers = self._input_workers_with_options(options)
num_workers = input_workers.num_workers
for i in range(num_workers):
input_contexts.append(distribute_lib.InputContext(
num_input_pipelines=num_workers,
input_pipeline_id=i,
num_replicas_in_sync=self._num_replicas_in_sync))
return input_lib.get_distributed_datasets_from_function(
dataset_fn,
input_workers,
input_contexts,
self._container_strategy())
def _experimental_distribute_values_from_function(self, value_fn):
per_replica_values = []
for replica_id in range(self._num_replicas_in_sync):
per_replica_values.append(value_fn(
distribute_lib.ValueContext(replica_id,
self._num_replicas_in_sync)))
return distribute_utils.regroup(per_replica_values, always_wrap=True)
# TODO(priyag): Deal with OutOfRange errors once b/111349762 is fixed.
def _experimental_run_steps_on_iterator(self, fn, iterator, iterations,
initial_loop_values=None):
if initial_loop_values is None:
initial_loop_values = {}
initial_loop_values = nest.flatten(initial_loop_values)
ctx = input_lib.MultiStepContext()
def body(i, *args):
"""A wrapper around `fn` to create the while loop body."""
del args
fn_result = fn(ctx, iterator.get_next())
for (name, output) in ctx.last_step_outputs.items():
# Convert all outputs to tensors, potentially from `DistributedValues`.
ctx.last_step_outputs[name] = self._local_results(output)
flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)
with ops.control_dependencies([fn_result]):
return [i + 1] + flat_last_step_outputs
# We capture the control_flow_context at this point, before we run `fn`
# inside a while_loop. This is useful in cases where we might need to exit
# these contexts and get back to the outer context to do some things, for
# e.g. create an op which should be evaluated only once at the end of the
# loop on the host. One such usage is in creating metrics' value op.
self._outer_control_flow_context = (
ops.get_default_graph()._get_control_flow_context()) # pylint: disable=protected-access
cond = lambda i, *args: i < iterations
i = constant_op.constant(0)
loop_result = control_flow_ops.while_loop(
cond, body, [i] + initial_loop_values, name="",
parallel_iterations=1, back_prop=False, swap_memory=False,
return_same_structure=True)
del self._outer_control_flow_context
ctx.run_op = control_flow_ops.group(loop_result)
# Convert the last_step_outputs from a list to the original dict structure
# of last_step_outputs.
last_step_tensor_outputs = loop_result[1:]
last_step_tensor_outputs_dict = nest.pack_sequence_as(
ctx.last_step_outputs, last_step_tensor_outputs)
for name, reduce_op in ctx._last_step_outputs_reduce_ops.items(): # pylint: disable=protected-access
output = last_step_tensor_outputs_dict[name]
# For outputs that have already been reduced, wrap them in a Mirrored
# container, else in a PerReplica container.
if reduce_op is None:
last_step_tensor_outputs_dict[name] = distribute_utils.regroup(output)
else:
assert len(output) == 1
last_step_tensor_outputs_dict[name] = output[0]
ctx._set_last_step_outputs(last_step_tensor_outputs_dict) # pylint: disable=protected-access
return ctx
def _broadcast_to(self, tensor, destinations):
# This is both a fast path for Python constants, and a way to delay
# converting Python values to a tensor until we know what type it
# should be converted to. Otherwise we have trouble with:
# global_step.assign_add(1)
# since the `1` gets broadcast as an int32 but global_step is int64.
if isinstance(tensor, (float, int)):
return tensor
# TODO(josh11b): In eager mode, use one thread per device, or async mode.
if not destinations:
# TODO(josh11b): Use current logical device instead of 0 here.
destinations = self._devices
return self._get_cross_device_ops(tensor).broadcast(tensor, destinations)
def _call_for_each_replica(self, fn, args, kwargs):
return mirrored_run.call_for_each_replica(
self._container_strategy(), fn, args, kwargs)
def _configure(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None):
del task_type, task_id
if session_config:
session_config.CopyFrom(self._update_config_proto(session_config))
if cluster_spec:
# TODO(yuefengz): remove the following code once cluster_resolver is
# added.
num_gpus_per_worker = _infer_num_gpus_per_worker(self._devices)
multi_worker_devices = _cluster_spec_to_device_list(
cluster_spec, num_gpus_per_worker)
self._initialize_multi_worker(multi_worker_devices)
def _update_config_proto(self, config_proto):
updated_config = copy.deepcopy(config_proto)
updated_config.isolate_session_state = True
return updated_config
def _get_cross_device_ops(self, value):
del value # Unused.
return self._cross_device_ops or self._inferred_cross_device_ops
def _reduce_to(self, reduce_op, value, destinations, experimental_hints):
if (distribute_utils.is_mirrored(value) and
reduce_op == reduce_util.ReduceOp.MEAN):
return value
assert not distribute_utils.is_mirrored(value)
if not isinstance(value, values.DistributedValues):
# This function handles reducing values that are not PerReplica or
# Mirrored values. For example, the same value could be present on all
# replicas in which case `value` would be a single value or value could
# be 0.
return cross_device_ops_lib.reduce_non_distributed_value(
reduce_op, value, destinations, self._num_replicas_in_sync)
return self._get_cross_device_ops(value).reduce(
reduce_op,
value,
destinations=destinations,
experimental_hints=experimental_hints)
def _batch_reduce_to(self, reduce_op, value_destination_pairs,
experimental_hints):
cross_device_ops = None
for value, _ in value_destination_pairs:
if cross_device_ops is None:
cross_device_ops = self._get_cross_device_ops(value)
elif cross_device_ops is not self._get_cross_device_ops(value):
raise ValueError("inputs to batch_reduce_to must be either all on the "
"the host or all on the compute devices")
return cross_device_ops.batch_reduce(reduce_op, value_destination_pairs,
experimental_hints)
def _update(self, var, fn, args, kwargs, group):
# TODO(josh11b): In eager mode, use one thread per device.
assert isinstance(var, values.DistributedVariable)
updates = []
for i, v in enumerate(var.values):
name = "update_%d" % i
with ops.device(v.device), \
distribute_lib.UpdateContext(i), \
ops.name_scope(name):
# If args and kwargs are not mirrored, the value is returned as is.
updates.append(
fn(v, *distribute_utils.select_replica_mirrored(i, args),
**distribute_utils.select_replica_mirrored(i, kwargs)))
return distribute_utils.update_regroup(self, updates, group)
def _update_non_slot(self, colocate_with, fn, args, kwargs, group):
assert isinstance(colocate_with, tuple)
# TODO(josh11b): In eager mode, use one thread per device.
updates = []
for i, d in enumerate(colocate_with):
name = "update_%d" % i
with ops.device(d), distribute_lib.UpdateContext(i), ops.name_scope(name):
updates.append(
fn(*distribute_utils.select_replica_mirrored(i, args),
**distribute_utils.select_replica_mirrored(i, kwargs)))
return distribute_utils.update_regroup(self, updates, group)
def read_var(self, replica_local_var):
"""Read the aggregate value of a replica-local variable."""
# pylint: disable=protected-access
if distribute_utils.is_sync_on_read(replica_local_var):
return replica_local_var._get_cross_replica()
assert distribute_utils.is_mirrored(replica_local_var)
return array_ops.identity(replica_local_var._get())
# pylint: enable=protected-access
def _local_results(self, val):
if isinstance(val, values.DistributedValues):
return val._values # pylint: disable=protected-access
return (val,)
def value_container(self, val):
return distribute_utils.value_container(val)
@property
def _num_replicas_in_sync(self):
return len(self._devices)
@property
def worker_devices(self):
return self._devices
@property
def worker_devices_by_replica(self):
return [[d] for d in self._devices]
@property
def parameter_devices(self):
return self.worker_devices
@property
def experimental_between_graph(self):
return False
@property
def experimental_should_init(self):
return True
@property
def should_checkpoint(self):
return True
@property
def should_save_summary(self):
return True
def non_slot_devices(self, var_list):
del var_list
# TODO(josh11b): Should this be the last logical device instead?
return self._devices
# TODO(priyag): Delete this once all strategies use global batch size.
@property
def _global_batch_size(self):
"""`make_dataset_iterator` and `make_numpy_iterator` use global batch size.
`make_input_fn_iterator` assumes per-replica batching.
Returns:
Boolean.
"""
return True
def _in_multi_worker_mode(self):
"""Whether this strategy indicates working in multi-worker settings."""
return False
|
apache-2.0
| -8,923,172,070,556,166,000 | 39.121053 | 105 | 0.669717 | false |
louisrli/grabrc-client
|
client/util.py
|
1
|
2688
|
"""
Utility functions for output, executing commands, and downloading files.
"""
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import commands
import sys
import urllib2
import os
import shutil
from const import Const
def print_msg(prefix, msg):
print "[%s] %s" % (prefix.upper(), msg)
def info(msg):
print_msg("info", msg)
def warn(msg):
print_msg("warning", msg)
def error(msg):
print_msg("error", msg)
def success(msg):
print_msg("success", msg)
def exit_runtime_error(*args):
error("Oops! Something went wrong:\n-- %s" % "\n-- ".join(args))
sys.exit(1)
def exec_cmd_status(cmd):
""" Returns True on success, False on failure """
return commands.getstatusoutput(cmd)[0] == 0
def exec_cmd_output(cmd):
(status, output) = commands.getstatusoutput(cmd)
return status == 0, output
def http_get_contents(url):
try:
return urllib2.urlopen(url).read()
except urllib2.HTTPError, e:
msg_404 = "If you're looking for a directory, did you add 'dir:' before the directory name?"
if ("404" in e.__str__()):
exit_runtime_error("The file wasn't found! (404).", msg_404)
exit_runtime_error(e.__str__(), "Requested URL: %s" % url)
def untar_gz(targz):
""" Untar and extract a .tar.gz """
targz.extractall()
targz.close()
def sanitize_path(path):
""" Clean up a pathname """
return os.path.normpath(os.path.expanduser(path)).strip()
def backup_file(filepath):
"""Backs up a file if it already exists. If a .bak file already exists,
then it appends .bak to it again and backs it up."""
if not os.path.exists(filepath):
return
elif os.path.exists(filepath):
backup_path = filepath + Const.BACKUP_SUFFIX
backup_file(backup_path + Const.BACKUP_SUFFIX)
shutil.move(filepath, backup_path)
def check_git():
""" Checks if git exists. Exits with a runtime error if it's not on the path. """
    git_ok, _ = exec_cmd_output("git --version")
    if not git_ok:
        exit_runtime_error("Couldn't find git! Are you sure it's "
                           "installed and on the PATH?")
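# A minimal usage sketch; the path and messages are illustrative, and the
# backup suffix comes from Const.BACKUP_SUFFIX.
if __name__ == "__main__":
    check_git()
    target = sanitize_path("~/.vimrc")
    info("Backing up %s if it already exists" % target)
    backup_file(target)
    success("Ready to write a new %s" % target)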
|
gpl-3.0
| 7,016,417,344,108,169,000 | 26.428571 | 100 | 0.667783 | false |
kmichalak/rosie
|
test/test_create_git_branch.py
|
1
|
4104
|
import os
import shutil
import git
from rosie.plugins import CreateGitBranch
from test.test_base import (
GitRepoTestCase,
JenkinsJobMock
)
class CreateGitBranchTest(GitRepoTestCase):
def setUp(self):
self.repo_dir = os.path.join(os.getcwd(), 'test/tmp')
self.jenkins_job = JenkinsJobMock('test-job')
def tearDown(self):
if os.path.exists(self.repo_dir) and os.path.isdir(self.repo_dir):
shutil.rmtree(self.repo_dir)
def test_plugin_creates_directory_if_not_exists(self):
# given
branch_name = 'test-branch1'
action = CreateGitBranch(
branch=branch_name, repo_dir=self.repo_dir,
from_branch='master', use_existing=False
)
# make sure that repo directory does not exist
if os.path.exists(self.repo_dir) and os.path.isdir(self.repo_dir):
shutil.rmtree(self.repo_dir)
try:
# when
action.execute(jenkins_job=self.jenkins_job)
# then
assert os.path.exists(self.repo_path())
assert os.path.isdir(self.repo_path())
finally:
# cleanup
repo = git.Repo(self.repo_path())
self.remove_branch_from_origin(branch_name, repo)
def test_plugin_uses_existing_repo(self):
# given
branch_name = 'test-branch1'
action = CreateGitBranch(
branch=branch_name, repo_dir=self.repo_path(),
from_branch='master', use_existing=True
)
try:
# when (repository should not exist when tests start
# - we are testing in a clean environment)
repo = git.Repo.clone_from(
self.jenkins_job.get_scm_url(),
self.repo_path()
)
action.execute(self.jenkins_job)
# then
assert branch_name in repo.heads
finally:
# cleanup
repo = git.Repo(self.repo_path())
self.remove_branch_from_origin(branch_name, repo)
def test_plugin_should_checkout_master_branch(self):
# given
branch_name = 'test-branch1'
action = CreateGitBranch(
branch=branch_name, repo_dir=self.repo_dir,
from_branch='master', use_existing=False
)
try:
# when
action.execute(jenkins_job=self.jenkins_job)
repo = git.Repo(self.repo_path())
head_name = repo.head.reference.name
# then
assert head_name == 'master'
finally:
# cleanup
repo = git.Repo(self.repo_path())
self.remove_branch_from_origin(branch_name, repo)
def test_plugin_should_create_branch_for_given_name(self):
# given
branch_name = 'test-branch1'
action = CreateGitBranch(
branch=branch_name, repo_dir=self.repo_dir,
from_branch='master', use_existing=False
)
try:
# when
action.execute(jenkins_job=self.jenkins_job)
repo = git.Repo(self.repo_path())
# then
assert branch_name in repo.heads
finally:
# cleanup
repo = git.Repo(self.repo_path())
self.remove_branch_from_origin(branch_name, repo)
def test_plugin_creates_remote_for_given_branch_name(self):
# given
branch_name = 'test-branch1'
action = CreateGitBranch(
branch=branch_name, repo_dir=self.repo_dir,
from_branch='master', use_existing=False
)
try:
# when
action.execute(jenkins_job=self.jenkins_job)
repo = git.Repo(self.repo_path())
origin = repo.remotes['origin']
# then
ref = 'origin/%s' % branch_name
assert ref in [ref.name for ref in origin.refs]
finally:
# cleanup
repo = git.Repo(self.repo_path())
self.remove_branch_from_origin(branch_name, repo)
|
apache-2.0
| -5,072,018,693,773,136,000 | 28.956204 | 74 | 0.552144 | false |
TaskEvolution/Task-Coach-Evolution
|
taskcoach/taskcoachlib/gui/iphone.py
|
1
|
2653
|
'''
Task Coach - Your friendly task manager
Copyright (C) 2004-2013 Task Coach developers <developers@taskcoach.org>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from taskcoachlib.gui.threads import DeferredCallMixin, synchronized, synchronizednb
from taskcoachlib.notify import NotificationFrameBase, NotificationCenter
from taskcoachlib.i18n import _
import wx
class IPhoneSyncFrame(DeferredCallMixin, NotificationFrameBase):
def __init__(self, settings, *args, **kwargs):
self.settings = settings
super(IPhoneSyncFrame, self).__init__(*args, **kwargs)
def AddInnerContent(self, sizer, panel):
self.text = wx.StaticText(panel, wx.ID_ANY, _('Synchronizing...'))
sizer.Add(self.text,
0, wx.ALL, 3)
self.gauge = wx.Gauge(panel, wx.ID_ANY)
self.gauge.SetRange(100)
sizer.Add(self.gauge, 0, wx.EXPAND|wx.ALL, 3)
if self.settings.getboolean('iphone', 'showlog'):
self.log = wx.TextCtrl(panel, wx.ID_ANY, u'', style=wx.TE_MULTILINE|wx.TE_READONLY)
sizer.Add(self.log, 1, wx.EXPAND|wx.ALL, 3)
self.btn = wx.Button(panel, wx.ID_ANY, _('OK'))
sizer.Add(self.btn, 0, wx.ALIGN_CENTRE|wx.ALL, 3)
self.btn.Enable(False)
wx.EVT_BUTTON(self.btn, wx.ID_ANY, self.OnOK)
def CloseButton(self, panel):
return None
@synchronized
def SetDeviceName(self, name):
self.text.SetLabel(_('Synchronizing with %s...') % name)
@synchronized
def SetProgress(self, value, total):
self.gauge.SetValue(int(100 * value / total))
@synchronized
def AddLogLine(self, line):
if self.settings.getboolean('iphone', 'showlog'):
self.log.AppendText(line + u'\n')
@synchronizednb
def Started(self):
NotificationCenter().NotifyFrame(self)
@synchronized
def Finished(self):
if self.settings.getboolean('iphone', 'showlog'):
self.btn.Enable(True)
else:
self.DoClose()
def OnOK(self, event):
self.DoClose()
|
gpl-3.0
| -6,450,007,675,775,254,000 | 33.454545 | 95 | 0.6683 | false |
thinkopensolutions/tkobr-addons
|
tko_account_payment_method_journal_entry_selection/account_invoice.py
|
1
|
1727
|
from openerp import models, fields, api
class account_invoice(models.Model):
_inherit = 'account.invoice'
@api.one
@api.depends(
'move_id.line_ids'
)
def _get_receivable_lines(self):
if self.move_id:
data_lines = [x for x in self.move_id.line_ids if (
x.account_id.id == self.account_id.id
and x.account_id.user_type_id.type in ('receivable', 'payable')
and self.journal_id.revenue_expense)]
New_ids = []
for line in data_lines:
New_ids.append(line.id)
New_ids.sort()
self.move_line_receivable_id = New_ids
move_line_receivable_id = fields.Many2many('account.move.line', compute='_get_receivable_lines',
inverse='set_receivable_lines', string='Entry Lines')
@api.multi
def set_receivable_lines(self):
for record in self:
return True
class account_voucher(models.Model):
_inherit = 'account.voucher'
# update due date of created move line on voucher payment
def voucher_move_line_create(self, line_total, move_id, company_currency, current_currency):
result = super(account_voucher, self).voucher_move_line_create(line_total, move_id,
company_currency, current_currency)
voucher = self.env['account.voucher'].browse(self._id)
move_lines = result[1][0]
if len(result[1][0]) == 2:
move_line = self.env['account.move.line'].browse([move_lines[0]])
move_line.write({'date_maturity': voucher.move_line_id.date_maturity})
return result
|
agpl-3.0
| 4,093,489,694,293,593,000 | 38.25 | 106 | 0.573827 | false |
salv-orlando/MyRepo
|
nova/db/sqlalchemy/migrate_repo/versions/057_add_sm_driver_tables.py
|
1
|
3887
|
# Copyright (c) 2011 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import *
from migrate import *
from nova import log as logging
meta = MetaData()
# Table stub-definitions
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
#
volumes = Table('volumes', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
#
# New Tables
#
flavors = Table('sm_flavors', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('label',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('description',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
)
backend = Table('sm_backend_config', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('flavor_id', Integer(), ForeignKey('sm_flavors.id'),
nullable=False),
Column('sr_uuid',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('sr_type',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('config_params',
String(length=2047,
convert_unicode=False,
assert_unicode=None,
unicode_error=None,
_warn_on_bytestring=False)),
)
sm_vol = Table('sm_volume', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), ForeignKey('volumes.id'),
primary_key=True, nullable=False),
Column('backend_id', Integer(), ForeignKey('sm_backend_config.id'),
nullable=False),
Column('vdi_uuid',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
)
#
# Tables to alter
#
# (none currently)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta.bind = migrate_engine
for table in (flavors, backend, sm_vol):
try:
table.create()
except Exception:
logging.info(repr(table))
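# A hedged sketch of the matching downgrade (the original migration defines
# only upgrade): drop the new tables in reverse dependency order.
def downgrade(migrate_engine):
    meta.bind = migrate_engine
    for table in (sm_vol, backend, flavors):
        try:
            table.drop()
        except Exception:
            logging.info(repr(table))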
|
apache-2.0
| -2,864,362,244,569,372,700 | 38.663265 | 79 | 0.606638 | false |
tambu-j/signals
|
python/candlestick.py
|
1
|
1780
|
# __all__ = ['candlestick_trades']
import numpy as np
import ctypes
import os
def _getpath():
#return r"/Users/tim/Library/Caches/clion10/cmake/generated/c57b29e0/c57b29e0/Debug/"
return r"/Users/tim/dev/signals/build"
def _load_candlestick_lib():
return np.ctypeslib.load_library("libsignals", _getpath())
def candlestick_trades(samplet, lookback, t, px, sz):
#requires = ["CONTIGUOUS", "ALIGNED"]
lib = _load_candlestick_lib()
lib.c_candlestick.restype = None
lib.c_candlestick.argtypes = [np.ctypeslib.c_intp,
np.ctypeslib.ndpointer(float,
flags="aligned, contiguous"),
ctypes.c_double,
np.ctypeslib.c_intp,
np.ctypeslib.ndpointer(float, ndim=1,
flags="aligned, contiguous"),
np.ctypeslib.ndpointer(float, ndim=1,
flags="aligned, contiguous"),
np.ctypeslib.ndpointer(float, ndim=1,
flags="aligned, contiguous"),
np.ctypeslib.ndpointer(float, ndim=1,
flags="aligned, contiguous,"
"writeable")]
# samplet = np.require(samplet, float, requires)
# c = np.empty_like(a)
samplelen = np.alen(samplet)
datalen = np.alen(t)
res = np.empty(6*samplelen)
lib.c_candlestick(samplelen, samplet, lookback, datalen, t, px, sz, res)
return res
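# A minimal usage sketch (assumes libsignals is built at the path above); the
# result buffer holds 6 values per sample time in an order defined by the C
# library, so the reshape below is only illustrative.
if __name__ == '__main__':
    t = np.linspace(0.0, 10.0, 500)                      # trade timestamps
    px = 100.0 + np.cumsum(np.random.randn(500)) * 0.01  # trade prices
    sz = np.ones(500)                                    # trade sizes
    samplet = np.arange(1.0, 10.0, 1.0)                  # sample times
    res = candlestick_trades(samplet, 0.5, t, px, sz)
    print(res.reshape(-1, 6))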
|
apache-2.0
| -4,548,263,752,612,800,000 | 40.418605 | 89 | 0.473596 | false |
bigswitch/snac-nox
|
src/nox/apps/tests/pyunittests/event_test.py
|
1
|
2007
|
# Copyright 2008 (C) Nicira, Inc.
#
# This file is part of NOX.
#
# NOX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NOX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NOX. If not, see <http://www.gnu.org/licenses/>.
from nox.apps.tests import unittest
pyunit = __import__('unittest')
class ModTestCase(unittest.TestCase):
def getInterface(self):
return str(ModTestCase)
def setUp(self):
pass
def tearDown(self):
pass
def testSpeeds(self):
"""test event speeds"""
from nox.lib.core import Event_type
fast = Event_type.allocate(Event_type.FAST);
medium = Event_type.allocate(Event_type.MEDIUM);
slow = Event_type.allocate(Event_type.SLOW);
self.failUnlessEqual(fast.get_speed(), Event_type.FAST, 'Fast not equal');
self.failUnlessEqual(medium.get_speed(), Event_type.MEDIUM, 'Medium not equal');
self.failUnlessEqual(slow.get_speed(), Event_type.SLOW, 'Slow not equal');
def testToInt(self):
"""test uniqueness of to_int"""
from nox.lib.core import Event_type
allocated = []
for i in range(0,256):
new_type = Event_type.allocate(Event_type.FAST).to_int();
if new_type in allocated:
self.fail('Allocation failure')
allocated.append(new_type)
def suite(ctxt):
suite = pyunit.TestSuite()
#suite.addTest(ModTestCase("testSpeeds", ctxt, name))
#suite.addTest(ModTestCase("testToInt", ctxt, name))
return suite
|
gpl-3.0
| 2,535,196,302,534,147,000 | 34.839286 | 88 | 0.659691 | false |
PnEcrins/GeoNature
|
contrib/module_example/backend/blueprint.py
|
1
|
1374
|
from flask import Blueprint, current_app, session
from geonature.utils.utilssqlalchemy import json_resp
from geonature.utils.env import get_id_module
# import useful functions from the authentication sub-module
from geonature.core.gn_permissions import decorators as permissions
from geonature.core.gn_permissions.tools import get_or_fetch_user_cruved
blueprint = Blueprint('<MY_MODULE_NAME>', __name__)
# Example of a simple route
@blueprint.route('/test', methods=['GET'])
@json_resp
def get_view():
q = DB.session.query(MySQLAModel)
data = q.all()
return [d.as_dict() for d in data]
# Example of a route protected by the CRUVED of the authentication sub-module
@blueprint.route('/test_cruved', methods=['GET'])
@permissions.check_cruved_scope('R', module_code="MY_MODULE_CODE")
@json_resp
def get_sensitive_view(info_role):
    # Get the id of the user requesting the route
id_role = info_role.id_role
    # Get the scope granted to the user for the 'R' (read) action
read_scope = info_role.value_filter
    # get the full CRUVED of the current user
user_cruved = get_or_fetch_user_cruved(
session=session,
id_role=info_role.id_role,
module_code='MY_MODULE_CODE',
)
q = DB.session.query(MySQLAModel)
data = q.all()
return [d.as_dict() for d in data]
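# Illustrative sketch (not part of the original template): the scope fetched
# above is typically used to restrict the query before returning data. The
# scope codes ('1' = own data, '2' = organism data) and the id_digitiser
# column below are assumptions for the example, not guaranteed by GeoNature.
@blueprint.route('/test_scope', methods=['GET'])
@permissions.check_cruved_scope('R', module_code="MY_MODULE_CODE")
@json_resp
def get_scoped_view(info_role):
    q = DB.session.query(MySQLAModel)
    if info_role.value_filter in ('1', '2'):
        # only return rows digitised by the current user
        q = q.filter(MySQLAModel.id_digitiser == info_role.id_role)
    return [d.as_dict() for d in q.all()]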
|
bsd-2-clause
| -7,252,899,591,470,316,000 | 33.1 | 75 | 0.716801 | false |
alexanderfefelov/nav
|
python/nav/web/devicehistory/urls.py
|
1
|
1840
|
#
# Copyright (C) 2008-2009 UNINETT AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License version 2 as published by the Free
# Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details. You should have received a copy of the GNU General Public License
# along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""Django URL configuration for devicehistory."""
from django.conf.urls.defaults import url, patterns
from nav.web.devicehistory.views import (devicehistory_search,
devicehistory_view, error_form,
register_error, delete_module,
do_delete_module)
# The patterns are relative to the base URL of the subsystem
urlpatterns = patterns('',
url(r'^$', devicehistory_search, name='devicehistory-search'),
url(r'^history/$', devicehistory_view, name='devicehistory-view'),
url(r'^history/\?netbox=(?P<netbox_id>\d+)$', devicehistory_view,
name='devicehistory-view-netbox'),
url(r'^history/\?room=(?P<room_id>.+)$', devicehistory_view,
name='devicehistory-view-room'),
url(r'^registererror/$', error_form,
name='devicehistory-registererror'),
url(r'^do_registererror/$', register_error,
name='devicehistory-do-registererror'),
url(r'^delete_module/$', delete_module,
name='devicehistory-module'),
url(r'^do_delete_module/$', do_delete_module,
name='devicehistory-do_delete_module'),
)
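# Usage note (illustrative, not from the original file): every pattern above
# is named, so other NAV views and templates can build links without
# hard-coding paths, e.g. with Django's reverse():
#
#     from django.core.urlresolvers import reverse
#     url = reverse('devicehistory-registererror')
#
# The urlresolvers import path matches the Django 1.x series that this
# configuration (django.conf.urls.defaults) targets.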
|
gpl-2.0
| 2,219,473,031,298,057,200 | 40.818182 | 79 | 0.668478 | false |
2ndy/RaspIM
|
usr/share/python-support/python-soappy/SOAPpy/wstools/c14n.py
|
1
|
21235
|
"""Compatibility module, imported by ZSI if you don't have PyXML 0.7.
No copyright violations -- we're only using parts of PyXML that we
wrote.
"""
_copyright = '''ZSI: Zolera Soap Infrastructure.
Copyright 2001, Zolera Systems, Inc. All Rights Reserved.
Copyright 2002-2003, Rich Salz. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, and/or
sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, provided that the above copyright notice(s) and
this permission notice appear in all copies of the Software and that
both the above copyright notice(s) and this permission notice appear in
supporting documentation.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
OR PERFORMANCE OF THIS SOFTWARE.
Except as contained in this notice, the name of a copyright holder
shall not be used in advertising or otherwise to promote the sale, use
or other dealings in this Software without prior written authorization
of the copyright holder.
'''
_copyright += "\n\nPortions are also: "
_copyright += '''Copyright 2001, Zolera Systems Inc. All Rights Reserved.
Copyright 2001, MIT. All Rights Reserved.
Distributed under the terms of:
Python 2.0 License or later.
http://www.python.org/2.0.1/license.html
or
W3C Software License
http://www.w3.org/Consortium/Legal/copyright-software-19980720
'''
from xml.dom import Node
from Namespaces import XMLNS
import cStringIO as StringIO
try:
from xml.dom.ext import c14n
except ImportError, ex:
_implementation2 = None
_attrs = lambda E: (E.attributes and E.attributes.values()) or []
_children = lambda E: E.childNodes or []
else:
class _implementation2(c14n._implementation):
"""Patch for exclusive c14n
"""
def __init__(self, node, write, **kw):
self.unsuppressedPrefixes = kw.get('unsuppressedPrefixes')
self._exclusive = None
if node.nodeType == Node.ELEMENT_NODE:
if not c14n._inclusive(self):
self._exclusive = self._inherit_context(node)
c14n._implementation.__init__(self, node, write, **kw)
def _do_element(self, node, initial_other_attrs = []):
"""Patch for the xml.dom.ext.c14n implemenation _do_element method.
This fixes a problem with sorting of namespaces.
"""
# Get state (from the stack) make local copies.
# ns_parent -- NS declarations in parent
# ns_rendered -- NS nodes rendered by ancestors
# ns_local -- NS declarations relevant to this element
# xml_attrs -- Attributes in XML namespace from parent
# xml_attrs_local -- Local attributes in XML namespace.
ns_parent, ns_rendered, xml_attrs = \
self.state[0], self.state[1].copy(), self.state[2].copy() #0422
ns_local = ns_parent.copy()
xml_attrs_local = {}
# Divide attributes into NS, XML, and others.
#other_attrs = initial_other_attrs[:]
other_attrs = []
sort_these_attrs = initial_other_attrs[:]
in_subset = c14n._in_subset(self.subset, node)
#for a in _attrs(node):
sort_these_attrs +=c14n._attrs(node)
for a in sort_these_attrs:
if a.namespaceURI == c14n.XMLNS.BASE:
n = a.nodeName
if n == "xmlns:": n = "xmlns" # DOM bug workaround
ns_local[n] = a.nodeValue
elif a.namespaceURI == c14n.XMLNS.XML:
if c14n._inclusive(self) or (in_subset and c14n._in_subset(self.subset, a)): #020925 Test to see if attribute node in subset
xml_attrs_local[a.nodeName] = a #0426
else:
if c14n._in_subset(self.subset, a): #020925 Test to see if attribute node in subset
other_attrs.append(a)
#add local xml:foo attributes to ancestor's xml:foo attributes
xml_attrs.update(xml_attrs_local)
# Render the node
W, name = self.write, None
if in_subset:
name = node.nodeName
W('<')
W(name)
# Create list of NS attributes to render.
ns_to_render = []
for n,v in ns_local.items():
# If default namespace is XMLNS.BASE or empty,
# and if an ancestor was the same
if n == "xmlns" and v in [ c14n.XMLNS.BASE, '' ] \
and ns_rendered.get('xmlns') in [ c14n.XMLNS.BASE, '', None ]:
continue
# "omit namespace node with local name xml, which defines
# the xml prefix, if its string value is
# http://www.w3.org/XML/1998/namespace."
if n in ["xmlns:xml", "xml"] \
and v in [ 'http://www.w3.org/XML/1998/namespace' ]:
continue
# If not previously rendered
# and it's inclusive or utilized
if (n,v) not in ns_rendered.items() \
and (c14n._inclusive(self) or \
c14n._utilized(n, node, other_attrs, self.unsuppressedPrefixes)):
ns_to_render.append((n, v))
#####################################
# JRB
#####################################
if not c14n._inclusive(self):
if node.prefix is None:
look_for = [('xmlns', node.namespaceURI),]
else:
look_for = [('xmlns:%s' %node.prefix, node.namespaceURI),]
for a in c14n._attrs(node):
if a.namespaceURI != XMLNS.BASE:
#print "ATTRIBUTE: ", (a.namespaceURI, a.prefix)
if a.prefix:
#print "APREFIX: ", a.prefix
look_for.append(('xmlns:%s' %a.prefix, a.namespaceURI))
for key,namespaceURI in look_for:
if ns_rendered.has_key(key):
if ns_rendered[key] == namespaceURI:
# Dont write out
pass
else:
#ns_to_render += [(key, namespaceURI)]
pass
elif (key,namespaceURI) in ns_to_render:
# Dont write out
pass
else:
# Unique write out, rewrite to render
ns_local[key] = namespaceURI
for a in self._exclusive:
if a.nodeName == key:
#self._do_attr(a.nodeName, a.value)
#ns_rendered[key] = namespaceURI
#break
ns_to_render += [(a.nodeName, a.value)]
break
elif key is None and a.nodeName == 'xmlns':
#print "DEFAULT: ", (a.nodeName, a.value)
ns_to_render += [(a.nodeName, a.value)]
break
#print "KEY: ", key
else:
#print "Look for: ", look_for
#print "NS_TO_RENDER: ", ns_to_render
#print "EXCLUSIVE NS: ", map(lambda f: (f.nodeName,f.value),self._exclusive)
raise RuntimeError, \
'can not find namespace (%s="%s") for exclusive canonicalization'\
%(key, namespaceURI)
#####################################
# Sort and render the ns, marking what was rendered.
ns_to_render.sort(c14n._sorter_ns)
for n,v in ns_to_render:
#XXX JRB, getting 'xmlns,None' here when xmlns=''
if v: self._do_attr(n, v)
else:
v = ''
self._do_attr(n, v)
ns_rendered[n]=v #0417
# If exclusive or the parent is in the subset, add the local xml attributes
# Else, add all local and ancestor xml attributes
# Sort and render the attributes.
if not c14n._inclusive(self) or c14n._in_subset(self.subset,node.parentNode): #0426
other_attrs.extend(xml_attrs_local.values())
else:
other_attrs.extend(xml_attrs.values())
#print "OTHER: ", other_attrs
other_attrs.sort(c14n._sorter)
for a in other_attrs:
self._do_attr(a.nodeName, a.value)
W('>')
# Push state, recurse, pop state.
state, self.state = self.state, (ns_local, ns_rendered, xml_attrs)
for c in c14n._children(node):
c14n._implementation.handlers[c.nodeType](self, c)
self.state = state
if name: W('</%s>' % name)
c14n._implementation.handlers[c14n.Node.ELEMENT_NODE] = _do_element
_IN_XML_NS = lambda n: n.namespaceURI == XMLNS.XML
# Does a document/PI has lesser/greater document order than the
# first element?
_LesserElement, _Element, _GreaterElement = range(3)
def _sorter(n1,n2):
'''_sorter(n1,n2) -> int
Sorting predicate for non-NS attributes.'''
i = cmp(n1.namespaceURI, n2.namespaceURI)
if i: return i
return cmp(n1.localName, n2.localName)
def _sorter_ns(n1,n2):
'''_sorter_ns((n,v),(n,v)) -> int
"(an empty namespace URI is lexicographically least)."'''
if n1[0] == 'xmlns': return -1
if n2[0] == 'xmlns': return 1
return cmp(n1[0], n2[0])
def _utilized(n, node, other_attrs, unsuppressedPrefixes):
'''_utilized(n, node, other_attrs, unsuppressedPrefixes) -> boolean
    Return true if that namespace is utilized within the node'''
if n.startswith('xmlns:'):
n = n[6:]
elif n.startswith('xmlns'):
n = n[5:]
if n == node.prefix or n in unsuppressedPrefixes: return 1
for attr in other_attrs:
if n == attr.prefix: return 1
return 0
_in_subset = lambda subset, node: not subset or node in subset
#
# JRB. Currently there is a bug in do_element, but since the underlying
# Data Structures in c14n have changed I can't just apply the
# _implementation2 patch above. But this will work OK for most uses,
# just not XML Signatures.
#
class _implementation:
    '''Implementation class for C14N. This accompanies a node during its
processing and includes the parameters and processing state.'''
# Handler for each node type; populated during module instantiation.
handlers = {}
def __init__(self, node, write, **kw):
'''Create and run the implementation.'''
self.write = write
self.subset = kw.get('subset')
if self.subset:
self.comments = kw.get('comments', 1)
else:
self.comments = kw.get('comments', 0)
self.unsuppressedPrefixes = kw.get('unsuppressedPrefixes')
nsdict = kw.get('nsdict', { 'xml': XMLNS.XML, 'xmlns': XMLNS.BASE })
# Processing state.
self.state = (nsdict, ['xml'], [])
if node.nodeType == Node.DOCUMENT_NODE:
self._do_document(node)
elif node.nodeType == Node.ELEMENT_NODE:
self.documentOrder = _Element # At document element
if self.unsuppressedPrefixes is not None:
self._do_element(node)
else:
inherited = self._inherit_context(node)
self._do_element(node, inherited)
elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
pass
else:
raise TypeError, str(node)
def _inherit_context(self, node):
'''_inherit_context(self, node) -> list
Scan ancestors of attribute and namespace context. Used only
for single element node canonicalization, not for subset
canonicalization.'''
# Collect the initial list of xml:foo attributes.
xmlattrs = filter(_IN_XML_NS, _attrs(node))
# Walk up and get all xml:XXX attributes we inherit.
inherited, parent = [], node.parentNode
while parent and parent.nodeType == Node.ELEMENT_NODE:
for a in filter(_IN_XML_NS, _attrs(parent)):
n = a.localName
if n not in xmlattrs:
xmlattrs.append(n)
inherited.append(a)
parent = parent.parentNode
return inherited
def _do_document(self, node):
'''_do_document(self, node) -> None
Process a document node. documentOrder holds whether the document
element has been encountered such that PIs/comments can be written
as specified.'''
self.documentOrder = _LesserElement
for child in node.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
self.documentOrder = _Element # At document element
self._do_element(child)
self.documentOrder = _GreaterElement # After document element
elif child.nodeType == Node.PROCESSING_INSTRUCTION_NODE:
self._do_pi(child)
elif child.nodeType == Node.COMMENT_NODE:
self._do_comment(child)
elif child.nodeType == Node.DOCUMENT_TYPE_NODE:
pass
else:
raise TypeError, str(child)
handlers[Node.DOCUMENT_NODE] = _do_document
def _do_text(self, node):
'''_do_text(self, node) -> None
Process a text or CDATA node. Render various special characters
as their C14N entity representations.'''
if not _in_subset(self.subset, node): return
s = node.data \
.replace("&", "&") \
.replace("<", "<") \
.replace(">", ">") \
.replace("\015", "
")
if s: self.write(s)
handlers[Node.TEXT_NODE] = _do_text
handlers[Node.CDATA_SECTION_NODE] = _do_text
def _do_pi(self, node):
'''_do_pi(self, node) -> None
Process a PI node. Render a leading or trailing #xA if the
document order of the PI is greater or lesser (respectively)
than the document element.
'''
if not _in_subset(self.subset, node): return
W = self.write
if self.documentOrder == _GreaterElement: W('\n')
W('<?')
W(node.nodeName)
s = node.data
if s:
W(' ')
W(s)
W('?>')
if self.documentOrder == _LesserElement: W('\n')
handlers[Node.PROCESSING_INSTRUCTION_NODE] = _do_pi
def _do_comment(self, node):
'''_do_comment(self, node) -> None
Process a comment node. Render a leading or trailing #xA if the
document order of the comment is greater or lesser (respectively)
than the document element.
'''
if not _in_subset(self.subset, node): return
if self.comments:
W = self.write
if self.documentOrder == _GreaterElement: W('\n')
W('<!--')
W(node.data)
W('-->')
if self.documentOrder == _LesserElement: W('\n')
handlers[Node.COMMENT_NODE] = _do_comment
def _do_attr(self, n, value):
        '''_do_attr(self, node) -> None
Process an attribute.'''
W = self.write
W(' ')
W(n)
W('="')
s = value \
.replace("&", "&") \
.replace("<", "<") \
.replace('"', '"') \
.replace('\011', '	') \
.replace('\012', '
') \
.replace('\015', '
')
W(s)
W('"')
def _do_element(self, node, initial_other_attrs = []):
'''_do_element(self, node, initial_other_attrs = []) -> None
Process an element (and its children).'''
# Get state (from the stack) make local copies.
# ns_parent -- NS declarations in parent
# ns_rendered -- NS nodes rendered by ancestors
# xml_attrs -- Attributes in XML namespace from parent
# ns_local -- NS declarations relevant to this element
ns_parent, ns_rendered, xml_attrs = \
self.state[0], self.state[1][:], self.state[2][:]
ns_local = ns_parent.copy()
# Divide attributes into NS, XML, and others.
other_attrs = initial_other_attrs[:]
in_subset = _in_subset(self.subset, node)
for a in _attrs(node):
if a.namespaceURI == XMLNS.BASE:
n = a.nodeName
if n == "xmlns:": n = "xmlns" # DOM bug workaround
ns_local[n] = a.nodeValue
elif a.namespaceURI == XMLNS.XML:
if self.unsuppressedPrefixes is None or in_subset:
xml_attrs.append(a)
else:
other_attrs.append(a)
# Render the node
W, name = self.write, None
if in_subset:
name = node.nodeName
W('<')
W(name)
# Create list of NS attributes to render.
ns_to_render = []
for n,v in ns_local.items():
pval = ns_parent.get(n)
# If default namespace is XMLNS.BASE or empty, skip
if n == "xmlns" \
and v in [ XMLNS.BASE, '' ] and pval in [ XMLNS.BASE, '' ]:
continue
# "omit namespace node with local name xml, which defines
# the xml prefix, if its string value is
# http://www.w3.org/XML/1998/namespace."
if n == "xmlns:xml" \
and v in [ 'http://www.w3.org/XML/1998/namespace' ]:
continue
# If different from parent, or parent didn't render
# and if not exclusive, or this prefix is needed or
# not suppressed
if (v != pval or n not in ns_rendered) \
and (self.unsuppressedPrefixes is None or \
_utilized(n, node, other_attrs, self.unsuppressedPrefixes)):
ns_to_render.append((n, v))
# Sort and render the ns, marking what was rendered.
ns_to_render.sort(_sorter_ns)
for n,v in ns_to_render:
self._do_attr(n, v)
ns_rendered.append(n)
# Add in the XML attributes (don't pass to children, since
# we're rendering them), sort, and render.
other_attrs.extend(xml_attrs)
xml_attrs = []
other_attrs.sort(_sorter)
for a in other_attrs:
self._do_attr(a.nodeName, a.value)
W('>')
# Push state, recurse, pop state.
state, self.state = self.state, (ns_local, ns_rendered, xml_attrs)
for c in _children(node):
_implementation.handlers[c.nodeType](self, c)
self.state = state
if name: W('</%s>' % name)
handlers[Node.ELEMENT_NODE] = _do_element
def Canonicalize(node, output=None, **kw):
'''Canonicalize(node, output=None, **kw) -> UTF-8
Canonicalize a DOM document/element node and all descendents.
Return the text; if output is specified then output.write will
be called to output the text and None will be returned
Keyword parameters:
nsdict: a dictionary of prefix:uri namespace entries
assumed to exist in the surrounding context
comments: keep comments if non-zero (default is 0)
subset: Canonical XML subsetting resulting from XPath
(default is [])
unsuppressedPrefixes: do exclusive C14N, and this specifies the
prefixes that should be inherited.
'''
if output:
if _implementation2 is None:
_implementation(node, output.write, **kw)
else:
apply(_implementation2, (node, output.write), kw)
else:
s = StringIO.StringIO()
if _implementation2 is None:
_implementation(node, s.write, **kw)
else:
apply(_implementation2, (node, s.write), kw)
return s.getvalue()
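# Minimal usage sketch (not part of the original module): canonicalize a
# document parsed with xml.dom.minidom. Exact whitespace and namespace
# handling depend on whether the PyXML-backed _implementation2 or the
# fallback _implementation above is in use, so treat the output as
# illustrative only.
#
#     from xml.dom import minidom
#     doc = minidom.parseString('<a xmlns="urn:x"><b attr="1"/></a>')
#     print Canonicalize(doc, comments=0)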
if __name__ == '__main__': print _copyright
|
gpl-2.0
| -1,016,785,352,330,602,400 | 38.691589 | 138 | 0.547116 | false |
egbertbouman/tribler-g
|
Tribler/Core/ProxyService/ProxyDownloader.py
|
1
|
23379
|
# Written by John Hoffman, George Milescu
# see LICENSE.txt for license information
import sys
from random import randint
from urlparse import urlparse
from httplib import HTTPConnection
import urllib
import time
from threading import Thread,currentThread,Lock
from traceback import print_exc, print_stack
from collections import deque
from Tribler.Core.BitTornado.__init__ import product_name,version_short
from Tribler.Core.BitTornado.bitfield import Bitfield
from Tribler.Core.Utilities.utilities import show_permid_short, show_permid
from Tribler.Core.BitTornado.CurrentRateMeasure import Measure
from Tribler.Core.Utilities.timeouturlopen import find_proxy
from Tribler.Core.simpledefs import *
from Tribler.Core.ProxyService.Doe import Doe
from Tribler.Core.ProxyService.Proxy import Proxy
from Tribler.Core.ProxyService.RatePredictor import ExpSmoothRatePredictor
DEBUG = False
PROXY_DLDR_PERIODIC_CHECK = 3 # the interval (in seconds) used to check if all requested pieces have arrived
EXPIRE_TIME = 20 # the minimal time (in seconds) each request has to be handled
SHORT_TERM_MEASURE_INTERVAL = 10 # the time interval (in seconds) for which a download is measured
MAX_NO_PROXIES = 4 # the maximum number of used proxies
VERSION = product_name+'/'+version_short
class haveComplete:
def complete(self):
return True
def __getitem__(self, x):
return True
haveall = haveComplete()
class SingleDownload():
def __init__(self, proxydownloader, proxy_permid):
self.downloader = proxydownloader
self.proxy_permid = proxy_permid
self.connection = None
self.measure = Measure(self.downloader.max_rate_period)
self.active_requests = {} # dictionary with all indexes currently being downloaded. Key: index, value: timestamp (the moment when the piece was requested)
self.piece_size = self.downloader.storage._piecelen(0)
self.total_len = self.downloader.storage.total_length
self.requests = {} # dictionary of lists: requests[index] contains a list of all reserved chunks
self.request_size = {} # dictionary of piece sizes
self.received_data = {} # a dictionary of piece data
self.endflag = False
self.error = None
self.retry_period = 0 #30
self._retry_period = None
self.errorcount = 0
self.active = False
self.cancelled = False
self.numpieces = self.downloader.numpieces
self.proxy_have = Bitfield(self.downloader.numpieces)
self.first_piece_request=True
# boudewijn: VOD needs a download measurement that is not
# averaged over a 'long' period. downloader.max_rate_period is
# (by default) 20 seconds because this matches the unchoke
# policy.
self.short_term_measure = Measure(SHORT_TERM_MEASURE_INTERVAL)
# boudewijn: each download maintains a counter for the number
# of high priority piece requests that did not get any
# responce within x seconds.
self.bad_performance_counter = 0
# HTTP Video Support
self.request_lock = Lock()
self.video_support_policy = False # TODO : get from constructor parameters
self.video_support_enabled = False # Don't start immediately with support
self.video_support_speed = 0.0 # Start with the faster rescheduling speed
        self.video_support_slow_start = False # If enabled, delay the first request (give peers a chance to provide bandwidth)
# Arno, 2010-04-07: Wait 1 second before using HTTP seed. TODO good policy
        # If the video support policy is not enabled then use the HTTP seed normally
if not self.video_support_policy:
self.resched(1)
def resched(self, len = None):
""" Schedule a new piece to be downloaded via proxy
@param len: schedule delay
"""
if len is None:
len = self.retry_period
if self.errorcount > 3:
len = min(1.0,len) * (self.errorcount - 2)
# Arno, 2010-04-07: If immediately, don't go via queue. Actual work is
# done by other thread, so no worries of hogging NetworkThread.
if len > 0:
self.downloader.rawserver.add_task(self.download, len)
else:
self.download()
def _want(self, index):
""" TODO:
@param index: TODO:
"""
# if the piece is downloading or already downloaded
if index in self.downloader.allocated_pieces.keys():
return False
#return self.downloader.storage.do_I_have_requests(index)
# TODO: endflag behavior
if self.endflag:
return self.downloader.storage.do_I_have_requests(index)
else:
return self.downloader.storage.is_unstarted(index)
def download(self):
""" Download one piece
"""
from Tribler.Core.Session import Session
session = Session.get_instance()
session.uch.perform_usercallback(self._download)
def _download(self):
""" Download one piece
"""
#self.request_lock.acquire()
if DEBUG:
print "proxy-sdownload: _download()"
if self.first_piece_request:
slots=self.numpieces/40 # 2.5%
self.first_piece_request = False
else:
slots=1
self.cancelled = False
for p in range(slots):
if self.downloader.picker.am_I_complete():
if self in self.downloader.downloads:
self.downloader.downloads.remove(self)
if DEBUG:
print "proxy-sdownload: _download: i_am_complete, return"
return
# Use the lock to make sure the same piece index is not generated simultaneously by two threads
self.downloader.get_next_piece_lock.acquire()
try:
new_index = self.downloader.picker.next(self.proxy_have, self._want, self)
if new_index is None:
self.endflag = False
self.first_piece_request = True
self.resched(1)
if DEBUG:
print "proxy-sdownload: _download: picker returned none, return"
return
else:
# save the index-permid pair
self.downloader.allocated_pieces[new_index] = self.proxy_permid
self.active_requests[new_index] = time.time()
# i have a valid index
# reserve the new_index piece
# reserve all available (previously not reserved by anyone) chunks in new_index
self._get_requests(new_index)
if DEBUG:
print "proxy-sdownload: _download: requesting piece", new_index, "to proxy"
# Send request to proxy
# Just overwrite other blocks and don't ask for ranges.
self._request(new_index)
finally:
self.downloader.get_next_piece_lock.release()
self.active = True
def _request(self, index):
""" Request the piece index to the proxy
"""
import encodings.ascii
import encodings.punycode
import encodings.idna
self.error = None
self.received_data[index] = None
try:
if DEBUG:
print >>sys.stderr, 'ProxyDownloader: _request: piece ', index
self.downloader.doe.send_download_piece(index, self.proxy_permid)
except Exception, e:
print_exc()
self.error = 'error accessing proxy seed: '+str(e)
def request_finished(self, index):
""" Called after the requested data arrived
Called from Doe.got_piece_data
"""
self.active = False
if self.error is not None:
self.errorcount += 1
if self.received_data[index]:
self.errorcount = 0
if not self._got_data(index):
self.received_data[index] = None
if not self.received_data[index]:
self._release_requests(index)
self.downloader.btdownloader.piece_flunked(index)
# TODO: handle robustness in a more elegant way
try:
del(self.active_requests[index])
except:
pass
try:
del(self.requests[index])
except:
pass
try:
del(self.request_size[index])
except:
pass
try:
del(self.received_data[index])
except:
pass
#self.request_lock.release()
if self._retry_period is not None:
self.resched(self._retry_period)
self._retry_period = None
return
self.resched()
def _got_data(self, index):
""" Pass the received data to the storage module and update the bittorrent engine data structures
"""
# Diego, 2010-04-16: retry_period set depending on the level of support asked by the MovieOnDemandTransporter
        # TODO: update _retry_period, if necessary
#self._retry_period = self.video_support_speed
if len(self.received_data[index]) != self.request_size[index]:
self.downloader.errorfunc('corrupt data from proxy - redownloading')
# unmark the piece to be redownloaded in the future
try:
del(self.downloader.allocated_pieces[index])
except:
pass
return False
self.measure.update_rate(len(self.received_data[index]))
self.short_term_measure.update_rate(len(self.received_data[index]))
self.downloader.measurefunc(len(self.received_data[index]))
if self.bad_performance_counter:
self.bad_performance_counter -= 1
if self.cancelled:
return False
if not self._fulfill_requests(index):
return False
if self.downloader.storage.do_I_have(index):
self.downloader.picker.complete(index)
self.downloader.peerdownloader.check_complete(index)
self.downloader.gotpiecefunc(index)
# Mark the piece as downloaded
self.downloader.allocated_pieces[index] = None
return True
def _get_requests(self, index):
""" Reserve all chunks in self.piece
"""
# Reserve all chunks in the index piece
self.requests[index] = []
self.request_size[index] = 0L
# reserve all available (previously not reserved by anyone) chunks
while self.downloader.storage.do_I_have_requests(index):
# reserve another chunk
r = self.downloader.storage.new_request(index)
self.requests[index].append(r)
self.request_size[index] += r[1]
self.requests[index].sort()
def _fulfill_requests(self, index):
""" Save the received data on the disk using the storage module interface
"""
if len(self.requests[index]) == 0:
return False
start = 0L
success = True
while self.requests[index]:
begin, length = self.requests[index].pop(0)
if not self.downloader.storage.piece_came_in(index, begin, [], self.received_data[index][start:start+length], length):
success = False
break
start += length
return success
def _release_requests(self, index):
""" Cancel the reservation for all chunks in self.piece
"""
for begin, length in self.requests[index]:
self.downloader.storage.request_lost(index, begin, length)
self.requests[index] = []
def slow_start_wake_up(self):
""" TODO:
"""
self.video_support_slow_start = False
self.resched(0)
def is_slow_start(self):
""" TODO:
"""
return self.video_support_slow_start
def start_video_support(self, level = 0.0, sleep_time = None):
""" Level indicates how fast a new request is scheduled and therefore the level of support required.
0 = maximum support. (immediate rescheduling)
1 ~= 0.01 seconds between each request
2 ~= 0.1 seconds between each request
            and so on... at the moment only level 0 is requested. Note that level is a float!
"""
if DEBUG:
print >>sys.stderr,"GetRightHTTPDownloader: START"
self.video_support_speed = 0.001 * ((10 ** level)-1)
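        # Worked examples of the mapping above (added for clarity, consistent
        # with the docstring): level 0 -> 0.0 s (immediate reschedule),
        # level 1 -> 0.009 s, level 2 -> 0.099 s, level 3 -> 0.999 s.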
if not self.video_support_enabled:
self.video_support_enabled = True
if sleep_time:
if not self.video_support_slow_start:
self.video_support_slow_start = True
self.downloader.rawserver.add_task(self.slow_start_wake_up, sleep_time)
else:
self.resched(self.video_support_speed)
def stop_video_support(self):
""" TODO:
"""
if DEBUG:
print >>sys.stderr,"GetRightHTTPDownloader: STOP"
if not self.video_support_enabled:
return
self.video_support_enabled = False
def is_video_support_enabled(self):
""" TODO:
"""
return self.video_support_enabled
def get_rate(self):
""" TODO:
"""
return self.measure.get_rate()
def get_short_term_rate(self):
""" TODO:
"""
return self.short_term_measure.get_rate()
class ProxyDownloader:
""" This class manages connects the doe and the proxy components with the BitTorrent engine.
"""
def __init__(self, bt1_download, storage, picker, rawserver,
finflag, errorfunc, btdownloader,
max_rate_period, infohash, measurefunc, gotpiecefunc, dlinstance, scheduler):
self.storage = storage
self.picker = picker
self.rawserver = rawserver
self.finflag = finflag
self.errorfunc = errorfunc
self.btdownloader = btdownloader
self.peerdownloader = btdownloader
self.infohash = infohash
self.max_rate_period = max_rate_period
self.gotpiecefunc = gotpiecefunc
self.measurefunc = measurefunc
self.downloads = []
self.seedsfound = 0
self.video_support_enabled = False
self.bt1_download = bt1_download
self.numpieces = btdownloader.numpieces
self.storage = btdownloader.storage
self.scheduler = scheduler
self.proxy = None
self.doe = None
self.rate_predictor = None
self.dlinstance = dlinstance
        # allocated_pieces maps each piece index to the permid the piece was requested to
# if index in allocated_pieces.keys(): piece is downloading or downloaded
# if allocated_pieces[index] = None: piece was already downloaded
# if allocated_pieces[index] = permid: piece is currently downloading
self.allocated_pieces = {}
self.get_next_piece_lock = Lock()
if DEBUG:
print >>sys.stderr,"ProxyDownloader: proxyservice_role is",self.bt1_download.config['proxyservice_role']
# Create the Doe object for this download
self.doe = Doe(self.infohash, self.bt1_download.len_pieces, self.btdownloader, self, self.bt1_download.encoder)
# Create the Proxy object
self.proxy = Proxy(self.infohash, self.bt1_download.len_pieces, self.btdownloader, self, self.bt1_download.encoder)
self.bt1_download.encoder.set_proxy(self.proxy)
self.rate_predictor = ExpSmoothRatePredictor(self.bt1_download.rawserver, self.bt1_download.downmeasure, self.bt1_download.config['max_download_rate'])
self.bt1_download.picker.set_rate_predictor(self.rate_predictor)
self.rate_predictor.update()
if DEBUG:
print >>sys.stderr,"ProxyDownloader: loading complete"
# notify the proxydownloader finished loading
from Tribler.Core.Session import Session
session = Session.get_instance()
session.uch.notify(NTFY_PROXYDOWNLOADER, NTFY_STARTED, None, self.infohash)
# get proxy_permids from the ProxyPeerManager and call the Doe to send Relay Requests
self.check_proxy_supply()
self.scheduler(self.dlr_periodic_check, PROXY_DLDR_PERIODIC_CHECK)
def proxy_connection_closed(self, proxy_permid):
""" Handles the connection closed event.
Called by ProxyPeerManager.ol_connection_created_or_closed()
@param proxy_permid: the permid of the proxy node for which the connection was closed
"""
if DEBUG:
print >>sys.stderr, "ProxyDownloader: proxy_connection_closed for", show_permid_short(proxy_permid)
if proxy_permid in self.doe.confirmed_proxies:
if DEBUG:
print >> sys.stderr, "ProxyDownloader: ol_connection_created_or_closed: confirmed proxy ol connection closed"
dl_object = None
for download in self.downloads:
if download.proxy_permid == proxy_permid:
dl_object = download
# 08/06/11 boudewijn: this may not always find a dl_object
if dl_object:
cancel_requests = {} #key=piece number, value=proxy permid
for piece_index,time_of_request in dl_object.active_requests.items():
dl_object.bad_performance_counter += 1
cancel_requests[piece_index] = dl_object.proxy_permid
# Cancel all requests that did not arrive yet
if cancel_requests:
for index in cancel_requests:
try:
dl_object._release_requests(index)
except:
pass
try:
del(dl_object.active_requests[index])
except:
pass
try:
del(self.allocated_pieces[index])
except:
pass
self.doe.remove_unreachable_proxy(proxy_permid)
if proxy_permid in self.doe.asked_proxies:
if DEBUG:
print >> sys.stderr, "ProxyDownloader: ol_connection_created_or_closed: asked proxy ol connection closed"
self.doe.remove_unreachable_proxy(proxy_permid)
def dlr_periodic_check(self):
""" Calls the check_outstanding_requests function and then reschedules itself
"""
if self.dlinstance.get_proxyservice_role() != PROXYSERVICE_ROLE_DOE:
return
self.check_outstanding_requests(self.downloads)
self.check_proxy_supply()
self.scheduler(self.dlr_periodic_check, PROXY_DLDR_PERIODIC_CHECK)
def check_proxy_supply(self):
""" Get proxy_permids from the ProxyPeerManager and call the Doe to send Relay Requests
"""
if self.dlinstance.get_proxyservice_role() != PROXYSERVICE_ROLE_DOE:
return
if len(self.doe.confirmed_proxies) >= MAX_NO_PROXIES:
return
proxy_list = []
from Tribler.Core.Overlay.OverlayApps import OverlayApps
overlay_apps = OverlayApps.getInstance()
for i in range(MAX_NO_PROXIES-len(self.doe.confirmed_proxies)):
proxy_permid = overlay_apps.proxy_peer_manager.request_proxy(self)
if proxy_permid is not None:
proxy_list.append(proxy_permid)
if len(proxy_list) != 0:
self.doe.send_relay_request(proxy_list)
def check_outstanding_requests(self, downloads):
now = time.time()
for download in downloads:
cancel_requests = {} #key=piece number, value=proxy permid
download_rate = download.get_short_term_rate()
for piece_index,time_of_request in download.active_requests.items():
# each request must be allowed at least some minimal time to be handled
if now < time_of_request + EXPIRE_TIME:
continue
if download_rate == 0:
# we have not received anything in the last min_delay seconds
if DEBUG:
print >>sys.stderr, "ProxyDownloader: download_rate is 0 for this connection. Canceling all piece requests"
download.bad_performance_counter += 1
cancel_requests[piece_index] = download.proxy_permid
# Cancel all requests that did not arrive yet
if cancel_requests:
for index in cancel_requests:
try:
download._release_requests(index)
except:
pass
try:
del(download.active_requests[index])
except:
pass
try:
del(self.allocated_pieces[index])
except:
pass
self.doe.send_cancel_downloading_piece(index, cancel_requests[index])
def make_download(self, proxy_permid):
""" Ads a new data channel with a proxy node.
Used for the doe component.
@param permid: The permid of the proxy
"""
self.downloads.append(SingleDownload(self, proxy_permid))
return self.downloads[-1]
def get_downloads(self):
""" Returns the list of proxy data channels (downloads)
"""
if self.finflag.isSet():
return []
return self.downloads
def cancel_piece_download(self, pieces):
""" TODO:
@param pieces: TODO:
"""
for d in self.downloads:
if d.active and d.index in pieces:
d.cancelled = True
# Diego : wrap each single http download
def start_video_support(self, level = 0.0, sleep_time = None):
""" TODO:
@param level: TODO:
@param sleep_time: TODO:
"""
for d in self.downloads:
d.start_video_support(level, sleep_time)
self.video_support_enabled = True
def stop_video_support(self):
""" TODO:
"""
for d in self.downloads:
d.stop_video_support()
self.video_support_enabled = False
def is_video_support_enabled(self):
""" TODO:
"""
return self.video_support_enabled
def is_slow_start(self):
""" TODO:
"""
for d in self.downloads:
if d.is_slow_start():
return True
return False
|
lgpl-2.1
| -7,412,642,661,146,545,000 | 35.759434 | 163 | 0.582831 | false |
wdv4758h/ZipPy
|
lib-python/3/test/test_types.py
|
1
|
22083
|
# Python test set -- part 6, built-in types
from test.support import run_unittest, run_with_locale, impl_detail
import unittest
import sys
import locale
class TypesTests(unittest.TestCase):
def test_truth_values(self):
if None: self.fail('None is true instead of false')
if 0: self.fail('0 is true instead of false')
if 0.0: self.fail('0.0 is true instead of false')
if '': self.fail('\'\' is true instead of false')
if not 1: self.fail('1 is false instead of true')
if not 1.0: self.fail('1.0 is false instead of true')
if not 'x': self.fail('\'x\' is false instead of true')
if not {'x': 1}: self.fail('{\'x\': 1} is false instead of true')
def f(): pass
class C: pass
x = C()
if not f: self.fail('f is false instead of true')
if not C: self.fail('C is false instead of true')
if not sys: self.fail('sys is false instead of true')
if not x: self.fail('x is false instead of true')
def test_boolean_ops(self):
if 0 or 0: self.fail('0 or 0 is true instead of false')
if 1 and 1: pass
else: self.fail('1 and 1 is false instead of true')
if not 1: self.fail('not 1 is true instead of false')
def test_comparisons(self):
if 0 < 1 <= 1 == 1 >= 1 > 0 != 1: pass
else: self.fail('int comparisons failed')
if 0.0 < 1.0 <= 1.0 == 1.0 >= 1.0 > 0.0 != 1.0: pass
else: self.fail('float comparisons failed')
if '' < 'a' <= 'a' == 'a' < 'abc' < 'abd' < 'b': pass
else: self.fail('string comparisons failed')
if None is None: pass
else: self.fail('identity test failed')
def test_float_constructor(self):
self.assertRaises(ValueError, float, '')
self.assertRaises(ValueError, float, '5\0')
def test_zero_division(self):
try: 5.0 / 0.0
except ZeroDivisionError: pass
else: self.fail("5.0 / 0.0 didn't raise ZeroDivisionError")
try: 5.0 // 0.0
except ZeroDivisionError: pass
else: self.fail("5.0 // 0.0 didn't raise ZeroDivisionError")
try: 5.0 % 0.0
except ZeroDivisionError: pass
else: self.fail("5.0 % 0.0 didn't raise ZeroDivisionError")
try: 5 / 0
except ZeroDivisionError: pass
else: self.fail("5 / 0 didn't raise ZeroDivisionError")
try: 5 // 0
except ZeroDivisionError: pass
else: self.fail("5 // 0 didn't raise ZeroDivisionError")
try: 5 % 0
except ZeroDivisionError: pass
else: self.fail("5 % 0 didn't raise ZeroDivisionError")
def test_numeric_types(self):
if 0 != 0.0 or 1 != 1.0 or -1 != -1.0:
self.fail('int/float value not equal')
# calling built-in types without argument must return 0
if int() != 0: self.fail('int() does not return 0')
if float() != 0.0: self.fail('float() does not return 0.0')
if int(1.9) == 1 == int(1.1) and int(-1.1) == -1 == int(-1.9): pass
else: self.fail('int() does not round properly')
if float(1) == 1.0 and float(-1) == -1.0 and float(0) == 0.0: pass
else: self.fail('float() does not work properly')
def test_float_to_string(self):
def test(f, result):
self.assertEqual(f.__format__('e'), result)
self.assertEqual('%e' % f, result)
# test all 2 digit exponents, both with __format__ and with
# '%' formatting
for i in range(-99, 100):
test(float('1.5e'+str(i)), '1.500000e{0:+03d}'.format(i))
# test some 3 digit exponents
self.assertEqual(1.5e100.__format__('e'), '1.500000e+100')
self.assertEqual('%e' % 1.5e100, '1.500000e+100')
self.assertEqual(1.5e101.__format__('e'), '1.500000e+101')
self.assertEqual('%e' % 1.5e101, '1.500000e+101')
self.assertEqual(1.5e-100.__format__('e'), '1.500000e-100')
self.assertEqual('%e' % 1.5e-100, '1.500000e-100')
self.assertEqual(1.5e-101.__format__('e'), '1.500000e-101')
self.assertEqual('%e' % 1.5e-101, '1.500000e-101')
self.assertEqual('%g' % 1.0, '1')
self.assertEqual('%#g' % 1.0, '1.00000')
def test_normal_integers(self):
# Ensure the first 256 integers are shared
a = 256
b = 128*2
if a is not b: self.fail('256 is not shared')
if 12 + 24 != 36: self.fail('int op')
if 12 + (-24) != -12: self.fail('int op')
if (-12) + 24 != 12: self.fail('int op')
if (-12) + (-24) != -36: self.fail('int op')
if not 12 < 24: self.fail('int op')
if not -24 < -12: self.fail('int op')
# Test for a particular bug in integer multiply
xsize, ysize, zsize = 238, 356, 4
if not (xsize*ysize*zsize == zsize*xsize*ysize == 338912):
self.fail('int mul commutativity')
# And another.
m = -sys.maxsize - 1
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor
prod = divisor * j
if prod != m:
self.fail("%r * %r == %r != %r" % (divisor, j, prod, m))
if type(prod) is not int:
self.fail("expected type(prod) to be int, not %r" %
type(prod))
# Check for unified integral type
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor - 1
prod = divisor * j
if type(prod) is not int:
self.fail("expected type(%r) to be int, not %r" %
(prod, type(prod)))
# Check for unified integral type
m = sys.maxsize
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor + 1
prod = divisor * j
if type(prod) is not int:
self.fail("expected type(%r) to be int, not %r" %
(prod, type(prod)))
x = sys.maxsize
self.assertIsInstance(x + 1, int,
"(sys.maxsize + 1) should have returned int")
self.assertIsInstance(-x - 1, int,
"(-sys.maxsize - 1) should have returned int")
self.assertIsInstance(-x - 2, int,
"(-sys.maxsize - 2) should have returned int")
try: 5 << -5
except ValueError: pass
else: self.fail('int negative shift <<')
try: 5 >> -5
except ValueError: pass
else: self.fail('int negative shift >>')
def test_floats(self):
if 12.0 + 24.0 != 36.0: self.fail('float op')
if 12.0 + (-24.0) != -12.0: self.fail('float op')
if (-12.0) + 24.0 != 12.0: self.fail('float op')
if (-12.0) + (-24.0) != -36.0: self.fail('float op')
if not 12.0 < 24.0: self.fail('float op')
if not -24.0 < -12.0: self.fail('float op')
def test_strings(self):
if len('') != 0: self.fail('len(\'\')')
if len('a') != 1: self.fail('len(\'a\')')
if len('abcdef') != 6: self.fail('len(\'abcdef\')')
if 'xyz' + 'abcde' != 'xyzabcde': self.fail('string concatenation')
if 'xyz'*3 != 'xyzxyzxyz': self.fail('string repetition *3')
if 0*'abcde' != '': self.fail('string repetition 0*')
if min('abc') != 'a' or max('abc') != 'c': self.fail('min/max string')
if 'a' in 'abc' and 'b' in 'abc' and 'c' in 'abc' and 'd' not in 'abc': pass
else: self.fail('in/not in string')
x = 'x'*103
if '%s!'%x != x+'!': self.fail('nasty string formatting bug')
#extended slices for strings
a = '0123456789'
self.assertEqual(a[::], a)
self.assertEqual(a[::2], '02468')
self.assertEqual(a[1::2], '13579')
self.assertEqual(a[::-1],'9876543210')
self.assertEqual(a[::-2], '97531')
self.assertEqual(a[3::-2], '31')
self.assertEqual(a[-100:100:], a)
self.assertEqual(a[100:-100:-1], a[::-1])
self.assertEqual(a[-100:100:2], '02468')
def test_type_function(self):
self.assertRaises(TypeError, type, 1, 2)
self.assertRaises(TypeError, type, 1, 2, 3, 4)
def test_int__format__(self):
def test(i, format_spec, result):
# just make sure we have the unified type for integers
assert type(i) == int
assert type(format_spec) == str
self.assertEqual(i.__format__(format_spec), result)
test(123456789, 'd', '123456789')
test(123456789, 'd', '123456789')
test(1, 'c', '\01')
# sign and aligning are interdependent
test(1, "-", '1')
test(-1, "-", '-1')
test(1, "-3", ' 1')
test(-1, "-3", ' -1')
test(1, "+3", ' +1')
test(-1, "+3", ' -1')
test(1, " 3", ' 1')
test(-1, " 3", ' -1')
test(1, " ", ' 1')
test(-1, " ", '-1')
# hex
test(3, "x", "3")
test(3, "X", "3")
test(1234, "x", "4d2")
test(-1234, "x", "-4d2")
test(1234, "8x", " 4d2")
test(-1234, "8x", " -4d2")
test(1234, "x", "4d2")
test(-1234, "x", "-4d2")
test(-3, "x", "-3")
test(-3, "X", "-3")
test(int('be', 16), "x", "be")
test(int('be', 16), "X", "BE")
test(-int('be', 16), "x", "-be")
test(-int('be', 16), "X", "-BE")
# octal
test(3, "o", "3")
test(-3, "o", "-3")
test(65, "o", "101")
test(-65, "o", "-101")
test(1234, "o", "2322")
test(-1234, "o", "-2322")
test(1234, "-o", "2322")
test(-1234, "-o", "-2322")
test(1234, " o", " 2322")
test(-1234, " o", "-2322")
test(1234, "+o", "+2322")
test(-1234, "+o", "-2322")
# binary
test(3, "b", "11")
test(-3, "b", "-11")
test(1234, "b", "10011010010")
test(-1234, "b", "-10011010010")
test(1234, "-b", "10011010010")
test(-1234, "-b", "-10011010010")
test(1234, " b", " 10011010010")
test(-1234, " b", "-10011010010")
test(1234, "+b", "+10011010010")
test(-1234, "+b", "-10011010010")
# alternate (#) formatting
test(0, "#b", '0b0')
test(0, "-#b", '0b0')
test(1, "-#b", '0b1')
test(-1, "-#b", '-0b1')
test(-1, "-#5b", ' -0b1')
test(1, "+#5b", ' +0b1')
test(100, "+#b", '+0b1100100')
test(100, "#012b", '0b0001100100')
test(-100, "#012b", '-0b001100100')
test(0, "#o", '0o0')
test(0, "-#o", '0o0')
test(1, "-#o", '0o1')
test(-1, "-#o", '-0o1')
test(-1, "-#5o", ' -0o1')
test(1, "+#5o", ' +0o1')
test(100, "+#o", '+0o144')
test(100, "#012o", '0o0000000144')
test(-100, "#012o", '-0o000000144')
test(0, "#x", '0x0')
test(0, "-#x", '0x0')
test(1, "-#x", '0x1')
test(-1, "-#x", '-0x1')
test(-1, "-#5x", ' -0x1')
test(1, "+#5x", ' +0x1')
test(100, "+#x", '+0x64')
test(100, "#012x", '0x0000000064')
test(-100, "#012x", '-0x000000064')
test(123456, "#012x", '0x000001e240')
test(-123456, "#012x", '-0x00001e240')
test(0, "#X", '0X0')
test(0, "-#X", '0X0')
test(1, "-#X", '0X1')
test(-1, "-#X", '-0X1')
test(-1, "-#5X", ' -0X1')
test(1, "+#5X", ' +0X1')
test(100, "+#X", '+0X64')
test(100, "#012X", '0X0000000064')
test(-100, "#012X", '-0X000000064')
test(123456, "#012X", '0X000001E240')
test(-123456, "#012X", '-0X00001E240')
test(123, ',', '123')
test(-123, ',', '-123')
test(1234, ',', '1,234')
test(-1234, ',', '-1,234')
test(123456, ',', '123,456')
test(-123456, ',', '-123,456')
test(1234567, ',', '1,234,567')
test(-1234567, ',', '-1,234,567')
# issue 5782, commas with no specifier type
test(1234, '010,', '00,001,234')
# Unified type for integers
test(10**100, 'd', '1' + '0' * 100)
test(10**100+100, 'd', '1' + '0' * 97 + '100')
# make sure these are errors
# precision disallowed
self.assertRaises(ValueError, 3 .__format__, "1.3")
# sign not allowed with 'c'
self.assertRaises(ValueError, 3 .__format__, "+c")
# format spec must be string
self.assertRaises(TypeError, 3 .__format__, None)
self.assertRaises(TypeError, 3 .__format__, 0)
# can't have ',' with 'n'
self.assertRaises(ValueError, 3 .__format__, ",n")
# can't have ',' with 'c'
self.assertRaises(ValueError, 3 .__format__, ",c")
# ensure that only int and float type specifiers work
for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
[chr(x) for x in range(ord('A'), ord('Z')+1)]):
if not format_spec in 'bcdoxXeEfFgGn%':
self.assertRaises(ValueError, 0 .__format__, format_spec)
self.assertRaises(ValueError, 1 .__format__, format_spec)
self.assertRaises(ValueError, (-1) .__format__, format_spec)
# ensure that float type specifiers work; format converts
# the int to a float
for format_spec in 'eEfFgG%':
for value in [0, 1, -1, 100, -100, 1234567890, -1234567890]:
self.assertEqual(value.__format__(format_spec),
float(value).__format__(format_spec))
# Issue 6902
test(123456, "0<20", '12345600000000000000')
test(123456, "1<20", '12345611111111111111')
test(123456, "*<20", '123456**************')
test(123456, "0>20", '00000000000000123456')
test(123456, "1>20", '11111111111111123456')
test(123456, "*>20", '**************123456')
test(123456, "0=20", '00000000000000123456')
test(123456, "1=20", '11111111111111123456')
test(123456, "*=20", '**************123456')
@run_with_locale('LC_NUMERIC', 'en_US.UTF8')
def test_float__format__locale(self):
# test locale support for __format__ code 'n'
for i in range(-10, 10):
x = 1234567890.0 * (10.0 ** i)
self.assertEqual(locale.format('%g', x, grouping=True), format(x, 'n'))
self.assertEqual(locale.format('%.10g', x, grouping=True), format(x, '.10n'))
@run_with_locale('LC_NUMERIC', 'en_US.UTF8')
def test_int__format__locale(self):
# test locale support for __format__ code 'n' for integers
x = 123456789012345678901234567890
for i in range(0, 30):
self.assertEqual(locale.format('%d', x, grouping=True), format(x, 'n'))
# move to the next integer to test
x = x // 10
rfmt = ">20n"
lfmt = "<20n"
cfmt = "^20n"
for x in (1234, 12345, 123456, 1234567, 12345678, 123456789, 1234567890, 12345678900):
self.assertEqual(len(format(0, rfmt)), len(format(x, rfmt)))
self.assertEqual(len(format(0, lfmt)), len(format(x, lfmt)))
self.assertEqual(len(format(0, cfmt)), len(format(x, cfmt)))
def test_float__format__(self):
def test(f, format_spec, result):
self.assertEqual(f.__format__(format_spec), result)
self.assertEqual(format(f, format_spec), result)
test(0.0, 'f', '0.000000')
# the default is 'g', except for empty format spec
test(0.0, '', '0.0')
test(0.01, '', '0.01')
test(0.01, 'g', '0.01')
# test for issue 3411
test(1.23, '1', '1.23')
test(-1.23, '1', '-1.23')
test(1.23, '1g', '1.23')
test(-1.23, '1g', '-1.23')
test( 1.0, ' g', ' 1')
test(-1.0, ' g', '-1')
test( 1.0, '+g', '+1')
test(-1.0, '+g', '-1')
test(1.1234e200, 'g', '1.1234e+200')
test(1.1234e200, 'G', '1.1234E+200')
test(1.0, 'f', '1.000000')
test(-1.0, 'f', '-1.000000')
test( 1.0, ' f', ' 1.000000')
test(-1.0, ' f', '-1.000000')
test( 1.0, '+f', '+1.000000')
test(-1.0, '+f', '-1.000000')
# Python versions <= 3.0 switched from 'f' to 'g' formatting for
# values larger than 1e50. No longer.
f = 1.1234e90
for fmt in 'f', 'F':
# don't do a direct equality check, since on some
# platforms only the first few digits of dtoa
# will be reliable
result = f.__format__(fmt)
self.assertEqual(len(result), 98)
self.assertEqual(result[-7], '.')
self.assertIn(result[:12], ('112340000000', '112339999999'))
f = 1.1234e200
for fmt in 'f', 'F':
result = f.__format__(fmt)
self.assertEqual(len(result), 208)
self.assertEqual(result[-7], '.')
self.assertIn(result[:12], ('112340000000', '112339999999'))
test( 1.0, 'e', '1.000000e+00')
test(-1.0, 'e', '-1.000000e+00')
test( 1.0, 'E', '1.000000E+00')
test(-1.0, 'E', '-1.000000E+00')
test(1.1234e20, 'e', '1.123400e+20')
test(1.1234e20, 'E', '1.123400E+20')
# No format code means use g, but must have a decimal
# and a number after the decimal. This is tricky, because
        # a totally empty format specifier means something else.
# So, just use a sign flag
test(1e200, '+g', '+1e+200')
test(1e200, '+', '+1e+200')
test(1.1e200, '+g', '+1.1e+200')
test(1.1e200, '+', '+1.1e+200')
# 0 padding
test(1234., '010f', '1234.000000')
test(1234., '011f', '1234.000000')
test(1234., '012f', '01234.000000')
test(-1234., '011f', '-1234.000000')
test(-1234., '012f', '-1234.000000')
test(-1234., '013f', '-01234.000000')
test(-1234.12341234, '013f', '-01234.123412')
test(-123456.12341234, '011.2f', '-0123456.12')
# issue 5782, commas with no specifier type
test(1.2, '010,.2', '0,000,001.2')
# 0 padding with commas
test(1234., '011,f', '1,234.000000')
test(1234., '012,f', '1,234.000000')
test(1234., '013,f', '01,234.000000')
test(-1234., '012,f', '-1,234.000000')
test(-1234., '013,f', '-1,234.000000')
test(-1234., '014,f', '-01,234.000000')
test(-12345., '015,f', '-012,345.000000')
test(-123456., '016,f', '-0,123,456.000000')
test(-123456., '017,f', '-0,123,456.000000')
test(-123456.12341234, '017,f', '-0,123,456.123412')
test(-123456.12341234, '013,.2f', '-0,123,456.12')
# % formatting
test(-1.0, '%', '-100.000000%')
# format spec must be string
self.assertRaises(TypeError, 3.0.__format__, None)
self.assertRaises(TypeError, 3.0.__format__, 0)
# other format specifiers shouldn't work on floats,
# in particular int specifiers
for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
[chr(x) for x in range(ord('A'), ord('Z')+1)]):
if not format_spec in 'eEfFgGn%':
self.assertRaises(ValueError, format, 0.0, format_spec)
self.assertRaises(ValueError, format, 1.0, format_spec)
self.assertRaises(ValueError, format, -1.0, format_spec)
self.assertRaises(ValueError, format, 1e100, format_spec)
self.assertRaises(ValueError, format, -1e100, format_spec)
self.assertRaises(ValueError, format, 1e-100, format_spec)
self.assertRaises(ValueError, format, -1e-100, format_spec)
# Alternate float formatting
test(1.0, '.0e', '1e+00')
test(1.0, '#.0e', '1.e+00')
test(1.0, '.0f', '1')
test(1.0, '#.0f', '1.')
test(1.1, 'g', '1.1')
test(1.1, '#g', '1.10000')
test(1.0, '.0%', '100%')
test(1.0, '#.0%', '100.%')
# Issue 7094: Alternate formatting (specified by #)
test(1.0, '0e', '1.000000e+00')
test(1.0, '#0e', '1.000000e+00')
test(1.0, '0f', '1.000000' )
test(1.0, '#0f', '1.000000')
test(1.0, '.1e', '1.0e+00')
test(1.0, '#.1e', '1.0e+00')
test(1.0, '.1f', '1.0')
test(1.0, '#.1f', '1.0')
test(1.0, '.1%', '100.0%')
test(1.0, '#.1%', '100.0%')
# Issue 6902
test(12345.6, "0<20", '12345.60000000000000')
test(12345.6, "1<20", '12345.61111111111111')
test(12345.6, "*<20", '12345.6*************')
test(12345.6, "0>20", '000000000000012345.6')
test(12345.6, "1>20", '111111111111112345.6')
test(12345.6, "*>20", '*************12345.6')
test(12345.6, "0=20", '000000000000012345.6')
test(12345.6, "1=20", '111111111111112345.6')
test(12345.6, "*=20", '*************12345.6')
def test_format_spec_errors(self):
# int, float, and string all share the same format spec
# mini-language parser.
# Check that we can't ask for too many digits. This is
# probably a CPython specific test. It tries to put the width
# into a C long.
self.assertRaises(ValueError, format, 0, '1'*10000 + 'd')
# Similar with the precision.
self.assertRaises(ValueError, format, 0, '.' + '1'*10000 + 'd')
# And may as well test both.
self.assertRaises(ValueError, format, 0, '1'*1000 + '.' + '1'*10000 + 'd')
# Make sure commas aren't allowed with various type codes
for code in 'xXobns':
self.assertRaises(ValueError, format, 0, ',' + code)
@impl_detail("PyPy has no object.__basicsize__", pypy=False)
def test_internal_sizes(self):
self.assertGreater(object.__basicsize__, 0)
self.assertGreater(tuple.__itemsize__, 0)
def test_main():
run_unittest(TypesTests)
if __name__ == '__main__':
test_main()
|
bsd-3-clause
| 7,613,874,855,356,535,000 | 37.272097 | 94 | 0.502332 | false |
espenak/enkel
|
enkel/exml/formgen.py
|
1
|
4443
|
# This file is part of the Enkel web programming library.
#
# Copyright (C) 2007 Espen Angell Kristiansen (espen@wsgi.net)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from enkel.model.field.base import *
from enkel.model.formgen import Form
from enkel.model.ds import One, Many, DatasourceField
from enkel.xmlutils.writer import XmlWriter
from info import XMLNS_FORM
class Form(Form):
""" EXML Form generator.
@ivar xmlns: The xml namespace used on the root tag of the
result xml node. Defaults to L{info.XMLNS_FORM}.
If None, no xmlns is used.
@ivar pretty: Make readable xml. Inserts extra whitespace,
which might lead to problems with certain (buggy)
parsers.
"""
xmlns = XMLNS_FORM
pretty = False
def start_form(self):
kw = {}
if self.xmlns:
kw["xmlns"] = self.xmlns
if self.id:
kw["id"] = self.id
self.w.start_element("form",
action=self.action, method=self.method,
submit_label=self.submit_label, **kw)
def start_group(self, title):
self.w.start_element("group", title=title)
def handle_field(self, prefix, fieldname, field, value,
uvalue, meta, display):
if display.get("hidden"):
if isinstance(field, Many):
values = uvalue
else:
values = [uvalue]
for v in values:
self.w.start_element("hidden", id=prefix+fieldname)
self.w.text_node(v)
self.w.end_element()
return
readonly = display.get("readonly")
if readonly:
if isinstance(field, DatasourceField):
raise ValueError(
"%s: DatasourceField cannot be readonly." % fieldname)
name = None
for ttype, name in (
(String, "string"),
(Int, "int"),
(Long, "long"),
(Float, "float"),
(Text, "text"),
(Date, "date"),
(DateTime, "datetime"),
(Time, "time"),
(Many, "many"),
(One, "one"),
(Bool, "bool")
):
if isinstance(field, ttype):
break
if not name:
raise ValueError(
"""All form fields must be instances of one of the base field
types defined in enkel.model.field.base. Or one of the two datasource
fields defined in enkel.model.ds. """)
elif name == "string" and \
field.maxlength > field.LONG_STRING:
name = "longstring"
elif readonly:
name = "readonly"
if field.required:
required = "yes"
else:
required = "no"
self.w.start_element(name, # start field element
id = prefix + fieldname,
typehint = field.__class__.__name__,
required = required)
self.w.start_element("label")
self.w.text_node(meta.get("label", fieldname))
self.w.end_element()
self.w.start_element("tooltip")
self.w.text_node(meta.get("shorthelp", ""))
self.w.end_element()
if isinstance(field, One):
datasource = field.datasource
self.w.start_element("onevalue")
for val, label in datasource.ds_iter_unicode():
if val == uvalue:
name = "sel_item"
else:
name = "item"
self.w.start_element(name, value=val)
self.w.text_node(label)
self.w.end_element()
self.w.end_element()
elif isinstance(field, Many):
datasource = field.datasource
self.w.start_element("manyvalue")
for val, label in datasource.ds_iter_unicode():
if val in uvalue:
name = "sel_item"
else:
name = "item"
self.w.start_element(name, value=val)
self.w.text_node(label)
self.w.end_element()
self.w.end_element()
else:
self.w.start_element("value")
self.w.text_node(uvalue)
self.w.end_element()
error = display.get("error")
if error:
self.w.start_element("error")
self.w.text_node(error)
self.w.end_element()
self.w.end_element() # end field element
def end_group(self, title):
self.w.end_element()
def end_form(self):
self.w.end_element()
def create(self):
self.w = XmlWriter(pretty=self.pretty)
super(Form, self).create()
return self.w.create()
|
gpl-2.0
| -2,090,161,680,316,545,300 | 25.446429 | 81 | 0.674994 | false |
rossella/neutron
|
quantum/openstack/common/jsonutils.py
|
1
|
5314
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
JSON related utilities.
This module provides a few things:
1) A handy function for getting an object down to something that can be
JSON serialized. See to_primitive().
2) Wrappers around loads() and dumps(). The dumps() wrapper will
automatically use to_primitive() for you if needed.
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
is available.
'''
import datetime
import inspect
import itertools
import json
import xmlrpclib
from quantum.openstack.common import timeutils
def to_primitive(value, convert_instances=False, level=0):
"""Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
but since this is a recursive function, we could have cyclical
data structures.
To handle cyclical data structures we could track the actual objects
visited in a set, but not all objects are hashable. Instead we just
track the depth of the object inspections and don't go too deep.
Therefore, convert_instances=True is lossy ... be aware.
"""
nasty = [inspect.ismodule, inspect.isclass, inspect.ismethod,
inspect.isfunction, inspect.isgeneratorfunction,
inspect.isgenerator, inspect.istraceback, inspect.isframe,
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
inspect.isabstract]
for test in nasty:
if test(value):
return unicode(value)
# value of itertools.count doesn't get caught by inspects
# above and results in infinite loop when list(value) is called.
if type(value) == itertools.count:
return unicode(value)
# FIXME(vish): Workaround for LP bug 852095. Without this workaround,
# tests that raise an exception in a mocked method that
# has a @wrap_exception with a notifier will fail. If
# we up the dependency to 0.5.4 (when it is released) we
# can remove this workaround.
if getattr(value, '__module__', None) == 'mox':
return 'mock'
if level > 3:
return '?'
# The try block may not be necessary after the class check above,
# but just in case ...
try:
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
if isinstance(value, xmlrpclib.DateTime):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if isinstance(value, (list, tuple)):
o = []
for v in value:
o.append(to_primitive(v, convert_instances=convert_instances,
level=level))
return o
elif isinstance(value, dict):
o = {}
for k, v in value.iteritems():
o[k] = to_primitive(v, convert_instances=convert_instances,
level=level)
return o
elif isinstance(value, datetime.datetime):
return timeutils.strtime(value)
elif hasattr(value, 'iteritems'):
return to_primitive(dict(value.iteritems()),
convert_instances=convert_instances,
level=level + 1)
elif hasattr(value, '__iter__'):
return to_primitive(list(value),
convert_instances=convert_instances,
level=level)
elif convert_instances and hasattr(value, '__dict__'):
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return to_primitive(value.__dict__,
convert_instances=convert_instances,
level=level + 1)
else:
return value
except TypeError:
# Class objects are tricky since they may define something like
# __iter__ defined but it isn't callable as list().
return unicode(value)
def dumps(value, default=to_primitive, **kwargs):
return json.dumps(value, default=default, **kwargs)
def loads(s):
return json.loads(s)
def load(s):
return json.load(s)
try:
import anyjson
except ImportError:
pass
else:
anyjson._modules.append((__name__, 'dumps', TypeError,
'loads', ValueError, 'load'))
anyjson.force_implementation(__name__)
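# Hedged usage sketch (not part of the original module; assumes the quantum
# package is importable so that timeutils is available). dumps() falls back to
# to_primitive() for values the stdlib encoder rejects, such as datetimes; the
# exact datetime string is whatever timeutils.strtime() produces.
if __name__ == '__main__':
    sample = {'created_at': datetime.datetime(2012, 1, 1, 12, 0, 0),
              'sizes': (1, 2, 3)}              # tuple is emitted as a JSON list
    encoded = dumps(sample)                    # datetime handled via to_primitive()
    assert loads(encoded)['sizes'] == [1, 2, 3]
    print encoded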
|
apache-2.0
| 3,052,417,602,733,729,300 | 34.905405 | 78 | 0.625894 | false |
adybbroe/atrain_match
|
atrain_match/reshaped_files_scr/plot_ctth_boxplots_mlvl2_temperature_pressure_height.py
|
1
|
16002
|
"""Read all matched data and make some plotting
"""
import os
import re
from glob import glob
import numpy as np
from matchobject_io import (readCaliopImagerMatchObj,
CalipsoImagerTrackObject)
from plot_kuipers_on_area_util import (PerformancePlottingObject,
ppsMatch_Imager_CalipsoObject)
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams.update({'font.size': 16})
from utils.get_flag_info import get_calipso_clouds_of_type_i
from utils.get_flag_info import (get_semi_opaque_info_pps2014,
get_calipso_high_clouds,
get_calipso_medium_clouds,
get_calipso_low_clouds)
from my_dir import ADIR
def make_boxplot(caObj, name, month="xx", modis_lvl2=False, use_m2_pix=True):
low_clouds = get_calipso_low_clouds(caObj)
high_clouds = get_calipso_high_clouds(caObj)
medium_clouds = get_calipso_medium_clouds(caObj)
height_c = 1000*caObj.calipso.all_arrays['layer_top_altitude'][:,0]
cloud_elevation = 1000*caObj.calipso.all_arrays['layer_top_altitude'][:,0]-caObj.calipso.all_arrays['elevation']
if modis_lvl2:
height_imager = caObj.modis.all_arrays['height']
else:
height_imager = caObj.imager.all_arrays['imager_ctth_m_above_seasurface']
if height_imager is None:
height_imager = caObj.imager.all_arrays['ctth_height']+caObj.calipso.all_arrays['elevation']
use = np.logical_and(height_imager >-1,
height_c>=0)
use = np.logical_and(height_imager <45000,use)
USE_ONLY_PIXELS_WHERE_PPS_AND_MODIS_C6_HAVE_VALUES=use_m2_pix
if USE_ONLY_PIXELS_WHERE_PPS_AND_MODIS_C6_HAVE_VALUES:
height_mlvl2 = caObj.modis.all_arrays['height']
height_pps = caObj.imager.all_arrays['imager_ctth_m_above_seasurface']
use = np.logical_and(use, height_mlvl2>-1)
use = np.logical_and(use, height_mlvl2<45000)
use = np.logical_and(use, height_pps>-1)
use = np.logical_and(use, height_pps<45000)
thin = np.logical_and(caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']<0.30,
caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']>0)
very_thin = np.logical_and(caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']<0.10,
caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']>0)
thin_top = np.logical_and(caObj.calipso.all_arrays['number_layers_found']>1, thin)
thin_1_lay = np.logical_and(caObj.calipso.all_arrays['number_layers_found']==1, thin)
low = np.logical_and(low_clouds,use)
medium = np.logical_and(medium_clouds,use)
high = np.logical_and(high_clouds,use)
c_all = np.logical_or(high,np.logical_or(low,medium))
high_very_thin = np.logical_and(high, very_thin)
high_thin = np.logical_and(high, np.logical_and(~very_thin,thin))
high_thick = np.logical_and(high, ~thin)
#print "thin, thick high", np.sum(high_thin), np.sum(high_thick)
bias = height_imager - height_c
abias = np.abs(bias)
#abias[abias>2000]=2000
print name.ljust(30, " "), "%3.1f"%(np.mean(abias[c_all])), "%3.1f"%(np.mean(abias[low])),"%3.1f"%(np.mean(abias[medium])),"%3.1f"%(np.mean(abias[high]))
c_all = np.logical_or(np.logical_and(~very_thin,high),np.logical_or(low,medium))
number_of = np.sum(c_all)
MAE = np.mean(abias[c_all])
#print name.ljust(30, " "), "%3.1f"%(np.sum(abias[c_all]<250)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<500)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<1000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<1500)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<2000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<3000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<4000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<5000)*100.0/number_of)
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
fig = plt.figure(figsize = (6,9))
ax = fig.add_subplot(111)
plt.xticks(rotation=70)
ax.fill_between(np.arange(0,8),-500,500, facecolor='green', alpha=0.6)
ax.fill_between(np.arange(0,8),-1000,1000, facecolor='green', alpha=0.4)
ax.fill_between(np.arange(0,8),-1500,1500, facecolor='green', alpha=0.2)
ax.fill_between(np.arange(0,8),2000,15000, facecolor='red', alpha=0.2)
ax.fill_between(np.arange(0,8),-2000,-15000, facecolor='red', alpha=0.2)
for y_val in [-5,-4,-3,-2,2,3,4,5]:
plt.plot(np.arange(0,8), y_val*1000 + 0*np.arange(0,8),':k', alpha=0.4)
plt.plot(np.arange(0,8), -10*1000 + 0*np.arange(0,8),':k', alpha=0.4)
plt.plot(np.arange(0,8), 0 + 0*np.arange(0,8),':k', alpha=0.4)
bplot = ax.boxplot([bias[low],bias[medium],bias[high],bias[high_thick],bias[high_thin],bias[high_very_thin]],whis=[5, 95],sym='',
labels=["low","medium","high-all","high-thick\n od>0.4","high-thin \n 0.1<od<0.4","high-vthin\n od<0.1"],showmeans=True, patch_artist=True)
ax.set_ylim(-14000,8000)
for box in bplot['boxes']:
box.set_facecolor('0.9')
plt.title("%s MAE = %3.0f"%(name,MAE))
plt.savefig(ADIR + "/PICTURES_FROM_PYTHON/CTTH_BOX/ctth_box_plot_%s_5_95_filt.png"%(name))
elevation_zero = np.logical_and(use,caObj.calipso.all_arrays['elevation']>5000)
low_clouds = height_c<2500
medium_clouds = np.logical_and(height_c>=2500, height_c<=5000)
high_clouds = height_c>5000
low = np.logical_and(low_clouds,use)
medium = np.logical_and(medium_clouds,use)
high = np.logical_and(high_clouds,use)
fig = plt.figure(figsize = (6,9))
ax = fig.add_subplot(111)
plt.xticks(rotation=50)
ax.fill_between(np.arange(0,8),-500,500, facecolor='green', alpha=0.6)
ax.fill_between(np.arange(0,8),-1000,1000, facecolor='green', alpha=0.4)
ax.fill_between(np.arange(0,8),-1500,1500, facecolor='green', alpha=0.2)
ax.fill_between(np.arange(0,8),2000,15000, facecolor='red', alpha=0.2)
ax.fill_between(np.arange(0,8),-2000,-15000, facecolor='red', alpha=0.2)
for y_val in [-5,-4,-3,-2,2,3,4,5]:
plt.plot(np.arange(0,8), y_val*1000 + 0*np.arange(0,8),':k', alpha=0.4)
plt.plot(np.arange(0,8), -10*1000 + 0*np.arange(0,8),':k', alpha=0.4)
plt.plot(np.arange(0,8), 0 + 0*np.arange(0,8),':k', alpha=0.4)
bplot = ax.boxplot([bias[low],bias[medium],bias[high], bias[elevation_zero]],whis=[5, 95],sym='',
labels=["low <2.5km","medium","high>5km", "ground>5km"],
showmeans=True, patch_artist=True)
ax.set_ylim(-8000,8000)
for box in bplot['boxes']:
box.set_facecolor('0.9')
plt.title("Calipso %s \nHeight bias comparison MAE= %3.0f"%(name, MAE))
plt.savefig(ADIR + "/PICTURES_FROM_PYTHON/CTTH_BOX/ctth_box_plot_hkm_%s_5_95_filt.png"%(name))
def make_boxplot_temperature(caObj, name, modis_lvl2=False):
low_clouds = get_calipso_low_clouds(caObj)
high_clouds = get_calipso_high_clouds(caObj)
medium_clouds = get_calipso_medium_clouds(caObj)
temp_c = caObj.calipso.all_arrays['layer_top_temperature'][:,0] +273.15
if modis_lvl2:
temp_pps = caObj.modis.all_arrays['temperature']
else:
temp_pps = caObj.imager.all_arrays['ctth_temperature']
if modis_lvl2:
height_pps = caObj.modis.all_arrays['height']
else:
height_pps = caObj.imager.all_arrays['ctth_height']
thin = np.logical_and(caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']<0.30,
caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']>0)
very_thin = np.logical_and(caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']<0.10,
caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']>0)
thin_top = np.logical_and(caObj.calipso.all_arrays['number_layers_found']>1, thin)
thin_1_lay = np.logical_and(caObj.calipso.all_arrays['number_layers_found']==1, thin)
use = np.logical_and(temp_pps >100,
caObj.calipso.all_arrays['layer_top_altitude'][:,0]>=0)
use = np.logical_and(height_pps <45000,use)
low = np.logical_and(low_clouds,use)
medium = np.logical_and(medium_clouds,use)
high = np.logical_and(high_clouds,use)
c_all = np.logical_or(high,np.logical_or(low,medium))
high_very_thin = np.logical_and(high, very_thin)
high_thin = np.logical_and(high, np.logical_and(~very_thin,thin))
high_thick = np.logical_and(high, ~thin)
#print "thin, thick high", np.sum(high_thin), np.sum(high_thick)
bias = temp_pps - temp_c
abias = np.abs(bias)
#abias[abias>2000]=2000
print name.ljust(30, " "), "%3.1f"%(np.mean(abias[c_all])), "%3.1f"%(np.mean(abias[low])),"%3.1f"%(np.mean(abias[medium])),"%3.1f"%(np.mean(abias[high]))
c_all = np.logical_or(np.logical_and(~very_thin,high),np.logical_or(low,medium))
number_of = np.sum(c_all)
#print name.ljust(30, " "), "%3.1f"%(np.sum(abias[c_all]<250)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<500)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<1000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<1500)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<2000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<3000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<4000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<5000)*100.0/number_of)
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
fig = plt.figure(figsize = (6,9))
ax = fig.add_subplot(111)
plt.xticks(rotation=70)
ax.fill_between(np.arange(0,8),-2.5,2.5, facecolor='green', alpha=0.6)
ax.fill_between(np.arange(0,8),-5,5, facecolor='green', alpha=0.4)
ax.fill_between(np.arange(0,8),-7.5,7.5, facecolor='green', alpha=0.2)
ax.fill_between(np.arange(0,8),10,150, facecolor='red', alpha=0.2)
ax.fill_between(np.arange(0,8),-20,-10, facecolor='red', alpha=0.2)
for y_val in [-5,-4,-3,-2,-1,1,2,3,4,5]:
plt.plot(np.arange(0,8), y_val*20 + 0*np.arange(0,8),':k', alpha=0.4)
plt.plot(np.arange(0,8), 0 + 0*np.arange(0,8),':k', alpha=0.4)
bplot = ax.boxplot([bias[low],bias[medium],bias[high],bias[high_thick],bias[high_thin],bias[high_very_thin]],whis=[5, 95],sym='',
labels=["low","medium","high-all","high-thick\n od>0.4","high-thin \n 0.1<od<0.4","high-vthin\n od<0.1"],showmeans=True, patch_artist=True)
ax.set_ylim(-20,100)
for box in bplot['boxes']:
box.set_facecolor('0.9')
plt.title(name)
plt.savefig(ADIR + "/PICTURES_FROM_PYTHON/CTTH_BOX/ctth_box_plot_temperature_%s_5_95_filt.png"%(name))
def make_boxplot_pressure(caObj, name, modis_lvl2=False):
low_clouds = get_calipso_low_clouds(caObj)
high_clouds = get_calipso_high_clouds(caObj)
medium_clouds = get_calipso_medium_clouds(caObj)
pressure_c = caObj.calipso.all_arrays['layer_top_pressure'][:,0]
if modis_lvl2:
pressure_pps = caObj.modis.all_arrays['pressure']
else:
pressure_pps = 0.01*caObj.imager.all_arrays['ctth_pressure']
if modis_lvl2:
height_pps = caObj.modis.all_arrays['height']
else:
height_pps = caObj.imager.all_arrays['ctth_height']
thin = np.logical_and(caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']<0.30,
caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']>0)
very_thin = np.logical_and(caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']<0.10,
caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']>0)
thin_top = np.logical_and(caObj.calipso.all_arrays['number_layers_found']>1, thin)
thin_1_lay = np.logical_and(caObj.calipso.all_arrays['number_layers_found']==1, thin)
use = np.logical_and(pressure_pps >0,
caObj.calipso.all_arrays['layer_top_altitude'][:,0]>=0)
low = np.logical_and(low_clouds,use)
medium = np.logical_and(medium_clouds,use)
high = np.logical_and(high_clouds,use)
c_all = np.logical_or(high,np.logical_or(low,medium))
high_very_thin = np.logical_and(high, very_thin)
high_thin = np.logical_and(high, np.logical_and(~very_thin,thin))
high_thick = np.logical_and(high, ~thin)
#print "thin, thick high", np.sum(high_thin), np.sum(high_thick)
bias = pressure_pps - pressure_c
abias = np.abs(bias)
#abias[abias>2000]=2000
print name.ljust(30, " "), "%3.1f"%(np.mean(abias[c_all])), "%3.1f"%(np.mean(abias[low])),"%3.1f"%(np.mean(abias[medium])),"%3.1f"%(np.mean(abias[high]))
c_all = np.logical_or(np.logical_and(~very_thin,high),np.logical_or(low,medium))
number_of = np.sum(c_all)
#print name.ljust(30, " "), "%3.1f"%(np.sum(abias[c_all]<250)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<500)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<1000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<1500)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<2000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<3000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<4000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<5000)*100.0/number_of)
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
fig = plt.figure(figsize = (6,9))
ax = fig.add_subplot(111)
plt.xticks(rotation=70)
ax.fill_between(np.arange(0,8),-50,50, facecolor='green', alpha=0.6)
ax.fill_between(np.arange(0,8),-100,100, facecolor='green', alpha=0.4)
ax.fill_between(np.arange(0,8),-150,150, facecolor='green', alpha=0.2)
ax.fill_between(np.arange(0,8),200,2000, facecolor='red', alpha=0.2)
ax.fill_between(np.arange(0,8),-2000,-200, facecolor='red', alpha=0.2)
for y_val in [-6,-4,-2,2,4,6,8,-8]:
plt.plot(np.arange(0,8), y_val*100 + 0*np.arange(0,8),':k', alpha=0.4)
plt.plot(np.arange(0,8), 0 + 0*np.arange(0,8),':k', alpha=0.4)
bplot = ax.boxplot([bias[low],bias[medium],bias[high],bias[high_thick],bias[high_thin],bias[high_very_thin]],whis=[5, 95],sym='',
labels=["low","medium","high-all","high-thick\n od>0.4","high-thin \n 0.1<od<0.4","high-vthin\n od<0.1"],showmeans=True, patch_artist=True)
ax.set_ylim(-1000,800)
for box in bplot['boxes']:
box.set_facecolor('0.9')
plt.title(name)
plt.savefig(ADIR + "/PICTURES_FROM_PYTHON/CTTH_BOX/ctth_box_plot_pressure_%s_5_95_filt.png"%(name))
def investigate_nn_ctth_modis_lvl2():
#november
ROOT_DIR_MODIS_nn_imager = (
ADIR + "/DATA_MISC/reshaped_files/"
"global_modis_14th_created20170324/Reshaped_Files_merged/eos2/1km/2010/%s/*h5")
ROOT_DIR_MODIS_old = (
ADIR + "/DATA_MISC/reshaped_files/"
"global_modis_14th_created20161108/Reshaped_Files/merged/*%s*h5")
for month in [ "06", "09", "01"]:
for ROOT_DIR, name in zip(
[ROOT_DIR_MODIS_nn_imager,
ROOT_DIR_MODIS_nn_imager,
ROOT_DIR_MODIS_old],
["modis_nnIMAGER",
"modis_lvl2_C6",
"modis_CTTHold"]):
name = "%s_%s"%(name, month)
print ROOT_DIR
files = glob(ROOT_DIR%(month))
caObj = CalipsoImagerTrackObject()
for filename in files:
#print filename
caObj += readCaliopImagerMatchObj(filename)
modis_lvl2 = False
if "modis_lvl2" in name:
modis_lvl2 = True
use_m2_pix=True
if "old" in name:
use_m2_pix=False
make_boxplot(caObj, name, month = month, modis_lvl2=modis_lvl2, use_m2_pix=use_m2_pix)
make_boxplot_pressure(caObj, name, modis_lvl2=modis_lvl2)
make_boxplot_temperature(caObj, name, modis_lvl2=modis_lvl2)
if __name__ == "__main__":
investigate_nn_ctth_modis_lvl2()
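# Minimal standalone sketch (not part of the original script) of the
# masking/box-plot pattern used in the functions above, run on synthetic
# numbers so it needs no matched CALIPSO/imager files. All values are made up.
def _demo_boxplot_pattern(n=1000):
    height_c = np.random.uniform(0, 12000, n)                   # "truth" top heights (m)
    height_imager = height_c + np.random.normal(0, 800, n)      # retrieved heights (m)
    use = np.logical_and(height_imager > -1, height_c >= 0)
    low = np.logical_and(height_c < 2500, use)
    high = np.logical_and(height_c > 5000, use)
    bias = height_imager - height_c
    fig = plt.figure(figsize=(6, 9))
    ax = fig.add_subplot(111)
    ax.boxplot([bias[low], bias[high]], labels=["low <2.5km", "high >5km"],
               whis=[5, 95], sym='', showmeans=True)
    fig.savefig("demo_ctth_box_plot.png")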
|
gpl-3.0
| -7,393,755,009,320,824,000 | 55.946619 | 453 | 0.624734 | false |
duythanhphan/qt-creator
|
tests/system/shared/project_explorer.py
|
1
|
15291
|
#############################################################################
##
## Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
## Contact: http://www.qt-project.org/legal
##
## This file is part of Qt Creator.
##
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Digia. For licensing terms and
## conditions see http://qt.digia.com/licensing. For further information
## use the contact form at http://qt.digia.com/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Digia gives you certain additional
## rights. These rights are described in the Digia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
#############################################################################
import re
# this function switches the MainWindow of creator to the specified view
def switchViewTo(view):
# make sure that no tooltip is shown, so move the mouse away and wait until all disappear
mouseMove(waitForObject(':Qt Creator_Core::Internal::MainWindow'), -20, -20)
waitFor("not QToolTip.isVisible()", 15000)
if view < ViewConstants.WELCOME or view > ViewConstants.LAST_AVAILABLE:
return
tabBar = waitForObject("{type='Core::Internal::FancyTabBar' unnamed='1' visible='1' "
"window=':Qt Creator_Core::Internal::MainWindow'}")
mouseMove(tabBar, 20, 20 + 52 * view)
if waitFor("QToolTip.isVisible()", 10000):
text = str(QToolTip.text())
else:
test.warning("Waiting for ToolTip timed out.")
text = ""
pattern = ViewConstants.getToolTipForViewTab(view)
if re.match(pattern, unicode(text), re.UNICODE):
test.passes("ToolTip verified")
else:
test.warning("ToolTip does not match", "Expected pattern: %s\nGot: %s" % (pattern, text))
mouseClick(waitForObject("{type='Core::Internal::FancyTabBar' unnamed='1' visible='1' "
"window=':Qt Creator_Core::Internal::MainWindow'}"), 20, 20 + 52 * view, 0, Qt.LeftButton)
# this function is used to make sure that simple building prerequisites are met
# param targetCount specifies how many build targets had been selected (it's important that this one is correct)
# param currentTarget specifies which target should be selected for the next build (zero based index)
# param setReleaseBuild defines whether the current target(s) will be set to a Release or a Debug build
# param disableShadowBuild defines whether to disable shadow build or leave it unchanged (no matter what is defined)
# param setForAll defines whether to set Release or Debug and ShadowBuild option for all targets or only for the currentTarget
def prepareBuildSettings(targetCount, currentTarget, setReleaseBuild=True, disableShadowBuild=True, setForAll=True):
switchViewTo(ViewConstants.PROJECTS)
success = True
for current in range(targetCount):
if setForAll or current == currentTarget:
switchToBuildOrRunSettingsFor(targetCount, current, ProjectSettings.BUILD)
# TODO: Improve selection of Release/Debug version
if setReleaseBuild:
chooseThis = "Release"
else:
chooseThis = "Debug"
editBuildCfg = waitForObject("{leftWidget={text='Edit build configuration:' type='QLabel' "
"unnamed='1' visible='1'} unnamed='1' type='QComboBox' visible='1'}")
selectFromCombo(editBuildCfg, chooseThis)
ensureChecked("{name='shadowBuildCheckBox' type='QCheckBox' visible='1'}", not disableShadowBuild)
# get back to the current target
if currentTarget < 0 or currentTarget >= targetCount:
test.warning("Parameter currentTarget is out of range - will be ignored this time!")
else:
switchToBuildOrRunSettingsFor(targetCount, currentTarget, ProjectSettings.BUILD)
switchViewTo(ViewConstants.EDIT)
return success
# this function switches to the build or the run settings (inside the Projects view)
# if you haven't already switched to the Projects view this will fail and return False
# param currentTarget specifies the target for which to switch into the specified settings (zero based index)
# param targetCount specifies the number of targets currently defined (must be correct!)
# param projectSettings specifies where to switch to (must be one of ProjectSettings.BUILD or ProjectSettings.RUN)
def switchToBuildOrRunSettingsFor(targetCount, currentTarget, projectSettings, isQtQuickUI=False):
try:
targetSel = waitForObject("{type='ProjectExplorer::Internal::TargetSelector' unnamed='1' "
"visible='1' window=':Qt Creator_Core::Internal::MainWindow'}", 5000)
except LookupError:
if isQtQuickUI:
if projectSettings == ProjectSettings.RUN:
mouseClick(waitForObject(":*Qt Creator.DoubleTabWidget_ProjectExplorer::Internal::DoubleTabWidget"), 70, 44, 0, Qt.LeftButton)
return True
else:
test.fatal("Don't know what you're trying to switch to")
return False
# there's only one target defined so use the DoubleTabWidget instead
if projectSettings == ProjectSettings.RUN:
mouseClick(waitForObject(":*Qt Creator.DoubleTabWidget_ProjectExplorer::Internal::DoubleTabWidget"), 170, 44, 0, Qt.LeftButton)
elif projectSettings == ProjectSettings.BUILD:
mouseClick(waitForObject(":*Qt Creator.DoubleTabWidget_ProjectExplorer::Internal::DoubleTabWidget"), 70, 44, 0, Qt.LeftButton)
else:
test.fatal("Don't know what you're trying to switch to")
return False
return True
ADD_BUTTON_WIDTH = 27 # bad... (taken from source)
selectorWidth = (targetSel.width - 3 - 2 * (ADD_BUTTON_WIDTH + 1)) / targetCount - 1
yToClick = targetSel.height * 3 / 5 + 5
if projectSettings == ProjectSettings.RUN:
xToClick = ADD_BUTTON_WIDTH + (selectorWidth + 1) * currentTarget - 2 + selectorWidth / 2 + 15
elif projectSettings == ProjectSettings.BUILD:
xToClick = ADD_BUTTON_WIDTH + (selectorWidth + 1) * currentTarget - 2 + selectorWidth / 2 - 15
else:
test.fatal("Don't know what you're trying to switch to")
return False
mouseClick(targetSel, xToClick, yToClick, 0, Qt.LeftButton)
return True
# this function switches "Run in terminal" on or off in a project's run settings
# param targetCount specifies the number of targets currently defined (must be correct!)
# param currentTarget specifies the target for which to switch into the specified settings (zero based index)
# param runInTerminal specifies if "Run in terminal should be turned on (True) or off (False)
def setRunInTerminal(targetCount, currentTarget, runInTerminal=True):
switchViewTo(ViewConstants.PROJECTS)
switchToBuildOrRunSettingsFor(targetCount, currentTarget, ProjectSettings.RUN)
ensureChecked("{window=':Qt Creator_Core::Internal::MainWindow' text='Run in terminal'\
type='QCheckBox' unnamed='1' visible='1'}", runInTerminal)
switchViewTo(ViewConstants.EDIT)
# helper function to get some Qt information for the current (already configured) project
# param kitCount is the number of kits configured for the current project
# param alreadyOnProjectsBuildSettings if set to True you have to make sure that you're
# on the Projects view on the Build settings page (otherwise this function will end
# up in a ScriptError)
# param afterSwitchTo if you want to leave the Projects view/Build settings when returning
# from this function you can set this parameter to one of the ViewConstants
# this function returns an array of 4 elements (all could be None):
# * the first element holds the Qt version
# * the second element holds the mkspec
# * the third element holds the Qt bin path
# * the fourth element holds the Qt lib path
# of the current active project
def getQtInformationForBuildSettings(kitCount, alreadyOnProjectsBuildSettings=False, afterSwitchTo=None):
if not alreadyOnProjectsBuildSettings:
switchViewTo(ViewConstants.PROJECTS)
switchToBuildOrRunSettingsFor(kitCount, 0, ProjectSettings.BUILD)
clickButton(waitForObject(":Qt Creator_SystemSettings.Details_Utils::DetailsButton"))
model = waitForObject(":scrollArea_QTableView").model()
qtDir = None
for row in range(model.rowCount()):
index = model.index(row, 0)
text = str(model.data(index).toString())
if text == "QTDIR":
qtDir = str(model.data(model.index(row, 1)).toString())
break
if qtDir == None:
test.fatal("UI seems to have changed - couldn't get QTDIR for this configuration.")
return None, None, None, None
qmakeCallLabel = waitForObject("{text?='<b>qmake:</b> qmake*' type='QLabel' unnamed='1' visible='1' "
"window=':Qt Creator_Core::Internal::MainWindow'}")
mkspec = __getMkspecFromQMakeCall__(str(qmakeCallLabel.text))
qtVersion = getQtInformationByQMakeCall(qtDir, QtInformation.QT_VERSION)
qtLibPath = getQtInformationByQMakeCall(qtDir, QtInformation.QT_LIBPATH)
qtBinPath = getQtInformationByQMakeCall(qtDir, QtInformation.QT_BINPATH)
if afterSwitchTo:
if ViewConstants.WELCOME <= afterSwitchTo <= ViewConstants.LAST_AVAILABLE:
switchViewTo(afterSwitchTo)
else:
test.warning("Don't know where you trying to switch to (%s)" % afterSwitchTo)
return qtVersion, mkspec, qtBinPath, qtLibPath
def getQtInformationForQmlProject():
fancyToolButton = waitForObject(":*Qt Creator_Core::Internal::FancyToolButton")
kit = __getTargetFromToolTip__(str(fancyToolButton.toolTip))
if not kit:
test.fatal("Could not figure out which kit you're using...")
return None, None, None, None
test.log("Searching for Qt information for kit '%s'" % kit)
invokeMenuItem("Tools", "Options...")
waitForObjectItem(":Options_QListView", "Build & Run")
clickItem(":Options_QListView", "Build & Run", 14, 15, 0, Qt.LeftButton)
clickOnTab(":Options.qt_tabwidget_tabbar_QTabBar", "Kits")
targetsTreeView = waitForObject(":Kits_Or_Compilers_QTreeView")
if not __selectTreeItemOnBuildAndRun__(targetsTreeView, "%s(\s\(default\))?" % kit, True):
test.fatal("Found no matching kit - this shouldn't happen.")
clickButton(waitForObject(":Options.Cancel_QPushButton"))
return None, None, None, None
qtVersionStr = str(waitForObject(":Kits_QtVersion_QComboBox").currentText)
test.log("Kit '%s' uses Qt Version '%s'" % (kit, qtVersionStr))
clickOnTab(":Options.qt_tabwidget_tabbar_QTabBar", "Qt Versions")
treeWidget = waitForObject(":QtSupport__Internal__QtVersionManager.qtdirList_QTreeWidget")
if not __selectTreeItemOnBuildAndRun__(treeWidget, qtVersionStr):
test.fatal("Found no matching Qt Version for kit - this shouldn't happen.")
clickButton(waitForObject(":Options.Cancel_QPushButton"))
return None, None, None, None
qmake = str(waitForObject(":QtSupport__Internal__QtVersionManager.qmake_QLabel").text)
test.log("Qt Version '%s' uses qmake at '%s'" % (qtVersionStr, qmake))
qtDir = os.path.dirname(os.path.dirname(qmake))
qtVersion = getQtInformationByQMakeCall(qtDir, QtInformation.QT_VERSION)
qtLibPath = getQtInformationByQMakeCall(qtDir, QtInformation.QT_LIBPATH)
mkspec = __getMkspecFromQmake__(qmake)
clickButton(waitForObject(":Options.Cancel_QPushButton"))
return qtVersion, mkspec, qtLibPath, qmake
def __selectTreeItemOnBuildAndRun__(treeViewOrWidget, itemText, isRegex=False):
model = treeViewOrWidget.model()
test.compare(model.rowCount(), 2, "Verifying expected section count")
autoDetected = model.index(0, 0)
test.compare(autoDetected.data().toString(), "Auto-detected", "Verifying label for section")
manual = model.index(1, 0)
test.compare(manual.data().toString(), "Manual", "Verifying label for section")
if isRegex:
pattern = re.compile(itemText)
found = False
for section in [autoDetected, manual]:
for dumpedItem in dumpItems(model, section):
if (isRegex and pattern.match(dumpedItem)
or itemText == dumpedItem):
found = True
item = ".".join([str(section.data().toString()),
dumpedItem.replace(".", "\\.").replace("_", "\\_")])
clickItem(treeViewOrWidget, item, 5, 5, 0, Qt.LeftButton)
break
if found:
break
return found
def __getTargetFromToolTip__(toolTip):
if toolTip == None or not isinstance(toolTip, (str, unicode)):
test.warning("Parameter toolTip must be of type str or unicode and can't be None!")
return None
pattern = re.compile(".*<b>Kit:</b>(.*)<b>Deploy.*")
target = pattern.match(toolTip)
if target == None:
test.fatal("UI seems to have changed - expected ToolTip does not match.",
"ToolTip: '%s'" % toolTip)
return None
return target.group(1).split("<br/>")[0].strip()
def __getMkspecFromQMakeCall__(qmakeCall):
qCall = qmakeCall.split("</b>")[1].strip()
tmp = qCall.split()
for i in range(len(tmp)):
if tmp[i] == '-spec' and i + 1 < len(tmp):
return tmp[i + 1]
test.fatal("Couldn't get mkspec from qmake call '%s'" % qmakeCall)
return None
# this function queries information from qmake
# param qtDir set this to a path that holds a valid Qt
# param which set this to one of the QtInformation "constants"
# the function will return the wanted information or None if something went wrong
def getQtInformationByQMakeCall(qtDir, which):
qmake = os.path.join(qtDir, "bin", "qmake")
if platform.system() in ('Microsoft', 'Windows'):
qmake += ".exe"
if not os.path.exists(qmake):
test.fatal("Given Qt directory does not exist or does not contain bin/qmake.",
"Constructed path: '%s'" % qmake)
return None
query = ""
if which == QtInformation.QT_VERSION:
query = "QT_VERSION"
elif which == QtInformation.QT_BINPATH:
query = "QT_INSTALL_BINS"
elif which == QtInformation.QT_LIBPATH:
query = "QT_INSTALL_LIBS"
else:
test.fatal("You're trying to fetch an unknown information (%s)" % which)
return None
return getOutputFromCmdline("%s -query %s" % (qmake, query)).strip()
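# Hedged usage sketch (not part of the original helper file): a typical Squish
# test combines these helpers roughly as below. ViewConstants, ProjectSettings
# and QtInformation come from the shared test utilities, so the exact constant
# values used here are assumptions.
#
# prepareBuildSettings(targetCount=2, currentTarget=0, setReleaseBuild=False)
# switchViewTo(ViewConstants.PROJECTS)
# switchToBuildOrRunSettingsFor(2, 0, ProjectSettings.BUILD)
# qtVersion, mkspec, qtBinPath, qtLibPath = \
#     getQtInformationForBuildSettings(2, alreadyOnProjectsBuildSettings=True,
#                                      afterSwitchTo=ViewConstants.EDIT)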
|
lgpl-2.1
| 179,521,143,003,143,680 | 54.402174 | 142 | 0.683539 | false |
ecederstrand/exchangelib
|
exchangelib/services/get_user_configuration.py
|
1
|
1729
|
from .common import EWSAccountService
from ..properties import UserConfiguration
from ..util import create_element, set_xml_value
ID = 'Id'
DICTIONARY = 'Dictionary'
XML_DATA = 'XmlData'
BINARY_DATA = 'BinaryData'
ALL = 'All'
PROPERTIES_CHOICES = {ID, DICTIONARY, XML_DATA, BINARY_DATA, ALL}
class GetUserConfiguration(EWSAccountService):
"""MSDN:
https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/getuserconfiguration-operation
"""
SERVICE_NAME = 'GetUserConfiguration'
def call(self, user_configuration_name, properties):
if properties not in PROPERTIES_CHOICES:
raise ValueError("'properties' %r must be one of %s" % (properties, PROPERTIES_CHOICES))
for elem in self._get_elements(payload=self.get_payload(
user_configuration_name=user_configuration_name, properties=properties
)):
if isinstance(elem, Exception):
yield elem
continue
yield UserConfiguration.from_xml(elem=elem, account=self.account)
@classmethod
def _get_elements_in_container(cls, container):
return container.findall(UserConfiguration.response_tag())
def get_payload(self, user_configuration_name, properties):
getuserconfiguration = create_element('m:%s' % self.SERVICE_NAME)
set_xml_value(getuserconfiguration, user_configuration_name, version=self.account.version)
user_configuration_properties = create_element('m:UserConfigurationProperties')
set_xml_value(user_configuration_properties, properties, version=self.account.version)
getuserconfiguration.append(user_configuration_properties)
return getuserconfiguration
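# Hedged usage sketch (not part of the original module). The service is
# normally driven through an Account; the 'account' and 'config_name' objects
# below are assumptions for illustration, with config_name expected to be a
# UserConfigurationName-style property pointing at a folder.
#
# svc = GetUserConfiguration(account=account)
# for cfg in svc.call(user_configuration_name=config_name, properties=ALL):
#     if isinstance(cfg, Exception):
#         raise cfg
#     print(cfg.dictionary)   # Id / Dictionary / XmlData / BinaryData, per 'properties'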
|
bsd-2-clause
| -5,405,332,260,484,910,000 | 41.170732 | 115 | 0.712551 | false |
mghweb/sublime-miva-ide
|
mvt-toggle-comment.py
|
1
|
7319
|
import sublime, sublime_plugin
def advance_to_first_non_white_space_on_line(view, pt):
while True:
c = view.substr(pt)
if c == " " or c == "\t":
pt += 1
else:
break
return pt
def has_non_white_space_on_line(view, pt):
while True:
c = view.substr(pt)
if c == " " or c == "\t":
pt += 1
else:
return c != "\n"
def build_comment_data(view, pt):
shell_vars = view.meta_info("shellVariables", pt)
if not shell_vars:
return ([], [])
# transform the list of dicts into a single dict
all_vars = {}
for v in shell_vars:
if 'name' in v and 'value' in v:
all_vars[v['name']] = v['value']
line_comments = []
block_comments = []
# transform the dict into a single array of valid comments
suffixes = [""] + ["_" + str(i) for i in range(1, 10)]
for suffix in suffixes:
start = all_vars.setdefault("TM_MVT_COMMENT_START" + suffix)
end = all_vars.setdefault("TM_MVT_COMMENT_END" + suffix)
mode = all_vars.setdefault("TM_MVT_COMMENT_MODE" + suffix)
disable_indent = all_vars.setdefault("TM_MVT_COMMENT_DISABLE_INDENT" + suffix)
if start and end:
block_comments.append((start, end, disable_indent == 'yes'))
block_comments.append((start.strip(), end.strip(), disable_indent == 'yes'))
elif start:
line_comments.append((start, disable_indent == 'yes'))
line_comments.append((start.strip(), disable_indent == 'yes'))
return (line_comments, block_comments)
class ToggleMvtCommentCommand(sublime_plugin.TextCommand):
def remove_block_comment(self, view, edit, comment_data, region):
(line_comments, block_comments) = comment_data
# Call extract_scope from the midpoint of the region, as calling it
# from the start can give false results if the block comment begin/end
# markers are assigned their own scope, as is done in HTML.
whole_region = view.extract_scope(region.begin() + region.size() / 2)
for c in block_comments:
(start, end, disable_indent) = c
start_region = sublime.Region(whole_region.begin(),
whole_region.begin() + len(start))
end_region = sublime.Region(whole_region.end() - len(end),
whole_region.end())
if view.substr(start_region) == start and view.substr(end_region) == end:
# It's faster to erase the start region first
view.erase(edit, start_region)
end_region = sublime.Region(
end_region.begin() - start_region.size(),
end_region.end() - start_region.size())
view.erase(edit, end_region)
return True
return False
def remove_line_comment(self, view, edit, comment_data, region):
(line_comments, block_comments) = comment_data
found_line_comment = False
start_positions = [advance_to_first_non_white_space_on_line(view, r.begin())
for r in view.lines(region)]
start_positions.reverse()
for pos in start_positions:
for c in line_comments:
(start, disable_indent) = c
comment_region = sublime.Region(pos,
pos + len(start))
if view.substr(comment_region) == start:
view.erase(edit, comment_region)
found_line_comment = True
break
return found_line_comment
def is_entirely_line_commented(self, view, comment_data, region):
(line_comments, block_comments) = comment_data
start_positions = [advance_to_first_non_white_space_on_line(view, r.begin())
for r in view.lines(region)]
start_positions = list(filter(lambda p: has_non_white_space_on_line(view, p),
start_positions))
if len(start_positions) == 0:
return False
for pos in start_positions:
found_line_comment = False
for c in line_comments:
(start, disable_indent) = c
comment_region = sublime.Region(pos,
pos + len(start))
if view.substr(comment_region) == start:
found_line_comment = True
if not found_line_comment:
return False
return True
def block_comment_region(self, view, edit, block_comment_data, region):
(start, end, disable_indent) = block_comment_data
if region.empty():
# Silly buggers to ensure the cursor doesn't end up after the end
# comment token
view.replace(edit, sublime.Region(region.end()), 'x')
view.insert(edit, region.end() + 1, end)
view.replace(edit, sublime.Region(region.end(), region.end() + 1), '')
view.insert(edit, region.begin(), start)
else:
view.insert(edit, region.end(), end)
view.insert(edit, region.begin(), start)
def line_comment_region(self, view, edit, line_comment_data, region):
(start, disable_indent) = line_comment_data
start_positions = [r.begin() for r in view.lines(region)]
start_positions.reverse()
# Remove any blank lines from consideration, they make getting the
# comment start markers to line up challenging
non_empty_start_positions = list(filter(lambda p: has_non_white_space_on_line(view, p),
start_positions))
# If all the lines are blank however, just comment away
if len(non_empty_start_positions) != 0:
start_positions = non_empty_start_positions
if not disable_indent:
min_indent = None
# This won't work well with mixed spaces and tabs, but really,
# don't do that!
for pos in start_positions:
indent = advance_to_first_non_white_space_on_line(view, pos) - pos
if min_indent == None or indent < min_indent:
min_indent = indent
if min_indent != None and min_indent > 0:
start_positions = [r + min_indent for r in start_positions]
for pos in start_positions:
view.insert(edit, pos, start)
def add_comment(self, view, edit, comment_data, prefer_block, region):
(line_comments, block_comments) = comment_data
if len(line_comments) == 0 and len(block_comments) == 0:
return
if len(block_comments) == 0:
prefer_block = False
if len(line_comments) == 0:
prefer_block = True
if region.empty():
if prefer_block:
# add the block comment
self.block_comment_region(view, edit, block_comments[0], region)
else:
# comment out the line
self.line_comment_region(view, edit, line_comments[0], region)
else:
if prefer_block:
# add the block comment
self.block_comment_region(view, edit, block_comments[0], region)
else:
# add a line comment to each line
self.line_comment_region(view, edit, line_comments[0], region)
def run(self, edit, block=False):
for region in self.view.sel():
comment_data = build_comment_data(self.view, region.begin())
if (region.end() != self.view.size() and
build_comment_data(self.view, region.end()) != comment_data):
# region spans languages, nothing we can do
continue
if self.remove_block_comment(self.view, edit, comment_data, region):
continue
if self.is_entirely_line_commented(self.view, comment_data, region):
self.remove_line_comment(self.view, edit, comment_data, region)
continue
has_line_comment = len(comment_data[0]) > 0
if not has_line_comment and not block and region.empty():
# Use block comments to comment out the line
line = self.view.line(region.a)
line = sublime.Region(
advance_to_first_non_white_space_on_line(self.view, line.a),
line.b)
# Try and remove any existing block comment now
if self.remove_block_comment(self.view, edit, comment_data, line):
continue
self.add_comment(self.view, edit, comment_data, block, line)
continue
# Add a comment instead
self.add_comment(self.view, edit, comment_data, block, region)
|
mit
| -864,045,968,172,918,800 | 30.416309 | 89 | 0.679328 | false |
PnMercantour/autorisations_circulation
|
auth_circu/routes/export.py
|
1
|
5388
|
import unicodedata
from io import BytesIO
from datetime import datetime
import flask_excel
import weasyprint
from flask import request, send_file, g, make_response, jsonify
from secretary import Renderer
from werkzeug.exceptions import BadRequest, abort
from sqlalchemy.orm.exc import NoResultFound
from pypnusershub.routes import check_auth
from ..conf import app
from ..db.models import AuthDocTemplate, AuthRequest
from ..db.utils import get_object_or_abort
odt_renderer = Renderer()
@app.route('/exports/authorizations', methods=['POST'])
@check_auth(1)
def export_authorizations():
try:
authorizations = request.json['authorizations']
except KeyError:
raise BadRequest('"Authorizations" must be provided')
header = [
'#',
'Dates',
'Auteur',
'Adresse',
'Lieux',
'Véhicules',
]
rows = [header]
for auth in authorizations:
dates = ''
start = auth.get('auth_start_date', '')
if start:
dates = f"Début: {start}\n"
end = auth.get('auth_end_date', '')
if end:
dates += f'Fin: {end}'
name = {
'm': 'M. ',
'f': 'Mme. ',
'na': ''
}[auth.get('author_gender', '')]
name += auth.get('author_name') or ''
rows.append([
auth.get('number', '?'),
dates,
name,
auth.get('author_address') or '',
', '.join(place['name'] for place in auth.get('places', [])),
', '.join(auth.get('vehicules', []))
])
if request.args.get('format', 'ods') != 'pdf':
return flask_excel.make_response_from_array(rows, "ods")
else:
template = app.jinja_env.get_template('pdf-export.html')
now = f'{datetime.now():%d/%m/%Y}'
rendered = template.render(auth_requests=rows, date=now)
pdf = BytesIO(weasyprint.HTML(string=rendered).write_pdf())
return send_file(
pdf,
attachment_filename=f'autorisations - {now}.pdf',
as_attachment=True
)
def json_abort(message, status_code=400):
    """ Like flask's abort, but in a JSON format """
    msg = jsonify(message=message)
    return abort(make_response(msg, status_code))
@app.route('/exports/authorizations/<auth_id>', methods=['POST', 'GET'])
@check_auth(2)
def generate_auth_doc(auth_id):
auth_req = get_object_or_abort(AuthRequest, AuthRequest.id == auth_id)
if not auth_req.valid:
return json_abort("Les brouillons ne peuvent être imprimés")
legal_contact = g.user.role.legal_contact
if not legal_contact or not legal_contact.content:
return json_abort(
"Votre compte n'a pas de coordonnées de contact associées "
"et ne peut donc imprimer un document. Veuilez demander à un "
"administrateur de vous les rajouter."
)
template = auth_req.template
if not template:
template_filter = AuthDocTemplate.default_for == "letter_other"
if auth_req.category == "salese":
template_filter = AuthDocTemplate.default_for == "letter_salese"
if auth_req.category == "agropasto":
template_filter = AuthDocTemplate.default_for == "letter_agropasto"
try:
template_filter &= (AuthDocTemplate.active == True)
template = AuthDocTemplate.query.filter(template_filter).one()
except NoResultFound:
return json_abort(
"Il manque un template de document. Veuillez contactez un "
"administrateur pour qu'il les mettent en ligne."
)
prefix = auth_req.author_prefix
if prefix:
prefix += " "
places = [place.name for place in auth_req.places]
vehicules = list(auth_req.vehicules)
auth_start_date = auth_req.auth_start_date.strftime('%d/%m/%Y')
auth_end_date = auth_req.auth_end_date.strftime('%d/%m/%Y')
# agro pasto letters don't display the day for those dates
if auth_req.category == "agropasto":
auth_start_date = auth_start_date[3:]
auth_end_date = auth_end_date[3:]
# generate an easy to manipulate data structure for building the
# authorizations cards. It's easier to do that here than in the template:
# we can now always act like we have several cards even if we have only
# one and always loop.
if auth_req.group_vehicules_on_doc:
cards = [vehicules]
else:
cards = [[v] for v in vehicules]
data = odt_renderer.render(
template.abs_path,
author_prefix=prefix,
auth_req=auth_req,
request_date=auth_req.request_date.strftime('%d/%m/%Y'),
feminin=auth_req.author_gender == "f",
auth_start_date=auth_start_date,
auth_end_date=auth_end_date,
places=places,
places_count=len(places),
vehicules=vehicules,
vehicules_count=len(vehicules),
doc_creation_date=datetime.now().strftime("%d %B %Y"),
legal_contact=legal_contact.content,
cards=cards
)
filename = f'{auth_req.author_name} - {datetime.now():%d/%m/%Y}.odt'
filename = unicodedata.normalize('NFKD', filename).encode('ascii', 'ignore')
return send_file(
BytesIO(data),
attachment_filename=filename.decode('ascii'),
as_attachment=True
)
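# Hedged usage sketch (not part of the original module): the list export route
# expects a JSON body with an "authorizations" list; the field names below are
# taken from the handler above, while the host, values and session cookie are
# made up for illustration.
#
# import requests
# payload = {"authorizations": [{
#     "number": 42,
#     "auth_start_date": "01/06/2017",
#     "auth_end_date": "30/09/2017",
#     "author_gender": "m",
#     "author_name": "Jean Dupont",
#     "author_address": "1 rue des Alpes",
#     "places": [{"name": "Vallon du Lauzanier"}],
#     "vehicules": ["AB-123-CD"],
# }]}
# requests.post("https://auth-circu.example.org/exports/authorizations?format=pdf",
#               json=payload, cookies=session_cookies)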
|
gpl-3.0
| 7,626,577,335,625,211,000 | 30.652941 | 80 | 0.606207 | false |
davecroll/data-tasks
|
test/TestDataEntityManager.py
|
1
|
1130
|
# test\TestDataEntityManager.py
import unittest
from unittest import TestCase

from datatasks.DataEntityManager import DataEntityManager
from datatasks.DataEntity import DataEntity
from datatasks.sources import FileSource
from datatasks.destinations import DatabaseDestination


class DataEntityManagerTests(TestCase):

    def test_init(self):
        dem = DataEntityManager(managed_class=DataEntity)
        dem1 = DataEntityManager(managed_class=DataEntity)
        self.assertEqual(dem, dem1)
        dem.clear()

    def test_basic_func(self):
        dem = DataEntityManager(managed_class=DataEntity)
        DataEntity('entity1')
        DataEntity('entity2')
        self.assertIsInstance(dem.get_entity('entity1'), DataEntity)
        dem.clear()

    def test_three(self):
        dem = DataEntityManager(managed_class=DataEntity)
        file_src = FileSource('file_source', 'amzn.csv')
        DatabaseDestination('db_dest', 'datatasks_test', 'amzn', source=file_src)
        self.assertEqual(len(dem.all()), 2)
        self.assertEqual(len(dem.destinations()), 1)


if __name__ == '__main__':
    unittest.main()
|
mit
| -1,377,128,645,068,313,600 | 28.736842 | 81 | 0.7 | false |
voicesauce/opensauce-python
|
opensauce/helpers.py
|
1
|
3298
|
"""Helper functions for OpenSauce
"""
# Licensed under Apache v2 (see LICENSE)
from __future__ import division
import math
import fileinput
import numpy as np
from scipy.io import wavfile
def wavread(fn):
"""Read in a 16-bit integer PCM WAV file for processing
Args:
fn - filename of WAV file [string]
Returns:
y_float - Audio samples in float format [NumPy vector]
y_int - Audio samples in int format [NumPy vector]
Fs - Sampling frequency in Hz [integer]
Emulate the parts of the Matlab wavread function that we need.
Matlab's wavread is used by voicesauce to read in the wav files for
processing. As a consequence, all the translated algorithms assume the
data from the wav file is in matlab form, which in this case means a double
precision float between -1 and 1. The corresponding scipy function returns
the actual integer PCM values from the file, which range between -32768 and
32767. (matlab's wavread *can* return the integers, but does not by
default and voicesauce uses the default). Consequently, after reading the
data using scipy's io.wavfile, we convert to float by dividing each integer
by 32768.
Also, save the 16-bit integer data in another NumPy vector.
The input WAV file is assumed to be in 16-bit integer PCM format.
"""
# For reference, I figured this out from:
# http://mirlab.org/jang/books/audiosignalprocessing/matlab4waveRead.asp?title=4-2%20Reading%20Wave%20Files
# XXX: if we need to handle 8 bit files we'll need to detect them and
# special case them here.
try:
Fs, y = wavfile.read(fn)
except ValueError:
raise
if y.dtype != 'int16':
raise IOError('Input WAV file must be in 16-bit integer PCM format')
return y/np.float64(32768.0), y, Fs
def round_half_away_from_zero(x):
"""Rounds a number according to round half away from zero method
Args:
x - number [float]
Returns:
q - rounded number [integer]
For example:
round_half_away_from_zero(3.5) = 4
round_half_away_from_zero(3.2) = 3
round_half_away_from_zero(-2.7) = -3
round_half_away_from_zero(-4.3) = -4
The reason for writing our own rounding function is that NumPy uses the
round-half-to-even method. There is a Python round() function, but it
doesn't work on NumPy vectors. So we wrote our own
round-half-away-from-zero method here.
"""
q = np.int_(np.sign(x) * np.floor(np.abs(x) + 0.5))
return q
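# Illustration (not part of the original module): the rounding helper also
# works element-wise on NumPy vectors, matching the scalar examples given in
# the docstring above.
#
# >>> round_half_away_from_zero(np.array([3.5, 3.2, -2.7, -4.3]))
# array([ 4,  3, -3, -4])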
def remove_empty_lines_from_file(fn):
""" Remove empty lines from a text file
Args:
fn - filename [string]
Returns: nothing
Has side effect of removing empty lines from file specified by fn
"""
f = fileinput.FileInput(fn, inplace=True)
for line in f:
stripped_line = line.rstrip()
if stripped_line:
print(stripped_line)
f.close()
def convert_boolean_for_praat(b):
""" Convert Python boolean for use in Praat
Praat uses "yes"/"no" or 1/0 values instead of True/False.
Convert True to "yes", False to "no"
"""
if b == True:
return "yes"
elif b == False:
return "no"
else:
raise ValueError('Input must be a Boolean')
|
apache-2.0
| -4,575,236,972,586,572,300 | 28.711712 | 111 | 0.661007 | false |
gtesei/fast-furious
|
competitions/tgs-salt-identification-challenge/solutions/9_place/models/inplace_abn/bn.py
|
1
|
3567
|
import torch
from queue import Queue
from .functions import *
from .abn import ABN
class InPlaceABN(ABN):
"""InPlace Activated Batch Normalization"""
def forward(self, x):
exponential_average_factor = 0.0
if self.training and self.track_running_stats:
self.num_batches_tracked += 1
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / self.num_batches_tracked.item()
else: # use exponential moving average
exponential_average_factor = self.momentum
return inplace_abn(x, self.weight, self.bias, self.running_mean, self.running_var,
self.training or not self.track_running_stats,
exponential_average_factor, self.eps, self.activation, self.slope)
class InPlaceABNSync(ABN):
"""InPlace Activated Batch Normalization with cross-GPU synchronization
This assumes that it will be replicated across GPUs using the same mechanism as in `nn.DataParallel`.
"""
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True,
activation="leaky_relu", slope=0.01, devices=None):
"""Creates a synchronized, InPlace Activated Batch Normalization module
Parameters
----------
num_features : int
Number of feature channels in the input and output.
devices : list of int or None
IDs of the GPUs that will run the replicas of this module.
eps : float
Small constant to prevent numerical issues.
momentum : float
Momentum factor applied to compute running statistics as.
affine : bool
If `True` apply learned scale and shift transformation after normalization.
activation : str
Name of the activation functions, one of: `leaky_relu`, `elu` or `none`.
slope : float
Negative slope for the `leaky_relu` activation.
"""
super().__init__(num_features=num_features, eps=eps, momentum=momentum, affine=affine,
track_running_stats=track_running_stats, activation=activation, slope=slope)
self.devices = devices if devices else list(range(torch.cuda.device_count()))
# Initialize queues
self.worker_ids = self.devices[1:]
self.master_queue = Queue(len(self.worker_ids))
self.worker_queues = [Queue(1) for _ in self.worker_ids]
def forward(self, x):
if len(self.devices) < 2:
# fallback for CPU mode or single GPU mode
return super().forward(x)
if x.get_device() == self.devices[0]:
# Master mode
extra = {
"is_master": True,
"master_queue": self.master_queue,
"worker_queues": self.worker_queues,
"worker_ids": self.worker_ids
}
else:
# Worker mode
extra = {
"is_master": False,
"master_queue": self.master_queue,
"worker_queue": self.worker_queues[self.worker_ids.index(x.get_device())]
}
return inplace_abn_sync(x, self.weight, self.bias, self.running_mean, self.running_var,
extra, self.training, self.momentum, self.eps, self.activation, self.slope)
def extra_repr(self):
rep = super().extra_repr()
rep += ', devices={devices}'.format(**self.__dict__)
return rep
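# Usage sketch (not part of the original file; the layer sizes and the
# DataParallel setup are assumptions): wrap a conv layer and the synchronized
# ABN, then replicate across GPUs so the master/worker queues exchange stats.
#   abn = InPlaceABNSync(64, activation="leaky_relu", slope=0.01)
#   block = torch.nn.Sequential(torch.nn.Conv2d(3, 64, 3, padding=1), abn)
#   model = torch.nn.DataParallel(block, device_ids=abn.devices)
#   out = model(images)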
|
mit
| 2,949,862,532,384,423,000 | 38.633333 | 107 | 0.596299 | false |
zielmicha/satori
|
satori.web/satori/web/utils/xmlparams.py
|
1
|
1156
|
from satori.client.common import want_import
want_import(globals(), '*')
from datetime import datetime
from xml.dom import minidom
from django import forms
from satori.web.utils import forms as satoriforms
from satori.tools.params import *
class ParamsForm(forms.Form):
fieldtypes = {
OaTypeText : forms.CharField,
OaTypeSize : satoriforms.SatoriSizeField,
OaTypeTime : satoriforms.SatoriTimedeltaField,
OaTypeInteger : forms.IntegerField,
OaTypeFloat : forms.FloatField,
OaTypeDatetime : satoriforms.SatoriDateTimeField,
OaTypeBoolean : forms.BooleanField,
# OaTypeBlob : forms.FileField
}
def __init__(self,parser,*args,**kwargs):
super(ParamsForm,self).__init__(*args,**kwargs)
for f in parser.params:
ftype = ParamsForm.fieldtypes[f.type_]
if f.type_=='bool':
req = False
else:
req = f.required
self.fields[f.name] = ftype(label=f.description,required=req)
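# Usage sketch (not part of the original file; the view/request names are
# assumptions): build the form from an Oa params parser and validate input.
#   form = ParamsForm(parser, request.POST or None)
#   if form.is_valid():
#       values = form.cleaned_data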
|
mit
| 6,455,497,379,610,604,000 | 37.566667 | 73 | 0.583045 | false |
sallyom/atomic
|
Atomic/verify.py
|
1
|
11302
|
from . import util
from . import Atomic
import os
from docker.errors import NotFound
from operator import itemgetter
class Verify(Atomic):
DEBUG = False
def verify(self):
"""
Primary def for atomic verify
:return: None
"""
def fix_layers(layers):
"""
Takes the input of layers (get_layers()) and adds a key
            and value for index. Also, if the Tag value is not blank but Name
            is blank, copies Tag into Name.
:param layers:
:return: updated list of layers
"""
for layer in layers:
layer['index'] = layers.index(layer)
                if layer['Tag'] != "" and layer['Name'] == "":
layer['Name'] = layer['Tag']
return layers
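        # Illustration (not part of the original code): fix_layers turns a
        # layer such as {'Id': 'abc', 'Name': '', 'Tag': 'repo/name', ...}
        # into {'Id': 'abc', 'Name': 'repo/name', 'Tag': 'repo/name', ...,
        # 'index': 0}, so later lookups can rely on 'Name' and 'index'.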
layers = fix_layers(self.get_layers())
if self.DEBUG:
for l in layers:
util.output_json(l)
uniq_names = list(set(x['Name'] for x in layers if x['Name'] != ''))
base_images = self.get_tagged_images(uniq_names, layers)
if self.DEBUG:
for b in base_images:
util.output_json(b)
if self.args.verbose:
self._print_verify_verbose(base_images, self.image)
# Do we have any layers that are not up to date?
elif not all([False for x in base_images if x['local_nvr'] != x['latest_nvr']]):
self._print_verify(base_images, self.image)
else:
# Check if any of the base_images do not have versioning information
versions = [x['local_nvr'] for x in base_images] + [x['latest_nvr'] for x in base_images]
if 'Version unavailable' in versions:
util.writeOut("\nWARNING: One or more of the image layers does not have")
util.writeOut("{}versioning information. Printing each image layer".format(" " * 9))
util.writeOut("{}verbosely.".format(" " * 9))
self._print_verify_verbose(base_images, self.image)
else:
# Didn't detect any version differences, do nothing
pass
def get_tagged_images(self, names, layers):
"""
        Returns a list of dicts with image names and their tag names.
:param names:
:param layers:
:return: list of sorted dicts (by index)
"""
base_images = []
for name in names:
remote = False
            _match = next((x for x in layers if x['Name'] == name and x['Tag'] != ''), None)
local_nvr = ""
if _match is not None:
if self.is_repo_from_local_registry(_match['Id']):
local_nvr = latest_version = _match['Version']
remote = True
else:
latest_version = self.get_latest_remote_version(_match['Tag'])
no_version = (latest_version == "")
iid = _match["Id"]
tag = _match['Tag']
_index = self.get_index(name, layers, iid)
else:
_index = self.get_index(name, layers)
layer = layers[_index]
if layer["Version"] is not "" and layer['Name'] is not "":
iid = layer['Id']
local_nvr = layer['Version']
no_version = False
image = self.d.inspect_image(iid)
labels = image.get('Config', []).get('Labels', [])
if 'Authoritative_Registry' in labels and 'Name' in labels:
tag = os.path.join(labels['Authoritative_Registry'], labels['Name'])
if self.is_repo_from_local_registry(iid):
                    # Inspect again by tag in case the image isn't the latest
try:
latest_version = self.d.inspect_image(tag)['Version']
except NotFound:
latest_version = layer['Version']
else:
# Do a remote inspect of images
latest_version = self.get_latest_remote_version(tag)
remote = True
else:
tag = "Unknown"
try:
latest_version = self.get_latest_remote_version(name)
except NotFound:
latest_version = "Unknown"
else:
iid = "Unknown"
latest_version = self.get_local_latest_version(name)
local_nvr = name
tag = "Unknown"
remote = False
no_version = True
base_images.append({'iid': iid,
'name': name,
'local_nvr': local_nvr,
'latest_nvr': latest_version,
'remote': remote,
'no_version': no_version,
'tag': tag,
'index': _index
})
return sorted(base_images, key=itemgetter('index'))
def is_repo_from_local_registry(self, input_repo):
"""
        Determine if a given repo comes from a local-only registry
:param input_repo: str repository name
:return: bool
"""
        # We need to check if there are other images with
        # the same IID because the input might not be fully
# qualified
iid = self.d.inspect_image(input_repo)['Id']
# Get a flat list of repo names associated with the iid
similar = [_repo for repos in [x['RepoTags'] for x in self.d.images()
if x['Id'] == iid] for _repo in repos]
results = []
for repo_ in similar:
(reg, repo, tag) = util._decompose(repo_)
results.append(self.is_registry_local(reg))
        return all(results)
def is_registry_local(self, registry):
"""
Determine if a given registry is local only
:param registry: str registry name
:return: bool
"""
        return registry not in self.get_registries()
def get_registries(self):
"""
Gets the names of the registries per /etc/sysconfig/conf
:return: a list of the registries
"""
registries = []
docker_info = self.d.info()
if 'RegistryConfig' not in docker_info:
raise ValueError("This docker version does not export its registries.")
for _index in docker_info['RegistryConfig']['IndexConfigs']:
registries.append(_index)
return registries
@staticmethod
def _print_verify(base_images, image):
"""
The standard non-verbose print for atomic verify
:param base_images:
:param image:
:return: None
"""
util.writeOut("\n{} contains images or layers that have updates:".format(image))
for _image in base_images:
local = _image['local_nvr']
latest = _image['latest_nvr']
if local != latest:
util.writeOut("\n{0} '{1}' has an update to '{2}'"
.format(" " * 5, local, latest))
util.writeOut("\n")
@staticmethod
def _print_verify_verbose(base_images, image):
"""
Implements a verbose printout of layers. Can be called with
atomic verify -v or if we detect some layer does not have
versioning information.
:param base_images:
:param image:
:return: None
"""
def max_name(base_images):
no_version_match = [len(x['tag']) + len(x['local_nvr']) + 5 for x in base_images if x['no_version']]
return max([len(x['local_nvr']) for x in base_images] + no_version_match)
_max = max_name(base_images)
_max_name = 30 if _max < 30 else _max
three_col = " {0:" + \
str(_max_name) + "} {1:" + \
str(_max_name) + "} {2:1}"
util.writeOut("\n{} contains the following images:\n".format(image))
for _image in base_images:
local = _image['local_nvr']
latest = _image['latest_nvr']
if _image['no_version']:
tag = _image['tag']
local = "{0} ({1})".format(tag, local)
latest = "{0} ({1})".format(tag, latest)
remote = "*" if local != latest else ""
util.writeOut(three_col.format(local, latest, remote))
util.writeOut("\n * = version difference\n")
@staticmethod
def get_index(name, layers, _id="0"):
"""
        Finds the index of the layer matching the given id, falling back to a
        match by name.
        :param name:
        :param layers:
        :param _id:
        :return: int index of the matching layer
"""
try:
try:
_match = (x for x in layers if x["Id"] == _id).__next__()
except:
_match = (x for x in layers if x["Id"] == _id).next()
except StopIteration:
# We were unable to associate IDs due to the local image being set
            # to intermediate by docker because it is outdated. Therefore we find
# the first instance by name for the index
try:
_match = (x for x in layers if x["Name"] == name).__next__()
except:
_match = (x for x in layers if x["Name"] == name).next()
return _match['index']
def get_local_latest_version(self, name):
"""
Obtain the latest version of a local image
:param name:
        :return: str of nvr
"""
images = self.get_images()
for image in images:
if 'Labels' in image and image['Labels'] is not None:
if self.pull_label(image, 'Name') == name:
return self.assemble_nvr(image)
else:
continue
def get_latest_remote_version(self, tag):
r_inspect = self.d.remote_inspect(tag)
if 'Labels' in r_inspect['Config'] \
and r_inspect['Config']['Labels'] is not None:
latest_version = self.assemble_nvr(r_inspect['Config'])
else:
latest_version = "Version unavailable"
return latest_version
def assemble_nvr(self, image):
"""
Simple formatting def for NVR
:param image:
:return: str
"""
nvr = "%s-%s-%s" % (self.pull_label(image, 'Name'),
self.pull_label(image, 'Version'),
self.pull_label(image, 'Release'))
return nvr
@staticmethod
def get_local_version(name, layers):
for layer in layers:
            if layer['Name'] == name:
return layer['Version'] if 'Version' in layer \
else "Version unavailable"
@staticmethod
def pull_label(image, key):
if key in image["Labels"]:
return image['Labels'][key]
|
lgpl-2.1
| 7,556,177,246,161,963,000 | 39.508961 | 112 | 0.500442 | false |
hmtai6/universe_NeonRace-v0
|
DQN_breakout/DQN.py
|
1
|
9601
|
import argparse
import logging
import sys
import gc
import cv2
import matplotlib.pyplot as plt
import gym
import universe # register the universe environments
from universe import wrappers
from collections import deque
from skimage.color import rgb2gray
from skimage.transform import resize
import numpy as np
import tensorflow as tf
import time
import gym, time, random, threading
from keras.models import *
from keras.layers import *
from keras import backend as K
from keras.models import load_model
LEARNING_RATE = 0.005
MOMENTUM = 0.2
MIN_GRAD = 0.0001
ENV_NAME = 'break_out'
SAVE_SUMMARY_PATH = './logs'
SAVE_NETWORK_PATH = './network'
LOAD_NETWOROK = False
INITIAL_REPLAY_SIZE = 200000 # Nb steps for memory, before training
NUM_REPLAY_MEMORY = 400000 # Number of replay memory the agent uses for training
TRAIN_INTERVAL = 1000
# The three constants below are used later in this file but were not defined
# in it; the values here are assumptions added so the script can run.
BATCH_SIZE = 32
TARGET_UPDATE_INTERVAL = 10000
SAVE_INTERVAL = 300000
GAMMA = 0.99 # Discount factor
STATE_LENGTH = 4 # Number of most recent frames to produce the input to the network
FRAME_WIDTH = 84
FRAME_HEIGHT = 84
class DQN:
def __init__(self, input_shape, nb_actions,
init_epsilon=1.0,
final_epsilon=0.1,
exploration_steps=1000000):
self.input_shape = input_shape
self.nb_actions = nb_actions
self.final_epsilon = final_epsilon
self.epsilon = init_epsilon
self.epsilon_step = (init_epsilon - final_epsilon) / exploration_steps
self.t = 0
# Parameters used for summary
self.total_reward = 0
self.total_q_max = 0
self.total_loss = 0
self.duration = 0
self.episode = 0
# create replay memory
self.replay_memory = deque()
# create network
self.state, self.q_vals, self.network = self._build_network()
q_network_weights = self.network.trainable_weights
# create target network
self.state_t, self.q_vals_t, self.network_t = self._build_network()
q_network_weights_t = self.network_t.trainable_weights
# define copy operation
self.update_target_network = [q_network_weights_t[i].assign(q_network_weights[i]) for i in range(len(q_network_weights_t))]
# Define loss and gradient update operation
self.a, self.y, self.loss, self.grads_update = self._build_train_op(q_network_weights)
self.sess = tf.InteractiveSession()
self.saver = tf.train.Saver(q_network_weights)
self.summary_placeholders, self.update_ops, self.summary_op = self._build_summary()
self.summary_writer = tf.summary.FileWriter(SAVE_SUMMARY_PATH, self.sess.graph)
if not os.path.exists(SAVE_NETWORK_PATH):
os.makedirs(SAVE_NETWORK_PATH)
self.sess.run(tf.global_variables_initializer())
if LOAD_NETWOROK:
self._load_netowrk()
self.sess.run(self.update_target_network)
def _build_network(self):
model = Sequential()
model.add(Conv2D(32, 8, strides=(4, 4), activation='relu', input_shape=[self.input_shape[0], self.input_shape[1], self.input_shape[2]]))
model.add(Conv2D(64, 4, strides=(2, 2), activation='relu'))
model.add(Conv2D(64, 3, strides=(1, 1), activation='relu'))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(self.nb_actions))
state = tf.placeholder(tf.float32, [None, self.input_shape[0], self.input_shape[1], self.input_shape[2]])
q_vals = model(state)
return state, q_vals, model
def _build_train_op(self, network_weights):
a = tf.placeholder(tf.int64, [None])
y = tf.placeholder(tf.float32, [None])
# convert into to one hot
a_one_hot = tf.one_hot(a, self.nb_actions, 1.0, 0.)
q_value = tf.reduce_sum(tf.multiply(self.q_vals, a_one_hot), reduction_indices=1)
# clip the error
error = tf.abs(y - q_value)
clipped = tf.clip_by_value(error, 0.0, 1.0)
linear = error - clipped
loss = tf.reduce_mean(0.5 * tf.square(clipped) + linear)
rms_optimizer = tf.train.RMSPropOptimizer(LEARNING_RATE, momentum=MOMENTUM, epsilon=MIN_GRAD)
grads_update = rms_optimizer.minimize(loss, var_list=network_weights)
return a, y, loss, grads_update
def get_initial_state(self, observation, last_observation):
processed_observation = np.maximum(observation, last_observation)
processed_observation = np.uint8(resize(rgb2gray(processed_observation), (FRAME_WIDTH, FRAME_HEIGHT)) * 255)
state = [processed_observation for _ in range(STATE_LENGTH)]
return np.stack(state, axis=0)
def _build_summary(self):
# Parameters used for summary
self.total_reward = 0
self.total_q_max = 0
self.total_loss = 0
self.duration = 0
self.episode = 0
episode_total_reward = tf.Variable(0.)
tf.summary.scalar(ENV_NAME + '/Total Reward/Episode', episode_total_reward)
episode_avg_max_q = tf.Variable(0.)
tf.summary.scalar(ENV_NAME + '/Average Max Q/Episode', episode_avg_max_q)
episode_duration = tf.Variable(0.)
tf.summary.scalar(ENV_NAME + '/Duration/Episode', episode_duration)
episode_avg_loss = tf.Variable(0.)
tf.summary.scalar(ENV_NAME + '/Average Loss/Episode', episode_avg_loss)
summary_vars = [episode_total_reward, episode_avg_max_q, episode_duration, episode_avg_loss]
summary_placeholders = [tf.placeholder(tf.float32) for _ in range(len(summary_vars))]
update_ops = [summary_vars[i].assign(summary_placeholders[i]) for i in range(len(summary_vars))]
summary_op = tf.summary.merge_all()
return summary_placeholders, update_ops, summary_op
def load_network(self):
checkpoint = tf.train.get_checkpoint_state(SAVE_NETWORK_PATH)
if checkpoint and checkpoint.model_checkpoint_path:
self.saver.restore(self.sess, checkpoint.model_checkpoint_path)
print('Successfully loaded: ' + checkpoint.model_checkpoint_path)
else:
print('Training new network...')
def get_action_test(self, state):
        return np.argmax(self.q_vals.eval(feed_dict={self.state: [np.float32(state / 255.0)]}))
def get_action(self, state):
if self.epsilon >= random.random() or self.t < INITIAL_REPLAY_SIZE:
action = random.randrange(self.nb_actions)
else:
            action = np.argmax(self.q_vals.eval(feed_dict={self.state: [np.float32(state / 255.0)]}))
# Anneal epsilon linearly over time
if self.epsilon > self.final_epsilon and self.t >= INITIAL_REPLAY_SIZE:
self.epsilon -= self.epsilon_step
return action
def _train(self):
s_batch = []
a_batch = []
r_batch = []
s__batch = []
t_batch = []
y_batch = []
# sample from memory
minibatch = random.sample(self.replay_memory, BATCH_SIZE)
for data in minibatch:
s_batch.append(data[0])
a_batch.append(data[1])
r_batch.append(data[2])
s__batch.append(data[3])
t_batch.append(data[4])
# bool to int
t_batch = np.array(t_batch) + 0
        next_actions_batch = np.argmax(self.q_vals.eval(feed_dict={self.state: s__batch}), axis=1)
        target_q_values_batch = self.q_vals_t.eval(feed_dict={self.state_t: s__batch})
for i in range(len(minibatch)):
y_batch.append(r_batch[i] + (1 - t_batch[i]) * GAMMA * target_q_values_batch[i][next_actions_batch[i]])
loss, _ = self.sess.run([self.loss, self.grads_update], feed_dict={
            self.state: np.float32(np.array(s_batch) / 255.0),
self.a: a_batch,
self.y: y_batch
})
self.total_loss += loss
def add_memory(self, s, a, r, t, s_):
next_state = np.append(s[1:, :, :], s_, axis=0)
# clip reward into -1,1
reward = np.clip(r, -1, 1)
# add into replay memory
        self.replay_memory.append((s, a, reward, next_state, t))
if len(self.replay_memory) > NUM_REPLAY_MEMORY :
self.replay_memory.popleft()
if self.t > INITIAL_REPLAY_SIZE:
# train network
if self.t % TRAIN_INTERVAL == 0:
self._train()
# update target network
if self.t % TARGET_UPDATE_INTERVAL == 0:
self.sess.run(self.update_target_network)
# save network
if self.t % SAVE_INTERVAL == 0:
s_path = self.saver.save(self.sess, SAVE_NETWORK_PATH, global_step=self.t)
print('saved network')
self.total_reward += reward
        self.total_q_max += np.max(self.q_vals.eval(feed_dict={self.state: [np.float32(s / 255.0)]}))
self.duration += 1
if t:
# write summary
if self.t >= INITIAL_REPLAY_SIZE:
stats = [self.total_reward, self.total_q_max/float(self.duration),
self.duration, self.total_loss/ (float(self.duration)/ float(TRAIN_INTERVAL))]
for i in range(len(stats)):
self.sess.run(self.update_ops[i], feed_dict={self.summary_placeholders[i]: float(stats[i])})
summary_str = self.sess.run(self.summary_op)
self.summary_writer.add_summary(summary_str, self.episode + 1)
self.total_reward = 0
self.total_q_max = 0
self.total_loss = 0
self.duration = 0
self.episode += 1
self.t += 1
return next_state
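# Training-loop sketch (not part of the original file; the environment handling,
# the preprocess helper and the shapes are assumptions, shown only to illustrate
# how get_initial_state / get_action / add_memory fit together):
#   agent = DQN(input_shape=(FRAME_WIDTH, FRAME_HEIGHT, STATE_LENGTH), nb_actions=4)
#   state = agent.get_initial_state(observation, last_observation)
#   while not done:
#       action = agent.get_action(state)
#       observation, reward, done, _ = env.step(action)
#       frame = preprocess(observation)              # assumed 1x84x84 uint8 frame
#       state = agent.add_memory(state, action, reward, done, frame)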
|
mit
| -7,813,668,082,382,251,000 | 35.371212 | 144 | 0.611915 | false |
gstarnberger/paasta
|
paasta_tools/monitoring/check_classic_service_replication.py
|
1
|
8785
|
#!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
import pysensu_yelp
import requests
from sensu_plugin import SensuPluginCheck
from service_configuration_lib import read_services_configuration
from paasta_tools.monitoring.check_synapse_replication import (
check_replication,
)
from paasta_tools.monitoring.config_providers import (
extract_monitoring_info
)
from paasta_tools.monitoring.replication_utils import (
get_replication_for_services
)
from paasta_tools.utils import load_system_paasta_config
def read_key(key):
with open("/nail/etc/{0}".format(key)) as fd:
return fd.read().strip()
def report_event(event_dict):
assert event_dict['team'] is not None
pysensu_yelp.send_event(**event_dict)
def do_replication_check(service, monitoring_config, service_replication):
"""Do a replication check on the provided service and generate
notification events based on the information in monitoring_config and
service_replication. Note that the caller must provide replication data
:param service: The name of the service to send an event for
:param monitoring_config: A dictionary conforming to the mandatory
monitoring keys (as defined by extract_replication_info) and
optionally providing additional keys:
- runbook ("no runbook"): The runbook to refer oncall members to
- tip ("no tip"): A tip for oncall members
- page (false): Whether to page the provided team on failure
- alert_after ("0s"): How many minutes before going critical
- realert_every (-1): How many events before you trigger a realert
-1 indicates an exponential backoff
- extra.replication.key ("habitat"): The file in /nail/etc to inspect
          to figure out which value to look up in the map
- extra.replication.default (1): The default number of instances to
check for
- extra.replication.map ({}): A lookup that maps the replication keys to
the appropriate minimum replication value
:param service_replication: An int that represents the present replication. The default
behavior is to send emails to a team if their service reaches 0 replicas, although teams
can fine tune this to their needs
:returns: A dictionary that conforms to the expected sensu event API. Note that this function
does NOT send it to Sensu
"""
replication_config = monitoring_config.get('extra', {}).get(
'replication', {})
replication_key = replication_config.get('key', 'habitat')
replication_default = replication_config.get('default', 1)
replication_map = replication_config.get('map', {})
try:
goal_replication = replication_map[read_key(replication_key)]
except (IOError, KeyError):
# Either the /nail/etc/{key} file didn't exist or the result didn't
# appear in the replication_map, either way use the default
goal_replication = replication_default
warn_range = (goal_replication, sys.maxint)
crit_range = warn_range
status_code, message = check_replication(service,
service_replication,
warn_range, crit_range)
return {
'name': "replication_{0}".format(service),
'status': status_code,
'output': message,
'team': monitoring_config['team'],
'notification_email': monitoring_config['notification_email'],
'runbook': monitoring_config['runbook'] or 'no runbook',
'tip': monitoring_config['tip'] or 'no tip',
'page': monitoring_config['page'] or False,
'check_every': '1m',
'alert_after': monitoring_config['alert_after'] or '0s',
'realert_every': monitoring_config['realert_every'] or -1,
}
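# Illustrative call (not part of the original file; the service name, team and
# email are made up): checking a service that currently has 0 replicas against
# the default goal of 1 replica, then sending the resulting event to Sensu.
#   event = do_replication_check(
#       'example_service',
#       {'team': 'example-team', 'notification_email': 'team@example.com',
#        'runbook': None, 'tip': None, 'page': None,
#        'alert_after': None, 'realert_every': None},
#       0)
#   report_event(event)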
def extract_replication_info(service_config):
"""Extract monitoring information from yelpsoa-configs
To be monitored a service *must* supply a team.
Mandatory keys:
team: The team to send pages to
notification_email: The email to send emails to
service_type: Must be "classic" for this check to run
:param service_config: The configuration dictionary for the service
:returns (do_monitoring, monitoring_config): Which is a tuple of a bool
and a monitoring dictionary that has keys specified by
config_providers.monitoring_keys
"""
monitoring_config = extract_monitoring_info('classic', service_config)
# If we do not meet required information, do nothing
if not (monitoring_config['team'] and
monitoring_config.get('service_type') == 'classic'):
return False, {}
return True, monitoring_config
class ClassicServiceReplicationCheck(SensuPluginCheck):
log = logging.getLogger(__name__)
log.addHandler(logging.StreamHandler(sys.stdout))
def setup_logging(self):
if self.options.debug:
self.log.setLevel(logging.DEBUG)
else:
self.log.setLevel(logging.WARNING)
def setup(self):
self.parser.add_argument('-d', '--debug', default=False,
action='store_true',
help='Turn on debug output')
def get_service_replication(self, all_services, synapse_host, synapse_port, synapse_haproxy_url_format):
# Get the replication data once for performance
synapse_host_port = "%s:%s" % (synapse_host, synapse_port)
self.log.debug(
"Gathering replication information from {0}".
format(synapse_host_port))
service_replication = {}
try:
service_replication = get_replication_for_services(
synapse_host,
synapse_port,
synapse_haproxy_url_format,
['%s.main' % name for name in all_services]
)
except requests.exceptions.ConnectionError:
self.log.error(
'Failed to connect synapse haproxy on {0}'.
format(synapse_host_port))
self.critical(
'Failed to connect synapse haproxy on {0}'.
format(synapse_host_port))
except Exception as e:
self.log.error(
'Unable to collect replication information on {0}: {1}'.
format(synapse_host_port, e.message))
self.critical(
'Unable to collect replication information: {0}'.
format(e.message))
self.log.debug(
"Finished gathering replication information from {0}".
format(synapse_host_port))
return service_replication
def run(self):
self.setup_logging()
all_service_config = read_services_configuration()
system_config = load_system_paasta_config()
service_replication = self.get_service_replication(
all_services=all_service_config.keys(),
synapse_host=system_config.get_default_synapse_host(),
synapse_port=system_config.get_synapse_port(),
synapse_haproxy_url_format=system_config.get_synapse_haproxy_url_format(),
)
checked_services = []
for service, service_config in all_service_config.iteritems():
do_monitoring, monitoring_config = extract_replication_info(
service_config
)
if do_monitoring:
self.log.debug("Checking {0}".format(service))
replication = service_replication.get('%s.main' % service, 0)
event = do_replication_check(service, monitoring_config,
replication)
checked_services.append(service)
self.log.debug("Result for {0}: {1}".format(service,
event['output']))
report_event(event)
else:
self.log.debug("Not checking {0}".format(service))
self.ok("Finished checking services: {0}".format(checked_services))
if __name__ == "__main__":
# The act of making the object calls ends up calling the run method via
# SensuPluginCheck
check = ClassicServiceReplicationCheck()
|
apache-2.0
| -4,242,378,001,048,526,000 | 39.671296 | 108 | 0.64189 | false |
zenr/ippy
|
segmentation/shanbhag.py
|
1
|
1545
|
import numpy as np
import scipy.misc
def shanbhag(imgdata) :
"""Returns a binary segmentation threshold using the Shanbhag algorithm
given imgdata as a grayscale image of type numpy.ndarray
"""
# get normalized histogram
hist, bins = np.histogram(imgdata,range(0,257),density=True)
    # calculate cumulative distribution function (and inverse)
P1 = np.cumsum(hist) # cdf
P2 = 1 - P1 # inverse cdf
# find first and last non-zero bins
f = np.nonzero(P1)
first_bin = f[0][0]
last_bin = f[0][-1]
# initialize minimum entropy to +infinity
min_ent = float("inf")
for i in range(first_bin, last_bin) :
# calculate background entropy
ent_back = 0
term = 0.5 / P1[i]
for j in range(1, i) :
ent_back -= hist[j] * np.log(1 - term*P1[j-1])
ent_back *= term
# calculate foreground entropy
ent_fore = 0
term = 0.5 / P2[i]
for j in range(i+1, 256) :
ent_fore -= hist[j] * np.log(1 - term*P2[j-1])
ent_fore *= term
# set threshold to value where difference in entropy is minimal
tot_ent = abs(ent_back - ent_fore)
if (tot_ent < min_ent) :
min_ent = tot_ent
threshold = i
return threshold
# test case
from scipy.misc.pilutil import Image
a = Image.open('Rat_Hippocampal_Neuron.png').convert('L')
adata = scipy.misc.fromimage(a)
outimg = scipy.misc.toimage(adata > shanbhag(adata))
outimg.show()
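# Alternative usage sketch (not part of the original file): the same test case
# without the deprecated scipy.misc.pilutil helpers, using PIL directly.
#   from PIL import Image
#   img = np.asarray(Image.open('Rat_Hippocampal_Neuron.png').convert('L'))
#   mask = img > shanbhag(img)
#   Image.fromarray((mask * 255).astype(np.uint8)).show()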
|
gpl-3.0
| 4,705,636,846,058,423,000 | 28.711538 | 75 | 0.586408 | false |
codebox/star-charts
|
coord_calc.py
|
1
|
4668
|
# -*- coding: utf-8 -*-
from math import sin, cos, degrees, radians, pi
class CoordCalc:
def __init__(self, star_data_list, area, diagram_size):
self.star_data_list = star_data_list
self.area = area
self.center_ra_angle = self._ra_to_angle((area.ra_min + area.ra_max)/2)
if area.ra_max - area.ra_min >= 12:
self.center_dec_angle = self._dec_to_angle(90 if abs(area.dec_min) < abs(area.dec_max) else -90)
else:
self.center_dec_angle = self._dec_to_angle((area.dec_min + area.dec_max)/2)
self.diagram_size = diagram_size
def _ra_to_angle(self, ra):
# convert right-ascension (0 -> 24) into angle (0 -> 2π)
return pi * 2 * (1 - ra / 24)
def _dec_to_angle(self, dec):
# convert declination (-90 -> +90) into angle (-π/2 -> +π/2)
return radians(dec)
def _populate_angles(self):
for star_data in self.star_data_list.data:
star_data.ra_angle = self._ra_to_angle(star_data.ra)
star_data.dec_angle = self._dec_to_angle(star_data.dec)
def _angle_to_xy(self, ra_angle, dec_angle):
# http://www.projectpluto.com/project.htm
delta_ra = ra_angle - self.center_ra_angle
x = cos(dec_angle) * sin(delta_ra)
y = sin(dec_angle) * cos(self.center_dec_angle) - cos(dec_angle) * cos(delta_ra) * sin(self.center_dec_angle)
return x,y
def _populate_xy(self):
for star_data in self.star_data_list.data:
star_data.x, star_data.y = self._angle_to_xy(star_data.ra_angle, star_data.dec_angle)
def _offset_and_scale_xy(self):
min_x = min([sd.x for sd in self.star_data_list.data])
min_y = min([sd.y for sd in self.star_data_list.data])
max_x = max([sd.x for sd in self.star_data_list.data])
max_y = max([sd.y for sd in self.star_data_list.data])
x_range = max_x - min_x
y_range = max_y - min_y
max_range = max(x_range, y_range)
self.magnification = self.diagram_size / max_range
def offset_and_scale_x(x):
return (x - min_x) * self.magnification
def offset_and_scale_y(y):
return (y - min_y) * self.magnification
def offset_and_scale(star_data):
star_data.x = offset_and_scale_x(star_data.x)
star_data.y = offset_and_scale_y(star_data.y)
self.star_data_list.min_x = offset_and_scale_x(min_x)
self.star_data_list.min_y = offset_and_scale_y(min_y)
self.star_data_list.max_x = offset_and_scale_x(max_x)
self.star_data_list.max_y = offset_and_scale_y(max_y)
self.offset_and_scale_x = offset_and_scale_x
self.offset_and_scale_y = offset_and_scale_y
list(map(offset_and_scale, self.star_data_list.data))
def process(self):
self._populate_angles()
self._populate_xy()
self._offset_and_scale_xy()
def _ra_dec_to_x_y(self, ra, dec):
ra_angle = self._ra_to_angle(ra)
dec_angle = self._dec_to_angle(dec)
base_x, base_y = self._angle_to_xy(ra_angle, dec_angle)
return self.offset_and_scale_x(base_x), self.offset_and_scale_y(base_y)
def calc_ra_curve(self, ra, steps):
points = []
dec_min = self.area.dec_min
dec_max = self.area.dec_max
dec_step = (dec_max - dec_min) / steps
for i in range(steps+1):
x, y = self._ra_dec_to_x_y(ra, dec_min + dec_step * i)
points.append((x, y))
return points
def calc_dec_curve(self, dec, steps):
points = []
ra_min = self.area.ra_min
ra_max = self.area.ra_max
ra_step = (ra_max - ra_min) / steps
for i in range(steps+1):
x, y = self._ra_dec_to_x_y(ra_min + ra_step * i, dec)
points.append((x,y))
return points
def calc_curves(self, ra_steps=100, dec_steps=100):
curves = []
curves.append(self.calc_ra_curve(self.area.ra_min, ra_steps))
ra = round(self.area.ra_min)
while ra < self.area.ra_max:
if ra > self.area.ra_min:
curves.append(self.calc_ra_curve(ra, ra_steps))
ra += 1
curves.append(self.calc_ra_curve(self.area.ra_max, ra_steps))
curves.append(self.calc_dec_curve(self.area.dec_min, dec_steps))
dec = round(self.area.dec_min / 10) * 10
while dec < self.area.dec_max:
if dec > self.area.dec_min:
curves.append(self.calc_dec_curve(dec, dec_steps))
dec += 10
curves.append(self.calc_dec_curve(self.area.dec_max, dec_steps))
return curves
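# Usage sketch (not part of the original file; `stars` and `area` are assumed to
# provide the attributes used above: area.ra_min/ra_max/dec_min/dec_max and star
# entries with .ra and .dec):
#   cc = CoordCalc(stars, area, diagram_size=500)
#   cc.process()                 # populates x/y for every star, scaled to 500px
#   grid = cc.calc_curves()      # RA/Dec grid lines as lists of (x, y) points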
|
mit
| -2,826,641,857,710,505,000 | 35.732283 | 117 | 0.572347 | false |
eort/OpenSesame
|
libqtopensesame/widgets/qtitem_splitter.py
|
2
|
1578
|
#-*- coding:utf-8 -*-
"""
This file is part of OpenSesame.
OpenSesame is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenSesame is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenSesame. If not, see <http://www.gnu.org/licenses/>.
"""
from libopensesame.py3compat import *
from qtpy import QtCore, QtWidgets
from libqtopensesame.misc.base_subcomponent import base_subcomponent
class qtitem_splitter(base_subcomponent, QtWidgets.QSplitter):
"""
desc:
        Implements a splitter for the edit and script views in an item tab. This
        custom class is mostly necessary because the default QSplitter.sizeHint()
        is too large.
"""
def __init__(self, item):
"""
desc:
Constructor.
item:
desc: The item.
type: qtitem
"""
super(qtitem_splitter, self).__init__(QtCore.Qt.Vertical,
item.main_window)
self.item = item
self.setup(item.main_window)
self.addWidget(self.item._edit_widget)
self.addWidget(self.item._script_widget)
def minimumSizeHint(self):
"""
returns:
type: QSize
"""
return QtCore.QSize(100, 100)
def sizeHint(self):
"""
returns:
type: QSize
"""
return QtCore.QSize(100, 100)
|
gpl-3.0
| -4,027,054,382,903,953,000 | 22.205882 | 73 | 0.727503 | false |
bolidozor/RTbolidozor
|
handlers/__init__.py
|
1
|
2316
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import MySQLdb as mdb
import pymysql.cursors
import os
import tornado
from requests_oauthlib import OAuth2Session
import smtplib
#from email.MIMEMultipart import MIMEMultipart
#from email.MIMEBase import MIMEBase
#from email.MIMEText import MIMEText
#from email.Utils import COMMASPACE, formatdate
#from email import Encoders
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
from email import encoders
import os
class BaseHandler(tornado.web.RequestHandler):
def get_current_user(self):
#print self.options()
#print help(self.options())
print("----------------------")
login = self.get_secure_cookie("login")
token = self.get_secure_cookie("token")
if not login:
return None
else:
return login
def get_user(self):
login = self.get_secure_cookie("login")
if not login:
return None
else:
return login
def sendMail(to, subject = "MLABvo", text = "No content"):
message="""From: MLAB distributed measurement systems <dms@mlab.cz>
To: %s
MIME-Version: 1.0
Content-type: text/html
Subject: %s
""" %(to, subject)
message += text
print("----- email")
print(to)
print(message)
print("-----end")
smtp = smtplib.SMTP('localhost')
smtp.sendmail("MLAB distributed measurement systems <dms@mlab.cz>", to, message )
smtp.close()
def _sql(query, read=False, db="MLABvo"):
#print "#>", query
connection = pymysql.connect(host="localhost", user="root", passwd="root", db=db, use_unicode=True, charset="utf8", cursorclass=pymysql.cursors.DictCursor)
try:
cursorobj = connection.cursor()
result = None
cursorobj.execute(query)
result = cursorobj.fetchall()
if not read:
connection.commit()
except Exception as e:
print("Err", e)
connection.close()
#print result
return result
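# Usage sketch (not part of the original file; the table name is made up):
#   rows = _sql("SELECT id, name FROM stations", read=True)
#   _sql("UPDATE stations SET active = 1 WHERE id = 5")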
def wwwCleanName(string):
return ''.join( c for c in string if c not in '?:!/;-_#$%^!@., (){}[]' )
def loged():
pass
|
gpl-3.0
| -5,035,603,752,302,982,000 | 26.903614 | 163 | 0.617444 | false |
ityaptin/ceilometer
|
ceilometer/tests/unit/image/test_glance.py
|
1
|
5111
|
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import fixture as fixture_config
from ceilometer.agent import manager
from ceilometer.image import glance
import ceilometer.tests.base as base
IMAGE_LIST = [
type('Image', (object,),
{u'status': u'active',
u'tags': [],
u'kernel_id': u'fd24d91a-dfd5-4a3c-b990-d4563eb27396',
u'container_format': u'ami',
u'min_ram': 0,
u'ramdisk_id': u'd629522b-ebaa-4c92-9514-9e31fe760d18',
u'updated_at': u'2016-06-20T13: 34: 41Z',
u'visibility': u'public',
u'owner': u'6824974c08974d4db864bbaa6bc08303',
u'file': u'/v2/images/fda54a44-3f96-40bf-ab07-0a4ce9e1761d/file',
u'min_disk': 0,
u'virtual_size': None,
u'id': u'fda54a44-3f96-40bf-ab07-0a4ce9e1761d',
u'size': 25165824,
u'name': u'cirros-0.3.4-x86_64-uec',
u'checksum': u'eb9139e4942121f22bbc2afc0400b2a4',
u'created_at': u'2016-06-20T13: 34: 40Z',
u'disk_format': u'ami',
u'protected': False,
u'schema': u'/v2/schemas/image'}),
type('Image', (object,),
{u'status': u'active',
u'tags': [],
u'container_format': u'ari',
u'min_ram': 0,
u'updated_at': u'2016-06-20T13: 34: 38Z',
u'visibility': u'public',
u'owner': u'6824974c08974d4db864bbaa6bc08303',
u'file': u'/v2/images/d629522b-ebaa-4c92-9514-9e31fe760d18/file',
u'min_disk': 0,
u'virtual_size': None,
u'id': u'd629522b-ebaa-4c92-9514-9e31fe760d18',
u'size': 3740163,
u'name': u'cirros-0.3.4-x86_64-uec-ramdisk',
u'checksum': u'be575a2b939972276ef675752936977f',
u'created_at': u'2016-06-20T13: 34: 37Z',
u'disk_format': u'ari',
u'protected': False,
u'schema': u'/v2/schemas/image'}),
type('Image', (object,),
{u'status': u'active',
u'tags': [],
u'container_format': u'aki',
u'min_ram': 0,
u'updated_at': u'2016-06-20T13: 34: 35Z',
u'visibility': u'public',
u'owner': u'6824974c08974d4db864bbaa6bc08303',
u'file': u'/v2/images/fd24d91a-dfd5-4a3c-b990-d4563eb27396/file',
u'min_disk': 0,
u'virtual_size': None,
u'id': u'fd24d91a-dfd5-4a3c-b990-d4563eb27396',
u'size': 4979632,
u'name': u'cirros-0.3.4-x86_64-uec-kernel',
u'checksum': u'8a40c862b5735975d82605c1dd395796',
u'created_at': u'2016-06-20T13: 34: 35Z',
u'disk_format': u'aki',
u'protected': False,
u'schema': u'/v2/schemas/image'}),
]
class TestImagePollsterPageSize(base.BaseTestCase):
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def setUp(self):
super(TestImagePollsterPageSize, self).setUp()
self.CONF = self.useFixture(fixture_config.Config()).conf
self.manager = manager.AgentManager(0, self.CONF)
self.pollster = glance.ImageSizePollster()
def test_image_pollster(self):
image_samples = list(
self.pollster.get_samples(self.manager, {}, resources=IMAGE_LIST))
self.assertEqual(3, len(image_samples))
self.assertEqual('image.size', image_samples[0].name)
self.assertEqual(25165824, image_samples[0].volume)
self.assertEqual('6824974c08974d4db864bbaa6bc08303',
image_samples[0].project_id)
self.assertEqual('fda54a44-3f96-40bf-ab07-0a4ce9e1761d',
image_samples[0].resource_id)
class TestImagePageSize(base.BaseTestCase):
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def setUp(self):
super(TestImagePageSize, self).setUp()
self.CONF = self.useFixture(fixture_config.Config()).conf
self.manager = manager.AgentManager(0, self.CONF)
self.pollster = glance.ImagePollster()
def test_image_pollster(self):
image_samples = list(
self.pollster.get_samples(self.manager, {}, resources=IMAGE_LIST))
self.assertEqual(3, len(image_samples))
self.assertEqual('image', image_samples[0].name)
self.assertEqual(1, image_samples[0].volume)
self.assertEqual('6824974c08974d4db864bbaa6bc08303',
image_samples[0].project_id)
self.assertEqual('fda54a44-3f96-40bf-ab07-0a4ce9e1761d',
image_samples[0].resource_id)
|
apache-2.0
| -5,967,410,848,596,420,000 | 40.552846 | 78 | 0.615144 | false |
valerymelou/cookiecutter-django-gulp
|
{{cookiecutter.project_slug}}/config/settings/production.py
|
1
|
5098
|
# -*- coding: utf-8 -*-
"""
Production Configurations
- Use Redis for cache
"""
from __future__ import absolute_import, unicode_literals
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env('DJANGO_SECRET_KEY')
# SECURITY CONFIGURATION
# ------------------------------------------------------------------------------
# See https://docs.djangoproject.com/en/1.9/ref/middleware/#module-django.middleware.security
# and https://docs.djangoproject.com/ja/1.9/howto/deployment/checklist/#run-manage-py-check-deploy
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
'DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
X_FRAME_OPTIONS = 'DENY'
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['{{cookiecutter.domain_name}}'])
# END SITE CONFIGURATION
INSTALLED_APPS += ['gunicorn']
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='{{cookiecutter.project_name}} <noreply@{{cookiecutter.domain_name}}>')
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[{{cookiecutter.project_name}}] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader']),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db('DATABASE_URL')
# CACHING
# ------------------------------------------------------------------------------
REDIS_LOCATION = '{0}/{1}'.format(env('REDIS_URL', default='redis://127.0.0.1:6379'), 0)
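# Illustration (not part of the original settings): with REDIS_URL unset this
# resolves to 'redis://127.0.0.1:6379/0'; with REDIS_URL=redis://cache:6379 it
# becomes 'redis://cache:6379/0' (database 0 on that host).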
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': REDIS_LOCATION,
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'IGNORE_EXCEPTIONS': True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
}
}
}
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
}
}
}
# Custom Admin URL, use {% raw %}{% url 'admin:index' %}{% endraw %}
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Your production stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
|
bsd-3-clause
| -3,370,520,679,117,669,400 | 37.330827 | 117 | 0.557081 | false |
TatsuyaOGth/PepperScripts
|
LittlePerformance/module/_tmp/random_move.py
|
1
|
1596
|
import time
import math
import random
from naoqi import ALProxy
#IP = '127.0.0.1'
#port = 49951
IP = '192.168.3.9'
port = 9559
motion_proxy = ALProxy('ALMotion',IP,port)
part = 'Body'
time.sleep(3.0)
for i in range(200):
motion_proxy.setStiffnesses(part, 1.0)
body_names = motion_proxy.getBodyNames(part)
if i !=0:
third_name = first_name
else :
third_name = random.choice(body_names)
first_name = random.choice(body_names)
while True:
if first_name != third_name:
break
else:
first_name = random.choice(body_names)
second_name = random.choice(body_names)
while True:
if (second_name != first_name) and (second_name != third_name):
break
else:
second_name = random.choice(body_names)
limits = motion_proxy.getLimits(first_name)[0]
#limits of the specific joint
first_target_angle = (limits[1]-limits[0]) * random.random() + limits[0]
limits = motion_proxy.getLimits(second_name)[0]
second_target_angle = (limits[1]-limits[0]) * random.random() + limits[0]
limits = motion_proxy.getLimits(third_name)[0]
third_target_angle = (limits[1]-limits[0]) * random.random() + limits[0]
fractionMaxSpeed = 0.99
names = [first_name,second_name,third_name]
target_angles = [first_target_angle,second_target_angle,third_target_angle]
#motion_proxy.changeAngles(names, changes, fractionMaxSpeed) #adds the angle to
motion_proxy.setAngles(names, target_angles, fractionMaxSpeed)
time.sleep(0.05)
# print 'joint name: '+ names
# print math.degrees(motion_proxy.getAngles(names,True)[0])
# print ''
motion_proxy.setStiffnesses(part, 0.0)
|
cc0-1.0
| -5,709,509,332,995,824,000 | 20.863014 | 81 | 0.703634 | false |
terna/SLAPP3
|
6 objectSwarmObserverAgents_AESOP_turtleLib_NetworkX/start.py
|
1
|
4599
|
# start 6 objectSwarmObserverAgentsAESOP.py
# 'project' is by default both the name of the application and of the subfolder
# that contains its code; the subfolder is supposed to be placed within the
# SLAPP tree
# the folder can be placed outside the SLAPP tree if we place a file
# project.txt in the folder "6 objectSwarmObserverAgents_AESOP_turtleLib_NetworkX"
# the file has to contain the path and the name of the folder of the project
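# Illustration (not part of the original file; the path is made up): project.txt
# would contain a single line with the path to the external project folder, e.g.
#   /home/me/models/myExternalProject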
def runSLAPP():
global start_pyDir
print("\nSLAPP v3.3.10 build 20201030\n")
import os
confirm = "n"
found = False
start_pyDir = os.getcwd()
names1 = os.listdir("./")
names2 = os.listdir("../")
name = False
if "project.txt" in names1: #main folder
name = "project.txt"
if "project.txt" in names2: # folder 6...
name = "../project.txt"
if name:
currentProject = open(name, "r")
pathAndProject = currentProject.readline()
if pathAndProject[-1] == "\n" or pathAndProject[-1] == "\r":
pathAndProject = pathAndProject[0:-1]
if pathAndProject[-1] == "\n" or pathAndProject[-1] == "\r":
pathAndProject = pathAndProject[0:-1]
# -1 means: last character
        # [0:-1] means: the string but the last character
        # the last character is eliminated in the given case (twice) to avoid
        # interferences between the control characters within the file and the
# path definition
print("path and project = " + pathAndProject)
confirm = input("do you confirm? ([y]/n): ")
if confirm == "y" or confirm == "Y" or confirm == "":
found = True
currentProject.close()
if confirm == "y" or confirm == "Y" or confirm == "":
project = pathAndProject
else:
p=None
while p==None:
project = input("Project name? ")
if project not in names1:
print("Project " + project + " not found")
else: p=project
else:
found = True
project = "./" + project
if found:
import sys
sys.path.append("./$$slapp$$")
if confirm != "y" and confirm != "Y" and confirm != "":
sys.path.append(project)
else:
sys.path.append(pathAndProject)
import commonVar as common
# if kernelUsesNumpyRandom is defined (True or False) in commonVar.py
# of the project, no other action is requested
try:
common.kernelUsesNumpyRandom
# if the definition is missing, we do it here
except BaseException:
common.kernelUsesNumpyRandom = False
#print("kernelUsesNumpyRandom =", common.kernelUsesNumpyRandom)
import Tools as tl
import graphicControl as gc
gc.graphicControl()
# print common.graphicStatus
# project reported in common for possible uses in other SLAPP segments or
# applications
    # it contains either (i) the path (relative to the start.py position) of a project
# existing within the SLAPP hierarchy, or (ii) the absolute path to a project
# placed outside
common.project = project
common.IPython = tl.checkRunningIn()
if common.IPython:
print("running in IPython")
else:
print("running in Python")
import ObserverSwarm as obs
# if debug is defined (True or False) in commonVar.py of the project, no
# other action is requested
try:
common.debug
# if the definition is missing, we do it here
except BaseException:
            common.debug = False  # if debug is True a large part of the try/except
# structures will be bypassed, so the errors will
# be managed directly by the Python interpreter
# this choice can be useful when you build a new project
# and as an expert user you want to check the errors
# in a basic way
print("debug =", common.debug)
observerSwarm = obs.ObserverSwarm(project)
common.pro = project # to be used within the oActions.py and
# mActions.py extensions
# create objects
observerSwarm.buildObjects()
# create actions
observerSwarm.buildActions()
# run
observerSwarm.run()
if common.IPython:
print(
"End of the run! TO RUN AGAIN IN JUPYTER REMEMBER TO RESTART THE KERNEL")
# running alone
if __name__ == "__main__":
runSLAPP()
|
cc0-1.0
| -7,428,399,567,447,264,000 | 33.320896 | 89 | 0.599043 | false |
darkopevec/kivy
|
kivy/uix/widget.py
|
1
|
46002
|
'''
Widget class
============
The :class:`Widget` class is the base class required for creating Widgets.
This widget class was designed with a couple of principles in mind:
* *Event Driven*
Widget interaction is built on top of events that occur. If a property
changes, the widget can respond to the change in the 'on_<propname>'
callback. If nothing changes, nothing will be done. That's the main
goal of the :class:`~kivy.properties.Property` class.
* *Separation Of Concerns (the widget and its graphical representation)*
Widgets don't have a `draw()` method. This is done on purpose: The idea
is to allow you to create your own graphical representation outside the
widget class.
Obviously you can still use all the available properties to do that, so
that your representation properly reflects the widget's current state.
Every widget has its own :class:`~kivy.graphics.Canvas` that you
can use to draw. This separation allows Kivy to run your
application in a very efficient manner.
* *Bounding Box / Collision*
Often you want to know if a certain point is within the bounds of your
widget. An example would be a button widget where you only want to
trigger an action when the button itself is actually touched.
For this, you can use the :meth:`~Widget.collide_point` method, which
will return True if the point you pass to it is inside the axis-aligned
bounding box defined by the widget's position and size.
If a simple AABB is not sufficient, you can override the method to
perform the collision checks with more complex shapes, e.g. a polygon.
You can also check if a widget collides with another widget with
:meth:`~Widget.collide_widget`.
We also have some default values and behaviors that you should be aware of:
* A :class:`Widget` is not a :class:`~kivy.uix.layout.Layout`: it will not
change the position or the size of its children. If you want control over
positioning or sizing, use a :class:`~kivy.uix.layout.Layout`.
* The default size of a widget is (100, 100). This is only changed if the
parent is a :class:`~kivy.uix.layout.Layout`.
For example, if you add a :class:`Label` inside a
:class:`Button`, the label will not inherit the button's size or position
because the button is not a *Layout*: it's just another *Widget*.
* The default size_hint is (1, 1). If the parent is a :class:`Layout`, then the
widget size will be the parent layout's size.
* :meth:`~Widget.on_touch_down`, :meth:`~Widget.on_touch_move`,
:meth:`~Widget.on_touch_up` don't do any sort of collisions. If you want to
know if the touch is inside your widget, use :meth:`~Widget.collide_point`.
Using Properties
----------------
When you read the documentation, all properties are described in the format::
<name> is a <property class> and defaults to <default value>.
e.g.
:attr:`~kivy.uix.label.Label.text` is a
:class:`~kivy.properties.StringProperty` and defaults to ''.
If you want to be notified when the pos attribute changes, i.e. when the
widget moves, you can bind your own callback function like this::
def callback_pos(instance, value):
print('The widget', instance, 'moved to', value)
wid = Widget()
wid.bind(pos=callback_pos)
Read more about :doc:`/api-kivy.properties`.
Basic drawing
-------------
Widgets support a range of drawing instructions that you can use to customize
the look of your widgets and layouts. For example, to draw a background image
for your widget, you can do the following:
.. code-block:: python
def redraw(self, args):
self.bg_rect.size = self.size
self.bg_rect.pos = self.pos
widget = Widget()
with widget.canvas:
widget.bg_rect = Rectangle(source="cover.jpg", pos=self.pos, \
size=self.size)
widget.bind(pos=redraw, size=redraw)
To draw a background in kv:
.. code-block:: kv
Widget:
canvas:
Rectangle:
source: "cover.jpg"
size: self.size
pos: self.pos
These examples only scratch the surface. Please see the :mod:`kivy.graphics`
documentation for more information.
.. _widget-event-bubbling:
Widget touch event bubbling
---------------------------
When you catch touch events between multiple widgets, you often
need to be aware of the order in which these events are propagated. In Kivy,
events bubble up from the first child upwards through the other children.
If a widget has children, the event is passed through its children before
being passed on to the widget after it.
As the :meth:`~kivy.uix.widget.Widget.add_widget` method inserts widgets at
index 0 by default, this means the event goes from the most recently added
widget back to the first one added. Consider the following:
.. code-block:: python
box = BoxLayout()
box.add_widget(Label(text="a"))
box.add_widget(Label(text="b"))
box.add_widget(Label(text="c"))
The label with text "c" gets the event first, "b" second and "a" last. You can
reverse this order by manually specifying the index:
.. code-block:: python
box = BoxLayout()
box.add_widget(Label(text="a"), index=0)
box.add_widget(Label(text="b"), index=1)
box.add_widget(Label(text="c"), index=2)
Now the order would be "a", "b" then "c". One thing to keep in mind when using
kv is that declaring a widget uses the
:meth:`~kivy.uix.widget.Widget.add_widget` method for insertion. Hence, using
.. code-block:: kv
BoxLayout:
MyLabel:
text: "a"
MyLabel:
text: "b"
MyLabel:
text: "c"
would result in the event order "c", "b" then "a" as "c" was actually the last
added widget. It thus has index 0, "b" index 1 and "a" index 2. Effectively,
the child order is the reverse of its listed order.
This ordering is the same for the :meth:`~kivy.uix.widget.Widget.on_touch_move`
and :meth:`~kivy.uix.widget.Widget.on_touch_up` events.
In order to stop this event bubbling, a method can return `True`. This tells
Kivy the event has been handled and the event propagation stops. For example:
.. code-block:: python
class MyWidget(Widget):
def on_touch_down(self, touch):
            if <some_condition>:
# Do stuff here and kill the event
return True
else:
return super(MyWidget, self).on_touch_down(touch)
This approach gives you good control over exactly how events are dispatched
and managed. Sometimes, however, you may wish to let the event be completely
propagated before taking action. You can use the
:class:`~kivy.clock.Clock` to help you here:
.. code-block:: python
class MyWidget(Label):
def on_touch_down(self, touch, after=False):
if after:
print "Fired after the event has been dispatched!"
else:
Clock.schedule_once(lambda dt: self.on_touch_down(touch, True))
return super(MyWidget, self).on_touch_down(touch)
Usage of :attr:`Widget.center`, :attr:`Widget.right`, and :attr:`Widget.top`
----------------------------------------------------------------------------
A common mistake when using one of the computed properties such as
:attr:`Widget.right` is to use it to make a widget follow its parent with a
KV rule such as `right: self.parent.right`. Consider, for example:
.. code-block:: kv
FloatLayout:
id: layout
width: 100
Widget:
id: wid
right: layout.right
The (mistaken) expectation is that this rule ensures that wid's right will
always be whatever layout's right is - that is wid.right and layout.right will
always be identical. In actual fact, this rule only says that "whenever
layout's `right` changes, wid's right will be set to that value". The
difference being that as long as `layout.right` doesn't change, `wid.right`
could be anything, even a value that will make them different.
Specifically, for the KV code above, consider the following example::
>>> print(layout.right, wid.right)
(100, 100)
>>> wid.x = 200
>>> print(layout.right, wid.right)
(100, 300)
As can be seen, initially they are in sync, however, when we change `wid.x`
they go out of sync because `layout.right` is not changed and the rule is not
triggered.
The proper way to make the widget follow its parent's right is to use
:attr:`Widget.pos_hint`. If instead of `right: layout.right` we did
`pos_hint: {'right': 1}`, then the widgets right will always be set to be
at the parent's right at each layout update.
'''
__all__ = ('Widget', 'WidgetException')
from kivy.event import EventDispatcher
from kivy.factory import Factory
from kivy.properties import (
NumericProperty, StringProperty, AliasProperty, ReferenceListProperty,
ObjectProperty, ListProperty, DictProperty, BooleanProperty)
from kivy.graphics import (
Canvas, Translate, Fbo, ClearColor, ClearBuffers, Scale)
from kivy.graphics.transformation import Matrix
from kivy.base import EventLoop
from kivy.lang import Builder
from kivy.context import get_current_context
from kivy.weakproxy import WeakProxy
from functools import partial
from itertools import islice
# References to all the widget destructors (partial method with widget uid as
# key).
_widget_destructors = {}
def _widget_destructor(uid, r):
# Internal method called when a widget is deleted from memory. The only
# thing we remember about it is its uid. Clear all the associated callbacks
# created in the kv language.
del _widget_destructors[uid]
Builder.unbind_widget(uid)
class WidgetException(Exception):
'''Fired when the widget gets an exception.
'''
pass
class WidgetMetaclass(type):
'''Metaclass to automatically register new widgets for the
:class:`~kivy.factory.Factory`.
.. warning::
This metaclass is used by the Widget. Do not use it directly!
'''
def __init__(mcs, name, bases, attrs):
super(WidgetMetaclass, mcs).__init__(name, bases, attrs)
Factory.register(name, cls=mcs)
#: Base class used for Widget, that inherits from :class:`EventDispatcher`
WidgetBase = WidgetMetaclass('WidgetBase', (EventDispatcher, ), {})
class Widget(WidgetBase):
'''Widget class. See module documentation for more information.
:Events:
`on_touch_down`:
Fired when a new touch event occurs
`on_touch_move`:
Fired when an existing touch moves
`on_touch_up`:
Fired when an existing touch disappears
.. warning::
Adding a `__del__` method to a class derived from Widget with Python
prior to 3.4 will disable automatic garbage collection for instances
of that class. This is because the Widget class creates reference
cycles, thereby `preventing garbage collection
<https://docs.python.org/2/library/gc.html#gc.garbage>`_.
.. versionchanged:: 1.0.9
Everything related to event properties has been moved to the
:class:`~kivy.event.EventDispatcher`. Event properties can now be used
when constructing a simple class without subclassing :class:`Widget`.
.. versionchanged:: 1.5.0
The constructor now accepts on_* arguments to automatically bind
callbacks to properties or events, as in the Kv language.
'''
__metaclass__ = WidgetMetaclass
__events__ = ('on_touch_down', 'on_touch_move', 'on_touch_up')
_proxy_ref = None
def __init__(self, **kwargs):
# Before doing anything, ensure the window exists.
EventLoop.ensure_window()
# Assign the default context of the widget creation.
if not hasattr(self, '_context'):
self._context = get_current_context()
no_builder = '__no_builder' in kwargs
if no_builder:
del kwargs['__no_builder']
on_args = {k: v for k, v in kwargs.items() if k[:3] == 'on_'}
for key in on_args:
del kwargs[key]
super(Widget, self).__init__(**kwargs)
# Create the default canvas if it does not exist.
if self.canvas is None:
self.canvas = Canvas(opacity=self.opacity)
# Apply all the styles.
if not no_builder:
Builder.apply(self, ignored_consts=self._kwargs_applied_init)
# Bind all the events.
if on_args:
self.bind(**on_args)
@property
def proxy_ref(self):
'''Return a proxy reference to the widget, i.e. without creating a
reference to the widget. See `weakref.proxy
<http://docs.python.org/2/library/weakref.html?highlight\
=proxy#weakref.proxy>`_ for more information.
.. versionadded:: 1.7.2
'''
_proxy_ref = self._proxy_ref
if _proxy_ref is not None:
return _proxy_ref
f = partial(_widget_destructor, self.uid)
self._proxy_ref = _proxy_ref = WeakProxy(self, f)
# Keeping only f should be enough here, but in this very specific
# case the proxy destructor is not called unless f and _proxy_ref
# are stored together in a tuple.
_widget_destructors[self.uid] = (f, _proxy_ref)
return _proxy_ref
def __hash__(self):
return id(self)
@property
def __self__(self):
return self
#
# Collision
#
def collide_point(self, x, y):
'''
Check if a point (x, y) is inside the widget's axis aligned bounding
box.
:Parameters:
`x`: numeric
x position of the point (in window coordinates)
`y`: numeric
y position of the point (in window coordinates)
:Returns:
A bool. True if the point is inside the bounding box, False
otherwise.
.. code-block:: python
>>> Widget(pos=(10, 10), size=(50, 50)).collide_point(40, 40)
True
'''
return self.x <= x <= self.right and self.y <= y <= self.top
def collide_widget(self, wid):
'''
Check if another widget collides with this widget. This function
performs an axis-aligned bounding box intersection test by default.
:Parameters:
`wid`: :class:`Widget` class
Widget to collide with.
:Returns:
bool. True if the other widget collides with this widget, False
otherwise.
.. code-block:: python
>>> wid = Widget(size=(50, 50))
>>> wid2 = Widget(size=(50, 50), pos=(25, 25))
>>> wid.collide_widget(wid2)
True
>>> wid2.pos = (55, 55)
>>> wid.collide_widget(wid2)
False
'''
if self.right < wid.x:
return False
if self.x > wid.right:
return False
if self.top < wid.y:
return False
if self.y > wid.top:
return False
return True
#
# Default event handlers
#
def on_touch_down(self, touch):
'''Receive a touch down event.
:Parameters:
`touch`: :class:`~kivy.input.motionevent.MotionEvent` class
Touch received. The touch is in parent coordinates. See
:mod:`~kivy.uix.relativelayout` for a discussion on
coordinate systems.
:Returns: bool
If True, the dispatching of the touch event will stop.
If False, the event will continue to be dispatched to the rest
of the widget tree.
'''
if self.disabled and self.collide_point(*touch.pos):
return True
for child in self.children[:]:
if child.dispatch('on_touch_down', touch):
return True
def on_touch_move(self, touch):
'''Receive a touch move event. The touch is in parent coordinates.
See :meth:`on_touch_down` for more information.
'''
if self.disabled:
return
for child in self.children[:]:
if child.dispatch('on_touch_move', touch):
return True
def on_touch_up(self, touch):
'''Receive a touch up event. The touch is in parent coordinates.
See :meth:`on_touch_down` for more information.
'''
if self.disabled:
return
for child in self.children[:]:
if child.dispatch('on_touch_up', touch):
return True
def on_disabled(self, instance, value):
for child in self.children:
child.disabled = value
#
# Tree management
#
def add_widget(self, widget, index=0, canvas=None):
'''Add a new widget as a child of this widget.
:Parameters:
`widget`: :class:`Widget`
Widget to add to our list of children.
`index`: int, defaults to 0
Index to insert the widget in the list. Notice that the default
of 0 means the widget is inserted at the beginning of the list
and will thus be drawn on top of other sibling widgets. For a
full discussion of the index and widget hierarchy, please see
the :doc:`Widgets Programming Guide <guide/widgets>`.
.. versionadded:: 1.0.5
`canvas`: str, defaults to None
Canvas to add widget's canvas to. Can be 'before', 'after' or
None for the default canvas.
.. versionadded:: 1.9.0
.. code-block:: python
>>> from kivy.uix.button import Button
>>> from kivy.uix.slider import Slider
>>> root = Widget()
>>> root.add_widget(Button())
>>> slider = Slider()
>>> root.add_widget(slider)
'''
if not isinstance(widget, Widget):
raise WidgetException(
'add_widget() can be used only with instances'
' of the Widget class.')
widget = widget.__self__
if widget is self:
raise WidgetException(
'Widget instances cannot be added to themselves.')
parent = widget.parent
# Check if the widget is already a child of another widget.
if parent:
raise WidgetException('Cannot add %r, it already has a parent %r'
% (widget, parent))
widget.parent = parent = self
# Child will be disabled if added to a disabled parent.
if parent.disabled:
widget.disabled = True
canvas = self.canvas.before if canvas == 'before' else \
self.canvas.after if canvas == 'after' else self.canvas
if index == 0 or len(self.children) == 0:
self.children.insert(0, widget)
canvas.add(widget.canvas)
else:
canvas = self.canvas
children = self.children
if index >= len(children):
index = len(children)
next_index = 0
else:
next_child = children[index]
next_index = canvas.indexof(next_child.canvas)
if next_index == -1:
next_index = canvas.length()
else:
next_index += 1
children.insert(index, widget)
# We never want to insert widget _before_ canvas.before.
if next_index == 0 and canvas.has_before:
next_index = 1
canvas.insert(next_index, widget.canvas)
def remove_widget(self, widget):
'''Remove a widget from the children of this widget.
:Parameters:
`widget`: :class:`Widget`
Widget to remove from our children list.
.. code-block:: python
>>> from kivy.uix.button import Button
>>> root = Widget()
>>> button = Button()
>>> root.add_widget(button)
>>> root.remove_widget(button)
'''
if widget not in self.children:
return
self.children.remove(widget)
if widget.canvas in self.canvas.children:
self.canvas.remove(widget.canvas)
elif widget.canvas in self.canvas.after.children:
self.canvas.after.remove(widget.canvas)
elif widget.canvas in self.canvas.before.children:
self.canvas.before.remove(widget.canvas)
widget.parent = None
def clear_widgets(self, children=None):
'''
Remove all (or the specified) :attr:`~Widget.children` of this widget.
If the 'children' argument is specified, it should be a list (or
filtered list) of children of the current widget.
.. versionchanged:: 1.8.0
The `children` argument can be used to specify the children you
want to remove.
'''
if not children:
children = self.children
remove_widget = self.remove_widget
for child in children[:]:
remove_widget(child)
def export_to_png(self, filename, *args):
'''Saves an image of the widget and its children in png format at the
specified filename. Works by removing the widget canvas from its
parent, rendering to an :class:`~kivy.graphics.fbo.Fbo`, and calling
:meth:`~kivy.graphics.texture.Texture.save`.
.. note::
The image includes only this widget and its children. If you want
to include widgets elsewhere in the tree, you must call
:meth:`~Widget.export_to_png` from their common parent, or use
:meth:`~kivy.core.window.WindowBase.screenshot` to capture the whole
window.
.. note::
The image will be saved in png format; you should include the
extension in your filename.
.. versionadded:: 1.9.0
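Usage (the filename below is just an illustration):
.. code-block:: python
    >>> widget.export_to_png('widget.png')
    True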
'''
if self.parent is not None:
canvas_parent_index = self.parent.canvas.indexof(self.canvas)
if canvas_parent_index > -1:
self.parent.canvas.remove(self.canvas)
fbo = Fbo(size=self.size, with_stencilbuffer=True)
with fbo:
ClearColor(0, 0, 0, 1)
ClearBuffers()
Scale(1, -1, 1)
Translate(-self.x, -self.y - self.height, 0)
fbo.add(self.canvas)
fbo.draw()
fbo.texture.save(filename, flipped=False)
fbo.remove(self.canvas)
if self.parent is not None and canvas_parent_index > -1:
self.parent.canvas.insert(canvas_parent_index, self.canvas)
return True
def get_root_window(self):
'''Return the root window.
:Returns:
Instance of the root window. Can be a
:class:`~kivy.core.window.WindowBase` or
:class:`Widget`.
'''
if self.parent:
return self.parent.get_root_window()
def get_parent_window(self):
'''Return the parent window.
:Returns:
Instance of the parent window. Can be a
:class:`~kivy.core.window.WindowBase` or
:class:`Widget`.
'''
if self.parent:
return self.parent.get_parent_window()
def _walk(self, restrict=False, loopback=False, index=None):
# We pass index only when we are continuing the walk in the parent,
# in which case we don't yield the parent as well.
if index is None:
index = len(self.children)
yield self
for child in reversed(self.children[:index]):
for walk_child in child._walk(restrict=True):
yield walk_child
# If we want to continue with our parent, just do it.
if not restrict:
parent = self.parent
try:
if parent is None or not isinstance(parent, Widget):
raise ValueError
index = parent.children.index(self)
except ValueError:
# Self is root, if we want to loopback from the first element:
if not loopback:
return
# If we started with root (i.e. index==None), then we have to
# start from root again, so we return self again. Otherwise, we
# never returned it, so return it now starting with it.
parent = self
index = None
for walk_child in parent._walk(loopback=loopback, index=index):
yield walk_child
def walk(self, restrict=False, loopback=False):
''' Iterator that walks the widget tree starting with this widget and
goes forward returning widgets in the order in which layouts display
them.
:Parameters:
`restrict`: bool, defaults to False
If True, it will only iterate through the widget and its
children (or children of its children etc.). Defaults to False.
`loopback`: bool, defaults to False
If True, when the last widget in the tree is reached,
it'll loop back to the uppermost root and start walking until
we hit this widget again. Naturally, it can only loop back when
`restrict` is False. Defaults to False.
:return:
A generator that walks the tree, returning widgets in the
forward layout order.
For example, given a tree with the following structure:
.. code-block:: kv
GridLayout:
    Button
    BoxLayout:
        id: box
        Widget
        Button
    Widget
walking this tree:
.. code-block:: python
>>> # Call walk on box with loopback True, and restrict False
>>> [type(widget) for widget in box.walk(loopback=True)]
[<class 'BoxLayout'>, <class 'Widget'>, <class 'Button'>,
<class 'Widget'>, <class 'GridLayout'>, <class 'Button'>]
>>> # Now with loopback False, and restrict False
>>> [type(widget) for widget in box.walk()]
[<class 'BoxLayout'>, <class 'Widget'>, <class 'Button'>,
<class 'Widget'>]
>>> # Now with restrict True
>>> [type(widget) for widget in box.walk(restrict=True)]
[<class 'BoxLayout'>, <class 'Widget'>, <class 'Button'>]
.. versionadded:: 1.9.0
'''
gen = self._walk(restrict, loopback)
yield next(gen)
for node in gen:
if node is self:
return
yield node
def _walk_reverse(self, loopback=False, go_up=False):
# The process is: walk up a level, walk down its children tree, then walk
# up the next level, etc.
# By default, just walk down the children tree.
root = self
index = 0
# we need to go up a level before walking tree
if go_up:
root = self.parent
try:
if root is None or not isinstance(root, Widget):
raise ValueError
index = root.children.index(self) + 1
except ValueError:
if not loopback:
return
index = 0
go_up = False
root = self
# now walk children tree starting with last-most child
for child in islice(root.children, index, None):
for walk_child in child._walk_reverse(loopback=loopback):
yield walk_child
# we need to return ourself last, in all cases
yield root
# if going up, continue walking up the parent tree
if go_up:
for walk_child in root._walk_reverse(loopback=loopback,
go_up=go_up):
yield walk_child
def walk_reverse(self, loopback=False):
''' Iterator that walks the widget tree backwards starting with the
widget before this, and going backwards returning widgets in the
reverse order in which layouts display them.
This walks in the opposite direction of :meth:`walk`, so a list of the
tree generated with :meth:`walk` will be in reverse order compared
to the list generated with this, provided `loopback` is True.
:Parameters:
`loopback`: bool, defaults to False
If True, when the uppermost root in the tree is
reached, it'll loop back to the last widget and start walking
back until we hit this widget again. Defaults to False.
:return:
A generator that walks the tree, returning widgets in the
reverse layout order.
For example, given a tree with the following structure:
.. code-block:: kv
GridLayout:
    Button
    BoxLayout:
        id: box
        Widget
        Button
    Widget
walking this tree:
.. code-block:: python
>>> # Call walk on box with loopback True
>>> [type(widget) for widget in box.walk_reverse(loopback=True)]
[<class 'Button'>, <class 'GridLayout'>, <class 'Widget'>,
<class 'Button'>, <class 'Widget'>, <class 'BoxLayout'>]
>>> # Now with loopback False
>>> [type(widget) for widget in box.walk_reverse()]
[<class 'Button'>, <class 'GridLayout'>]
>>> forward = [w for w in box.walk(loopback=True)]
>>> backward = [w for w in box.walk_reverse(loopback=True)]
>>> forward == backward[::-1]
True
.. versionadded:: 1.9.0
'''
for node in self._walk_reverse(loopback=loopback, go_up=True):
yield node
if node is self:
return
def to_widget(self, x, y, relative=False):
'''Convert the given coordinate from window to local widget
coordinates. See :mod:`~kivy.uix.relativelayout` for details on the
coordinate systems.
'''
if self.parent:
x, y = self.parent.to_widget(x, y)
return self.to_local(x, y, relative=relative)
def to_window(self, x, y, initial=True, relative=False):
'''Transform local coordinates to window coordinates. See
:mod:`~kivy.uix.relativelayout` for details on the coordinate systems.
'''
if not initial:
x, y = self.to_parent(x, y, relative=relative)
if self.parent:
return self.parent.to_window(x, y, initial=False,
relative=relative)
return (x, y)
def to_parent(self, x, y, relative=False):
'''Transform local coordinates to parent coordinates. See
:mod:`~kivy.uix.relativelayout` for details on the coordinate systems.
:Parameters:
`relative`: bool, defaults to False
Change to True if you want to translate relative positions from
a widget to its parent coordinates.
'''
if relative:
return (x + self.x, y + self.y)
return (x, y)
def to_local(self, x, y, relative=False):
'''Transform parent coordinates to local coordinates. See
:mod:`~kivy.uix.relativelayout` for details on the coordinate systems.
:Parameters:
`relative`: bool, defaults to False
Change to True if you want to translate coordinates to
relative widget coordinates.
'''
if relative:
return (x - self.x, y - self.y)
return (x, y)
def _apply_transform(self, m, pos=None):
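        # Translate the matrix by this widget's position expressed in the
        # parent's relative coordinates (going through window coordinates),
        # then recursively apply the parent's own transform.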
if self.parent:
x, y = self.parent.to_widget(relative=True,
*self.to_window(*(pos or self.pos)))
m.translate(x, y, 0)
m = self.parent._apply_transform(m) if self.parent else m
return m
def get_window_matrix(self, x=0, y=0):
'''Calculate the transformation matrix to convert between window and
widget coordinates.
:Parameters:
`x`: float, defaults to 0
Translates the matrix on the x axis.
`y`: float, defaults to 0
Translates the matrix on the y axis.
'''
m = Matrix()
m.translate(x, y, 0)
m = self._apply_transform(m)
return m
x = NumericProperty(0)
'''X position of the widget.
:attr:`x` is a :class:`~kivy.properties.NumericProperty` and defaults to 0.
'''
y = NumericProperty(0)
'''Y position of the widget.
:attr:`y` is a :class:`~kivy.properties.NumericProperty` and defaults to 0.
'''
width = NumericProperty(100)
'''Width of the widget.
:attr:`width` is a :class:`~kivy.properties.NumericProperty` and defaults
to 100.
.. warning::
Keep in mind that the `width` property is subject to layout logic and
that this has not yet happened at the time of the widget's `__init__`
method.
'''
height = NumericProperty(100)
'''Height of the widget.
:attr:`height` is a :class:`~kivy.properties.NumericProperty` and defaults
to 100.
.. warning::
Keep in mind that the `height` property is subject to layout logic and
that this has not yet happened at the time of the widget's `__init__`
method.
'''
pos = ReferenceListProperty(x, y)
'''Position of the widget.
:attr:`pos` is a :class:`~kivy.properties.ReferenceListProperty` of
(:attr:`x`, :attr:`y`) properties.
'''
size = ReferenceListProperty(width, height)
'''Size of the widget.
:attr:`size` is a :class:`~kivy.properties.ReferenceListProperty` of
(:attr:`width`, :attr:`height`) properties.
'''
def get_right(self):
return self.x + self.width
def set_right(self, value):
self.x = value - self.width
right = AliasProperty(get_right, set_right, bind=('x', 'width'))
'''Right position of the widget.
:attr:`right` is an :class:`~kivy.properties.AliasProperty` of
(:attr:`x` + :attr:`width`).
'''
def get_top(self):
return self.y + self.height
def set_top(self, value):
self.y = value - self.height
top = AliasProperty(get_top, set_top, bind=('y', 'height'))
'''Top position of the widget.
:attr:`top` is an :class:`~kivy.properties.AliasProperty` of
(:attr:`y` + :attr:`height`).
'''
def get_center_x(self):
return self.x + self.width / 2.
def set_center_x(self, value):
self.x = value - self.width / 2.
center_x = AliasProperty(get_center_x, set_center_x, bind=('x', 'width'))
'''X center position of the widget.
:attr:`center_x` is an :class:`~kivy.properties.AliasProperty` of
(:attr:`x` + :attr:`width` / 2.).
'''
def get_center_y(self):
return self.y + self.height / 2.
def set_center_y(self, value):
self.y = value - self.height / 2.
center_y = AliasProperty(get_center_y, set_center_y, bind=('y', 'height'))
'''Y center position of the widget.
:attr:`center_y` is an :class:`~kivy.properties.AliasProperty` of
(:attr:`y` + :attr:`height` / 2.).
'''
center = ReferenceListProperty(center_x, center_y)
'''Center position of the widget.
:attr:`center` is a :class:`~kivy.properties.ReferenceListProperty` of
(:attr:`center_x`, :attr:`center_y`) properties.
'''
cls = ListProperty([])
'''Class of the widget, used for styling.
'''
id = StringProperty(None, allownone=True)
'''Unique identifier of the widget in the tree.
:attr:`id` is a :class:`~kivy.properties.StringProperty` and defaults to
None.
.. warning::
If the :attr:`id` is already used in the tree, an exception will
be raised.
'''
children = ListProperty([])
'''List of children of this widget.
:attr:`children` is a :class:`~kivy.properties.ListProperty` and
defaults to an empty list.
Use :meth:`add_widget` and :meth:`remove_widget` for manipulating the
children list. Don't manipulate the children list directly unless you know
what you are doing.
'''
parent = ObjectProperty(None, allownone=True, rebind=True)
'''Parent of this widget. The parent of a widget is set when the widget
is added to another widget and unset when the widget is removed from its
parent.
:attr:`parent` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
size_hint_x = NumericProperty(1, allownone=True)
'''X size hint. Represents how much space the widget should use in the
direction of the X axis relative to its parent's width.
Only the :class:`~kivy.uix.layout.Layout` and
:class:`~kivy.core.window.Window` classes make use of the hint.
The size_hint is used by layouts for two purposes:
- When the layout considers widgets on their own rather than in
relation to its other children, the size_hint_x is a direct proportion
of the parent width, normally between 0.0 and 1.0. For instance, a
widget with ``size_hint_x=0.5`` in
a vertical BoxLayout will take up half the BoxLayout's width, or
a widget in a FloatLayout with ``size_hint_x=0.2`` will take up 20%
of the FloatLayout width. If the size_hint is greater than 1, the
widget will be wider than the parent.
- When multiple widgets can share a row of a layout, such as in a
horizontal BoxLayout, their widths will be their size_hint_x as a
fraction of the sum of widget size_hints. For instance, if the
size_hint_xs are (0.5, 1.0, 0.5), the first widget will have a
width of 25% of the parent width.
:attr:`size_hint_x` is a :class:`~kivy.properties.NumericProperty` and
defaults to 1.
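As an illustration of the second case, three buttons sharing a horizontal
BoxLayout (a minimal sketch):
.. code-block:: python
    from kivy.uix.boxlayout import BoxLayout
    from kivy.uix.button import Button
    box = BoxLayout(orientation='horizontal')
    box.add_widget(Button(size_hint_x=0.5))  # 0.5 / 2.0 -> 25% of the width
    box.add_widget(Button(size_hint_x=1.0))  # 1.0 / 2.0 -> 50% of the width
    box.add_widget(Button(size_hint_x=0.5))  # 0.5 / 2.0 -> 25% of the width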
'''
size_hint_y = NumericProperty(1, allownone=True)
'''Y size hint.
:attr:`size_hint_y` is a :class:`~kivy.properties.NumericProperty` and
defaults to 1.
See :attr:`size_hint_x` for more information, but with widths and heights
swapped.
'''
size_hint = ReferenceListProperty(size_hint_x, size_hint_y)
'''Size hint.
:attr:`size_hint` is a :class:`~kivy.properties.ReferenceListProperty` of
(:attr:`size_hint_x`, :attr:`size_hint_y`) properties.
See :attr:`size_hint_x` for more information.
'''
pos_hint = ObjectProperty({})
'''Position hint. This property allows you to set the position of
the widget inside its parent layout, in percent (similar to
size_hint).
For example, if you want to set the top of the widget to be at 90%
height of its parent layout, you can write::
widget = Widget(pos_hint={'top': 0.9})
The keys 'x', 'right' and 'center_x' will use the parent width.
The keys 'y', 'top' and 'center_y' will use the parent height.
See :doc:`api-kivy.uix.floatlayout` for further reference.
.. note::
:attr:`pos_hint` is not used by all layouts. Check the documentation
of the layout in question to see if it supports pos_hint.
:attr:`pos_hint` is an :class:`~kivy.properties.ObjectProperty`
containing a dict.
'''
size_hint_min_x = NumericProperty(None, allownone=True)
'''When not None, the X-direction minimum size (in pixels,
like :attr:`width`) when :attr:`size_hint_x` is also not None.
When :attr:`size_hint_x` is not None, this is the minimum width that the
widget will be given as a result of :attr:`size_hint_x`. I.e. when a smaller
width would otherwise be set, :attr:`size_hint_min_x` is used instead for the
widget width. When it is None, or when :attr:`size_hint_x` is None,
:attr:`size_hint_min_x` has no effect.
Only the :class:`~kivy.uix.layout.Layout` and
:class:`~kivy.core.window.Window` classes make use of the hint.
:attr:`size_hint_min_x` is a :class:`~kivy.properties.NumericProperty` and
defaults to None.
.. versionadded:: 1.9.2
'''
size_hint_min_y = NumericProperty(None, allownone=True)
'''When not None, the Y-direction minimum size (in pixels,
like :attr:`height`) when :attr:`size_hint_y` is also not None.
When :attr:`size_hint_y` is not None, this is the minimum height that the
widget will be given as a result of :attr:`size_hint_y`. I.e. when a smaller
height would otherwise be set, :attr:`size_hint_min_y` is used instead for the
widget height. When it is None, or when :attr:`size_hint_y` is None,
:attr:`size_hint_min_y` has no effect.
Only the :class:`~kivy.uix.layout.Layout` and
:class:`~kivy.core.window.Window` classes make use of the hint.
:attr:`size_hint_min_y` is a :class:`~kivy.properties.NumericProperty` and
defaults to None.
.. versionadded:: 1.9.2
'''
size_hint_min = ReferenceListProperty(size_hint_min_x, size_hint_min_y)
'''Minimum size when using :attr:`size_hint`.
:attr:`size_hint_min` is a :class:`~kivy.properties.ReferenceListProperty`
of (:attr:`size_hint_min_x`, :attr:`size_hint_min_y`) properties.
.. versionadded:: 1.9.2
'''
size_hint_max_x = NumericProperty(None, allownone=True)
'''When not None, the X-direction maximum size (in pixels,
like :attr:`width`) when :attr:`size_hint_x` is also not None.
Similar to :attr:`size_hint_min_x`, except that it sets the maximum width.
:attr:`size_hint_max_x` is a :class:`~kivy.properties.NumericProperty` and
defaults to None.
.. versionadded:: 1.9.2
'''
size_hint_max_y = NumericProperty(None, allownone=True)
'''When not None, the Y-direction maximum size (in pixels,
like :attr:`height`) when :attr:`size_hint_y` is also not None.
Similar to :attr:`size_hint_min_y`, except that it sets the maximum height.
:attr:`size_hint_max_y` is a :class:`~kivy.properties.NumericProperty` and
defaults to None.
.. versionadded:: 1.9.2
'''
size_hint_max = ReferenceListProperty(size_hint_max_x, size_hint_max_y)
'''Maximum size when using :attr:`size_hint`.
:attr:`size_hint_max` is a :class:`~kivy.properties.ReferenceListProperty`
of (:attr:`size_hint_max_x`, :attr:`size_hint_max_y`) properties.
.. versionadded:: 1.9.2
'''
ids = DictProperty({})
'''This is a dictionary of ids defined in your kv language. This will only
be populated if you use ids in your kv language code.
.. versionadded:: 1.7.0
:attr:`ids` is a :class:`~kivy.properties.DictProperty` and defaults to an
empty dict {}.
The :attr:`ids` are populated for each root level widget definition. For
example:
.. code-block:: kv
# in kv
<MyWidget@Widget>:
    id: my_widget
    Label:
        id: label_widget
        Widget:
            id: inner_widget
            Label:
                id: inner_label
    TextInput:
        id: text_input
    OtherWidget:
        id: other_widget
<OtherWidget@Widget>
    id: other_widget
    Label:
        id: other_label
    TextInput:
        id: other_textinput
Then, in python:
.. code-block:: python
>>> widget = MyWidget()
>>> print(widget.ids)
{'other_widget': <weakproxy at 041CFED0 to OtherWidget at 041BEC38>,
'inner_widget': <weakproxy at 04137EA0 to Widget at 04138228>,
'inner_label': <weakproxy at 04143540 to Label at 04138260>,
'label_widget': <weakproxy at 04137B70 to Label at 040F97A0>,
'text_input': <weakproxy at 041BB5D0 to TextInput at 041BEC00>}
>>> print(widget.ids['other_widget'].ids)
{'other_textinput': <weakproxy at 041DBB40 to TextInput at 041BEF48>,
'other_label': <weakproxy at 041DB570 to Label at 041BEEA0>}
>>> print(widget.ids['label_widget'].ids)
{}
'''
opacity = NumericProperty(1.0)
'''Opacity of the widget and all its children.
.. versionadded:: 1.4.1
The opacity attribute controls the opacity of the widget and its children.
Be careful, it's a cumulative attribute: the value is multiplied by the
current global opacity and the result is applied to the current context
color.
For example, if the parent has an opacity of 0.5 and a child has an
opacity of 0.2, the real opacity of the child will be 0.5 * 0.2 = 0.1.
Then, the opacity is applied by the shader as:
.. code-block:: python
frag_color = color * vec4(1.0, 1.0, 1.0, opacity);
:attr:`opacity` is a :class:`~kivy.properties.NumericProperty` and defaults
to 1.0.
'''
def on_opacity(self, instance, value):
canvas = self.canvas
if canvas is not None:
canvas.opacity = value
canvas = None
'''Canvas of the widget.
The canvas is a graphics object that contains all the drawing instructions
for the graphical representation of the widget.
There are no general properties for the Widget class, such as background
color, to keep the design simple and lean. Some derived classes, such as
Button, do add such convenience properties but generally the developer is
responsible for implementing the graphics representation for a custom
widget from the ground up. See the derived widget classes for patterns to
follow and extend.
See :class:`~kivy.graphics.Canvas` for more information about the usage.
'''
disabled = BooleanProperty(False)
'''Indicates whether this widget can interact with input or not.
.. note::
1. Child Widgets, when added to a disabled widget, will be disabled
automatically.
2. Disabling/enabling a parent disables/enables all
of its children.
.. versionadded:: 1.8.0
:attr:`disabled` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
|
mit
| -6,705,151,984,013,767,000 | 33.797277 | 80 | 0.61193 | false |
ryfeus/lambda-packs
|
Spacy/source2.7/thinc/neural/_classes/batchnorm.py
|
1
|
3359
|
from .model import Model
from ... import describe
def _init_to_one(W, ops):
W.fill(1.)
def _run_child_hooks(model, X, y=None):
for hook in model.child.on_data_hooks:
hook(model.child, X, y)
@describe.on_data(_run_child_hooks)
@describe.attributes(
G=describe.Weights("Scaling vector",
lambda obj: (obj.nO,), _init_to_one),
b=describe.Biases("Bias vector",
lambda obj: (obj.nO,)),
d_G=describe.Gradient("G"),
d_b=describe.Gradient("b"),
m=describe.Weights("Means", lambda obj: (obj.nO,), _init_to_one),
v=describe.Weights("Variance", lambda obj: (obj.nO,), _init_to_one),
)
class BatchNorm(Model):
name = 'batchnorm'
def __init__(self, child, **kwargs):
self.child = child
self._layers = [child]
if 'nO' in kwargs:
self.nO = kwargs['nO']
elif getattr(child, 'nO', None):
self.nO = child.nO
self.nr_upd = 0
Model.__init__(self, **kwargs)
def predict(self, X):
X = self.child.predict(X)
Xh = _forward(self.ops, X, self.m, self.v+1e-08)
y = Xh * self.G + self.b
return y
def begin_update(self, X, drop=0.):
assert X.dtype == 'float32'
X, backprop_child = self.child.begin_update(X, drop=0.)
N, mu, var = _get_moments(self.ops, X)
self.nr_upd += 1
alpha = self.ops.xp.asarray([0.01], dtype='float32')
# I'm not sure this is the best thing to do --
# Here we make a running estimate of the mean and variance,
# Should we consider a sample to be the instance, or the batch?
diff = X - self.m
incr = (1-alpha) * diff
self.m += incr.mean(axis=0)
self.v += (diff * incr).mean(axis=0)
self.v *= alpha
Xhat = _forward(self.ops, X, mu, var)
# Batch "renormalization"
if self.nr_upd >= 7500:
Xhat *= var / (self.v+1e-08)
Xhat += (mu - self.m) / (self.v+1e-08)
y, backprop_rescale = self._begin_update_scale_shift(Xhat)
def finish_update(dy, sgd=None):
dy = backprop_rescale(dy, sgd)
dist, sum_dy, sum_dy_dist = _get_d_moments(self.ops, dy, X, mu)
d_xhat = N * dy - sum_dy - dist * (1./var) * sum_dy_dist
d_xhat *= var ** (-1. / 2)
d_xhat /= N
return backprop_child(d_xhat, sgd)
drop *= getattr(self.child, 'drop_factor', 1.0)
y, bp_dropout = self.ops.dropout(y, drop)
assert y.dtype == 'float32'
return y, bp_dropout(finish_update)
def _begin_update_scale_shift(self, input__BI):
def finish_update(gradient__BI, sgd=None):
self.d_b += gradient__BI.sum(axis=0)
d_G = self.d_G
d_G += (gradient__BI * input__BI).sum(axis=0)
if sgd is not None:
sgd(self._mem.weights, self._mem.gradient, key=self.id)
return gradient__BI * self.G
return input__BI * self.G + self.b, finish_update
def _get_moments(ops, X):
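    # Return the batch size N (as a 1-element array) plus the per-feature
    # mean and variance of X (a small epsilon is added for numerical stability).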
mu = X.mean(axis=0)
var = X.var(axis=0) + 1e-08
return ops.asarray([X.shape[0]], dtype='float32'), mu, var
def _get_d_moments(ops, dy, X, mu):
dist = X-mu
return dist, ops.xp.sum(dy, axis=0), ops.xp.sum(dy * dist, axis=0)
def _forward(ops, X, mu, var):
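    # Standardize X: subtract the mean and scale by 1 / sqrt(variance).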
return (X-mu) * var ** (-1./2.)
|
mit
| -4,046,856,294,922,201,000 | 31.298077 | 75 | 0.54808 | false |
Tatsh/tccutil
|
tccutil.py
|
1
|
7732
|
#!/usr/bin/env python
import argparse
import sqlite3
import sys
import os
import hashlib
from platform import mac_ver
from distutils.version import StrictVersion as version
##############################
######## VARIABLES ###########
# Utility Name
util_name = os.path.basename(sys.argv[0])
# Utility Version
util_version = '1.2.3'
# Current OS X version
osx_version = version(mac_ver()[0])
# Database Path
tcc_db = '/Library/Application Support/com.apple.TCC/TCC.db'
# Set "sudo" to True if called with Admin-Privileges.
sudo = True if os.getuid() == 0 else False
# Default Verbosity
verbose = False
parser = argparse.ArgumentParser(description='Modify Accessibility Preferences')
parser.add_argument(
'action',
metavar='ACTION',
type=str,
nargs='?',
help='This option is only used to perform a reset.',
)
parser.add_argument(
'--list', '-l', action='store_true',
help="List all entries in the accessibility database."
)
parser.add_argument(
'--insert', '-i', action='append', default=[],
help="Adds the given bundle ID or path to the accessibility database.",
)
parser.add_argument(
"-v", "--verbose", action='store_true',
help="Outputs additional info for some commands.",
)
parser.add_argument(
"-r", "--remove", action='append', default=[],
help="Removes the given Bundle ID or Path from the Accessibility Database.",
)
parser.add_argument(
"-e", "--enable", action='append', default=[],
help="Enables Accessibility Access for the given Bundle ID or Path.",
)
parser.add_argument(
"-d", "--disable", action='append', default=[],
help="Disables Accessibility Access for the given Bundle ID or Path."
)
parser.add_argument(
'--version', action='store_true',
help="Show the version of this script",
)
##############################
######## FUNCTIONS ###########
def display_version():
#------------------------
print "%s %s" % (util_name, util_version)
sys.exit(0)
def sudo_required():
#------------------------
if not sudo:
print "Error:"
print " When accessing the Accessibility Database, %s needs to be run with admin-privileges.\n" % (util_name)
display_help(1)
def open_database():
#------------------------
sudo_required()
global conn
global c
# Check if the database is already open, else open it.
try: conn.execute("")
except:
verbose_output("Opening Database...")
try:
if not os.path.isfile(tcc_db):
print "TCC Database has not been found."
sys.exit(1)
conn = sqlite3.connect(tcc_db)
c = conn.cursor()
# Do a sanity check that TCC access table has expected structure
c.execute("SELECT sql FROM sqlite_master WHERE name='access' and type='table'")
accessTableDigest=""
for row in c.fetchall():
accessTableDigest=hashlib.sha1(row[0]).hexdigest()[0:10]
break;
# check if table in DB has expected structure:
if not (
accessTableDigest == "8e93d38f7c" #prior to El Capitan
or
(osx_version >= version('10.11') and accessTableDigest in ["9b2ea61b30", "1072dc0e4b"])
):
print "TCC Database structure is unknown."
sys.exit(1)
verbose_output("Database opened.\n")
except:
print "Error opening Database."
sys.exit(1)
def display_help(error_code=None):
#------------------------
parser.print_help()
if error_code != None: sys.exit(error_code)
def close_database():
try:
conn.execute("")
try:
verbose_output("Closing Database...")
conn.close()
try:
conn.execute("")
except:
verbose_output("Database closed.")
except:
print "Error closing Database."
sys.exit(1)
except:
pass
def commit_changes():
#------------------------
# Apply the changes and close the sqlite connection.
verbose_output("Committing Changes...\n")
conn.commit()
def verbose_output(*args):
#------------------------
if verbose:
try:
for a in args:
print a
except:
pass
def list_clients():
#------------------------
open_database()
c.execute("SELECT client from access")
verbose_output("Fetching Entries from Database...\n")
for row in c.fetchall():
# Print each entry in the Accessibility pane.
print row[0]
verbose_output("")
def cli_util_or_bundle_id(client):
#------------------------
global client_type
# If the app starts with a slash, it is a command line utility.
# Setting the client_type to 1 will make the item visible in the GUI so you can manually click the checkbox.
if (client[0] == '/'):
client_type = 1
verbose_output("Detected \"%s\" as Command Line Utility." % (client))
# Otherwise, the app will be a bundle ID, which starts with a com., net., or org., etc.
else:
client_type = 0
verbose_output("Detected \"%s\" as Bundle ID." % (client))
def insert_client(client):
#------------------------
open_database()
# Check if it is a command line utility or a bundle ID as the default value to enable it is different.
cli_util_or_bundle_id(client)
verbose_output("Inserting \"%s\" into Database..." % (client))
if osx_version >= version('10.11'): # El Capitan or higher.
c.execute("INSERT or REPLACE INTO access VALUES('kTCCServiceAccessibility','%s',%s,1,1,NULL,NULL)" % (client, client_type))
else: # Yosemite or lower.
c.execute("INSERT or REPLACE INTO access VALUES('kTCCServiceAccessibility','%s',%s,1,1,NULL)" % (client, client_type))
commit_changes()
def delete_client(client):
#------------------------
open_database()
verbose_output("Removing \"%s\" from Database..." % (client))
c.execute("DELETE from access where client IS '%s'" % (client))
commit_changes()
def enable(client):
#------------------------
open_database()
verbose_output("Enabling %s..." % (client,))
# Setting typically appears in System Preferences right away (without closing the window).
# Set to 1 to enable the client.
c.execute("UPDATE access SET allowed='1' WHERE client='%s'" % (client))
commit_changes()
def disable(client):
#------------------------
open_database()
verbose_output("Disabling %s..." % (client,))
# Setting typically appears in System Preferences right away (without closing the window).
# Set to 0 to disable the client.
c.execute("UPDATE access SET allowed='0' WHERE client='%s'" % (client))
commit_changes()
def main():
#------------------------
# If no arguments are specified, show help menu and exit.
if not sys.argv[1:]:
print "Error:"
print " No arguments.\n"
display_help(2)
args = parser.parse_args()
if args.version:
display_version()
return
if args.action:
if args.action == 'reset':
exit_status = os.system("tccutil {}".format(' '.join(sys.argv[1:])))
sys.exit(exit_status/256)
else:
print "Error\n Unrecognized command {}".format(args.action)
if args.verbose:
# If verbose option is set, set verbose to True and remove all verbose arguments.
global verbose
verbose = True
if args.list:
list_clients()
return
for item_to_remove in args.remove:
delete_client(item_to_remove)
for item in args.insert:
insert_client(item)
for item in args.enable:
enable(item)
for item in args.disable:
disable(item)
close_database()
sys.exit(0)
if __name__ == '__main__':
main()
|
gpl-2.0
| -660,460,857,305,158,800 | 25.29932 | 127 | 0.622866 | false |
ziel980/website
|
app/__init__.py
|
1
|
2897
|
from flask_sqlalchemy import SQLAlchemy
from flask_bootstrap import Bootstrap
from .navigation import *
import os
from werkzeug.middleware.proxy_fix import ProxyFix
db = SQLAlchemy()
nav = MyNavigation()
module_setup_functions = []
def create_app(config, disable_login=False):
# Flask
from flask import Flask
app = Flask(__name__)
app.config.from_object(config)
if disable_login:
app.config['LOGIN_DISABLED'] = True
# Fix for redirecting http to https
app.wsgi_app = ProxyFix(app.wsgi_app)
# SQLAlchemy
db.init_app(app)
# Bootstrap
Bootstrap(app)
# Flask-Markdown
from flaskext.markdown import Markdown
Markdown(app)
# Flask-Navigation
nav.init_app(app)
# Create main navigation bar and add Home button.
nav.Bar('main', [nav.Item('Home', 'root.index')])
# Setup modules
from .views import mod_root, setup_error_handlers
app.register_blueprint(mod_root)
setup_error_handlers(app)
import app.mod_projects as mod_projects
import app.mod_streams as mod_streams
import app.mod_auth as mod_auth
import app.mod_adminpanel as mod_adminpanel
import app.mod_todo as mod_todo
for f in module_setup_functions:
f(app, nav, nav['main'])
# Setup error handling
import logging
from logging.handlers import RotatingFileHandler
class DebugRotatingFileHandler(RotatingFileHandler):
def __init__(self, filename, mode='a', max_bytes=0, backup_count=0, encoding=None, delay=False):
RotatingFileHandler.__init__(self, filename, mode, max_bytes, backup_count, encoding, delay)
def emit(self, record):
if not record.levelno == logging.DEBUG:
return
RotatingFileHandler.emit(self, record)
# Via file
# INFO or higher
if not os.path.exists("log"):
os.mkdir("log")
file_handler = RotatingFileHandler('log/website.log', 'a', 1 * 1024 * 1024, 10)
file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
app.logger.setLevel(logging.DEBUG)
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
# DEBUG only
file_handler = DebugRotatingFileHandler('log/website_DEBUG.log', 'a', 1 * 1024 * 1024, 10)
file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
file_handler.setLevel(logging.DEBUG)
app.logger.addHandler(file_handler)
app.logger.info('website startup')
app.logger.debug('website startup')
return app
def register_module():
""" Decorator function used by modules to decorate setup function.
A list of setup functions to call is created in module_setup_functions.
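Each registered function is later called as ``f(app, nav, nav['main'])``,
so a module's setup function might look like this (illustrative sketch,
the ``setup`` name and its body are assumed)::
    @register_module()
    def setup(app, nav, main_bar):
        # Register blueprints, add navigation items, etc.
        ...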
"""
def decorator(f):
module_setup_functions.append(f)
return f
return decorator
|
mit
| 3,497,567,861,086,679,000 | 30.835165 | 118 | 0.677943 | false |
googleapis/python-logging
|
tests/unit/test_client.py
|
1
|
30471
|
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from datetime import datetime
from datetime import timedelta
from datetime import timezone
import unittest
import mock
def _make_credentials():
import google.auth.credentials
return mock.Mock(spec=google.auth.credentials.Credentials)
class TestClient(unittest.TestCase):
PROJECT = "PROJECT"
PROJECT_PATH = f"projects/{PROJECT}"
LOGGER_NAME = "LOGGER_NAME"
SINK_NAME = "SINK_NAME"
FILTER = "logName:syslog AND severity>=ERROR"
DESTINATION_URI = "faux.googleapis.com/destination"
METRIC_NAME = "metric_name"
FILTER = "logName:syslog AND severity>=ERROR"
DESCRIPTION = "DESCRIPTION"
TIME_FORMAT = '"%Y-%m-%dT%H:%M:%S.%f%z"'
@staticmethod
def _get_target_class():
from google.cloud.logging import Client
return Client
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor_defaults(self):
from google.cloud._http import ClientInfo
from google.cloud.logging_v2._http import Connection
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
self.assertEqual(client.project, self.PROJECT)
self.assertIsInstance(client._connection, Connection)
self.assertIsInstance(client._connection._client_info, ClientInfo)
def test_ctor_explicit(self):
from google.cloud._http import ClientInfo
from google.cloud.logging_v2._http import Connection
creds = _make_credentials()
client_info = ClientInfo()
client = self._make_one(
project=self.PROJECT, credentials=creds, client_info=client_info
)
self.assertEqual(client.project, self.PROJECT)
self.assertIs(client._client_info, client_info)
self.assertIsInstance(client._connection, Connection)
self.assertIs(client._connection._client_info, client_info)
def test_ctor_w_empty_client_options(self):
from google.api_core.client_options import ClientOptions
creds = _make_credentials()
client_options = ClientOptions()
client = self._make_one(
project=self.PROJECT, credentials=creds, client_options=client_options
)
self.assertEqual(
client._connection.API_BASE_URL, client._connection.DEFAULT_API_ENDPOINT
)
def test_ctor_w_client_options_object(self):
from google.api_core.client_options import ClientOptions
creds = _make_credentials()
client_options = ClientOptions(
api_endpoint="https://foo-logging.googleapis.com"
)
client = self._make_one(
project=self.PROJECT, credentials=creds, client_options=client_options
)
self.assertEqual(
client._connection.API_BASE_URL, "https://foo-logging.googleapis.com"
)
def test_ctor_w_client_options_dict(self):
creds = _make_credentials()
client_options = {"api_endpoint": "https://foo-logging.googleapis.com"}
client = self._make_one(
project=self.PROJECT, credentials=creds, client_options=client_options
)
self.assertEqual(
client._connection.API_BASE_URL, "https://foo-logging.googleapis.com"
)
def test_logging_api_wo_gapic(self):
from google.cloud.logging_v2._http import _LoggingAPI
client = self._make_one(
project=self.PROJECT, credentials=_make_credentials(), _use_grpc=False
)
conn = client._connection = _Connection()
api = client.logging_api
self.assertIsInstance(api, _LoggingAPI)
self.assertEqual(api.api_request, conn.api_request)
# API instance is cached
again = client.logging_api
self.assertIs(again, api)
def test_logging_api_w_gapic(self):
clients = []
api_obj = object()
def make_api(client_obj):
clients.append(client_obj)
return api_obj
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds, _use_grpc=True)
patch = mock.patch("google.cloud.logging_v2.client._gapic")
with patch as gapic_module:
gapic_module.make_logging_api.side_effect = make_api
api = client.logging_api
self.assertIs(api, api_obj)
self.assertEqual(clients, [client])
# API instance is cached
again = client.logging_api
self.assertIs(again, api)
def test_no_gapic_ctor(self):
from google.cloud.logging_v2._http import _LoggingAPI
creds = _make_credentials()
patch = mock.patch("google.cloud.logging_v2.client._USE_GRPC", new=True)
with patch:
client = self._make_one(
project=self.PROJECT, credentials=creds, _use_grpc=False
)
api = client.logging_api
self.assertIsInstance(api, _LoggingAPI)
def test_sinks_api_wo_gapic(self):
from google.cloud.logging_v2._http import _SinksAPI
client = self._make_one(
project=self.PROJECT, credentials=_make_credentials(), _use_grpc=False
)
conn = client._connection = _Connection()
api = client.sinks_api
self.assertIsInstance(api, _SinksAPI)
self.assertEqual(api.api_request, conn.api_request)
# API instance is cached
again = client.sinks_api
self.assertIs(again, api)
def test_sinks_api_w_gapic(self):
clients = []
api_obj = object()
def make_api(client_obj):
clients.append(client_obj)
return api_obj
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds, _use_grpc=True)
patch = mock.patch("google.cloud.logging_v2.client._gapic")
with patch as gapic_module:
gapic_module.make_sinks_api.side_effect = make_api
api = client.sinks_api
self.assertIs(api, api_obj)
self.assertEqual(clients, [client])
# API instance is cached
again = client.sinks_api
self.assertIs(again, api)
def test_metrics_api_wo_gapic(self):
from google.cloud.logging_v2._http import _MetricsAPI
client = self._make_one(
project=self.PROJECT, credentials=_make_credentials(), _use_grpc=False
)
conn = client._connection = _Connection()
api = client.metrics_api
self.assertIsInstance(api, _MetricsAPI)
self.assertEqual(api.api_request, conn.api_request)
# API instance is cached
again = client.metrics_api
self.assertIs(again, api)
def test_metrics_api_w_gapic(self):
clients = []
api_obj = object()
def make_api(client_obj):
clients.append(client_obj)
return api_obj
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds, _use_grpc=True)
patch = mock.patch("google.cloud.logging_v2.client._gapic")
with patch as gapic_module:
gapic_module.make_metrics_api.side_effect = make_api
api = client.metrics_api
self.assertIs(api, api_obj)
self.assertEqual(clients, [client])
# API instance is cached
again = client.metrics_api
self.assertIs(again, api)
def test_logger(self):
from google.cloud.logging import Logger
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
logger = client.logger(self.LOGGER_NAME)
self.assertIsInstance(logger, Logger)
self.assertEqual(logger.name, self.LOGGER_NAME)
self.assertIs(logger.client, client)
self.assertEqual(logger.project, self.PROJECT)
def test_list_entries_defaults(self):
from google.cloud.logging import TextEntry
IID = "IID"
TEXT = "TEXT"
TOKEN = "TOKEN"
ENTRIES = [
{
"textPayload": TEXT,
"insertId": IID,
"resource": {"type": "global"},
"logName": "projects/%s/logs/%s" % (self.PROJECT, self.LOGGER_NAME),
}
]
creds = _make_credentials()
client = self._make_one(
project=self.PROJECT, credentials=creds, _use_grpc=False
)
returned = {"entries": ENTRIES, "nextPageToken": TOKEN}
client._connection = _Connection(returned)
iterator = client.list_entries()
page = next(iterator.pages)
entries = list(page)
token = iterator.next_page_token
self.assertEqual(len(entries), 1)
entry = entries[0]
self.assertIsInstance(entry, TextEntry)
self.assertEqual(entry.insert_id, IID)
self.assertEqual(entry.payload, TEXT)
logger = entry.logger
self.assertEqual(logger.name, self.LOGGER_NAME)
self.assertIs(logger.client, client)
self.assertEqual(logger.project, self.PROJECT)
self.assertEqual(token, TOKEN)
# check call payload
call_payload_no_filter = deepcopy(client._connection._called_with)
call_payload_no_filter["data"]["filter"] = "removed"
self.assertEqual(
call_payload_no_filter,
{
"path": "/entries:list",
"method": "POST",
"data": {
"filter": "removed",
"resourceNames": [f"projects/{self.PROJECT}"],
},
},
)
# verify that default filter is 24 hours
timestamp = datetime.strptime(
client._connection._called_with["data"]["filter"],
"timestamp>=" + self.TIME_FORMAT,
)
yesterday = datetime.now(timezone.utc) - timedelta(days=1)
self.assertLess(yesterday - timestamp, timedelta(minutes=1))
def test_list_entries_explicit(self):
from google.cloud.logging import DESCENDING
from google.cloud.logging import ProtobufEntry
from google.cloud.logging import StructEntry
from google.cloud.logging import Logger
PROJECT1 = "PROJECT1"
PROJECT2 = "PROJECT2"
INPUT_FILTER = "logName:LOGNAME"
IID1 = "IID1"
IID2 = "IID2"
PAYLOAD = {"message": "MESSAGE", "weather": "partly cloudy"}
PROTO_PAYLOAD = PAYLOAD.copy()
PROTO_PAYLOAD["@type"] = "type.googleapis.com/testing.example"
TOKEN = "TOKEN"
PAGE_SIZE = 42
ENTRIES = [
{
"jsonPayload": PAYLOAD,
"insertId": IID1,
"resource": {"type": "global"},
"logName": "projects/%s/logs/%s" % (self.PROJECT, self.LOGGER_NAME),
},
{
"protoPayload": PROTO_PAYLOAD,
"insertId": IID2,
"resource": {"type": "global"},
"logName": "projects/%s/logs/%s" % (self.PROJECT, self.LOGGER_NAME),
},
]
client = self._make_one(
project=self.PROJECT, credentials=_make_credentials(), _use_grpc=False
)
returned = {"entries": ENTRIES}
client._connection = _Connection(returned)
iterator = client.list_entries(
resource_names=[f"projects/{PROJECT1}", f"projects/{PROJECT2}"],
filter_=INPUT_FILTER,
order_by=DESCENDING,
page_size=PAGE_SIZE,
page_token=TOKEN,
)
entries = list(iterator)
token = iterator.next_page_token
# First, check the token.
self.assertIsNone(token)
# Then check the entries.
self.assertEqual(len(entries), 2)
entry = entries[0]
self.assertIsInstance(entry, StructEntry)
self.assertEqual(entry.insert_id, IID1)
self.assertEqual(entry.payload, PAYLOAD)
logger = entry.logger
self.assertIsInstance(logger, Logger)
self.assertEqual(logger.name, self.LOGGER_NAME)
self.assertIs(logger.client, client)
self.assertEqual(logger.project, self.PROJECT)
entry = entries[1]
self.assertIsInstance(entry, ProtobufEntry)
self.assertEqual(entry.insert_id, IID2)
self.assertEqual(entry.payload, PROTO_PAYLOAD)
logger = entry.logger
self.assertEqual(logger.name, self.LOGGER_NAME)
self.assertIs(logger.client, client)
self.assertEqual(logger.project, self.PROJECT)
self.assertIs(entries[0].logger, entries[1].logger)
# check call payload
call_payload_no_filter = deepcopy(client._connection._called_with)
call_payload_no_filter["data"]["filter"] = "removed"
self.assertEqual(
call_payload_no_filter,
{
"path": "/entries:list",
"method": "POST",
"data": {
"filter": "removed",
"orderBy": DESCENDING,
"pageSize": PAGE_SIZE,
"pageToken": TOKEN,
"resourceNames": [f"projects/{PROJECT1}", f"projects/{PROJECT2}"],
},
},
)
# verify that default timestamp filter is added
timestamp = datetime.strptime(
client._connection._called_with["data"]["filter"],
INPUT_FILTER + " AND timestamp>=" + self.TIME_FORMAT,
)
yesterday = datetime.now(timezone.utc) - timedelta(days=1)
self.assertLess(yesterday - timestamp, timedelta(minutes=1))
def test_list_entries_explicit_timestamp(self):
from google.cloud.logging import DESCENDING
from google.cloud.logging import ProtobufEntry
from google.cloud.logging import StructEntry
from google.cloud.logging import Logger
PROJECT1 = "PROJECT1"
PROJECT2 = "PROJECT2"
INPUT_FILTER = 'logName:LOGNAME AND timestamp="2020-10-13T21"'
IID1 = "IID1"
IID2 = "IID2"
PAYLOAD = {"message": "MESSAGE", "weather": "partly cloudy"}
PROTO_PAYLOAD = PAYLOAD.copy()
PROTO_PAYLOAD["@type"] = "type.googleapis.com/testing.example"
TOKEN = "TOKEN"
PAGE_SIZE = 42
ENTRIES = [
{
"jsonPayload": PAYLOAD,
"insertId": IID1,
"resource": {"type": "global"},
"logName": "projects/%s/logs/%s" % (self.PROJECT, self.LOGGER_NAME),
},
{
"protoPayload": PROTO_PAYLOAD,
"insertId": IID2,
"resource": {"type": "global"},
"logName": "projects/%s/logs/%s" % (self.PROJECT, self.LOGGER_NAME),
},
]
client = self._make_one(
project=self.PROJECT, credentials=_make_credentials(), _use_grpc=False
)
returned = {"entries": ENTRIES}
client._connection = _Connection(returned)
iterator = client.list_entries(
resource_names=[f"projects/{PROJECT1}", f"projects/{PROJECT2}"],
filter_=INPUT_FILTER,
order_by=DESCENDING,
page_size=PAGE_SIZE,
page_token=TOKEN,
)
entries = list(iterator)
token = iterator.next_page_token
# First, check the token.
self.assertIsNone(token)
# Then check the entries.
self.assertEqual(len(entries), 2)
entry = entries[0]
self.assertIsInstance(entry, StructEntry)
self.assertEqual(entry.insert_id, IID1)
self.assertEqual(entry.payload, PAYLOAD)
logger = entry.logger
self.assertIsInstance(logger, Logger)
self.assertEqual(logger.name, self.LOGGER_NAME)
self.assertIs(logger.client, client)
self.assertEqual(logger.project, self.PROJECT)
entry = entries[1]
self.assertIsInstance(entry, ProtobufEntry)
self.assertEqual(entry.insert_id, IID2)
self.assertEqual(entry.payload, PROTO_PAYLOAD)
logger = entry.logger
self.assertEqual(logger.name, self.LOGGER_NAME)
self.assertIs(logger.client, client)
self.assertEqual(logger.project, self.PROJECT)
self.assertIs(entries[0].logger, entries[1].logger)
# check call payload
# filter should not be changed
self.assertEqual(
client._connection._called_with,
{
"path": "/entries:list",
"method": "POST",
"data": {
"filter": INPUT_FILTER,
"orderBy": DESCENDING,
"pageSize": PAGE_SIZE,
"pageToken": TOKEN,
"resourceNames": [f"projects/{PROJECT1}", f"projects/{PROJECT2}"],
},
},
)
def test_sink_defaults(self):
from google.cloud.logging import Sink
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
sink = client.sink(self.SINK_NAME)
self.assertIsInstance(sink, Sink)
self.assertEqual(sink.name, self.SINK_NAME)
self.assertIsNone(sink.filter_)
self.assertIsNone(sink.destination)
self.assertIs(sink.client, client)
self.assertEqual(sink.parent, self.PROJECT_PATH)
def test_sink_explicit(self):
from google.cloud.logging import Sink
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
sink = client.sink(
self.SINK_NAME, filter_=self.FILTER, destination=self.DESTINATION_URI
)
self.assertIsInstance(sink, Sink)
self.assertEqual(sink.name, self.SINK_NAME)
self.assertEqual(sink.filter_, self.FILTER)
self.assertEqual(sink.destination, self.DESTINATION_URI)
self.assertIs(sink.client, client)
self.assertEqual(sink.parent, self.PROJECT_PATH)
def test_list_sinks_no_paging(self):
from google.cloud.logging import Sink
PROJECT = "PROJECT"
TOKEN = "TOKEN"
SINK_NAME = "sink_name"
FILTER = "logName:syslog AND severity>=ERROR"
SINKS = [
{"name": SINK_NAME, "filter": FILTER, "destination": self.DESTINATION_URI}
]
client = self._make_one(
project=PROJECT, credentials=_make_credentials(), _use_grpc=False
)
returned = {"sinks": SINKS, "nextPageToken": TOKEN}
client._connection = _Connection(returned)
iterator = client.list_sinks()
page = next(iterator.pages)
sinks = list(page)
token = iterator.next_page_token
# First check the token.
self.assertEqual(token, TOKEN)
# Then check the sinks returned.
self.assertEqual(len(sinks), 1)
sink = sinks[0]
self.assertIsInstance(sink, Sink)
self.assertEqual(sink.name, SINK_NAME)
self.assertEqual(sink.filter_, FILTER)
self.assertEqual(sink.destination, self.DESTINATION_URI)
self.assertIs(sink.client, client)
# Verify the mocked transport.
called_with = client._connection._called_with
path = "/projects/%s/sinks" % (self.PROJECT,)
self.assertEqual(
called_with, {"method": "GET", "path": path, "query_params": {}}
)
def test_list_sinks_with_paging(self):
from google.cloud.logging import Sink
PROJECT = "PROJECT"
SINK_NAME = "sink_name"
FILTER = "logName:syslog AND severity>=ERROR"
TOKEN = "TOKEN"
PAGE_SIZE = 42
SINKS = [
{"name": SINK_NAME, "filter": FILTER, "destination": self.DESTINATION_URI}
]
client = self._make_one(
project=PROJECT, credentials=_make_credentials(), _use_grpc=False
)
returned = {"sinks": SINKS}
client._connection = _Connection(returned)
iterator = client.list_sinks(page_size=PAGE_SIZE, page_token=TOKEN)
sinks = list(iterator)
token = iterator.next_page_token
# First check the token.
self.assertIsNone(token)
# Then check the sinks returned.
self.assertEqual(len(sinks), 1)
sink = sinks[0]
self.assertIsInstance(sink, Sink)
self.assertEqual(sink.name, SINK_NAME)
self.assertEqual(sink.filter_, FILTER)
self.assertEqual(sink.destination, self.DESTINATION_URI)
self.assertIs(sink.client, client)
# Verify the mocked transport.
called_with = client._connection._called_with
path = "/projects/%s/sinks" % (self.PROJECT,)
self.assertEqual(
called_with,
{
"method": "GET",
"path": path,
"query_params": {"pageSize": PAGE_SIZE, "pageToken": TOKEN},
},
)
def test_metric_defaults(self):
from google.cloud.logging import Metric
creds = _make_credentials()
client_obj = self._make_one(project=self.PROJECT, credentials=creds)
metric = client_obj.metric(self.METRIC_NAME)
self.assertIsInstance(metric, Metric)
self.assertEqual(metric.name, self.METRIC_NAME)
self.assertIsNone(metric.filter_)
self.assertEqual(metric.description, "")
self.assertIs(metric.client, client_obj)
self.assertEqual(metric.project, self.PROJECT)
def test_metric_explicit(self):
from google.cloud.logging import Metric
creds = _make_credentials()
client_obj = self._make_one(project=self.PROJECT, credentials=creds)
metric = client_obj.metric(
self.METRIC_NAME, filter_=self.FILTER, description=self.DESCRIPTION
)
self.assertIsInstance(metric, Metric)
self.assertEqual(metric.name, self.METRIC_NAME)
self.assertEqual(metric.filter_, self.FILTER)
self.assertEqual(metric.description, self.DESCRIPTION)
self.assertIs(metric.client, client_obj)
self.assertEqual(metric.project, self.PROJECT)
def test_list_metrics_no_paging(self):
from google.cloud.logging import Metric
metrics = [
{
"name": self.METRIC_NAME,
"filter": self.FILTER,
"description": self.DESCRIPTION,
}
]
client = self._make_one(
project=self.PROJECT, credentials=_make_credentials(), _use_grpc=False
)
returned = {"metrics": metrics}
client._connection = _Connection(returned)
# Execute request.
iterator = client.list_metrics()
metrics = list(iterator)
# Check the metrics returned.
self.assertEqual(len(metrics), 1)
metric = metrics[0]
self.assertIsInstance(metric, Metric)
self.assertEqual(metric.name, self.METRIC_NAME)
self.assertEqual(metric.filter_, self.FILTER)
self.assertEqual(metric.description, self.DESCRIPTION)
self.assertIs(metric.client, client)
# Verify mocked transport.
called_with = client._connection._called_with
path = "/projects/%s/metrics" % (self.PROJECT,)
self.assertEqual(
called_with, {"method": "GET", "path": path, "query_params": {}}
)
def test_list_metrics_with_paging(self):
from google.cloud.logging import Metric
token = "TOKEN"
next_token = "T00KEN"
page_size = 42
metrics = [
{
"name": self.METRIC_NAME,
"filter": self.FILTER,
"description": self.DESCRIPTION,
}
]
client = self._make_one(
project=self.PROJECT, credentials=_make_credentials(), _use_grpc=False
)
returned = {"metrics": metrics, "nextPageToken": next_token}
client._connection = _Connection(returned)
# Execute request.
iterator = client.list_metrics(page_size=page_size, page_token=token)
page = next(iterator.pages)
metrics = list(page)
# First check the token.
self.assertEqual(iterator.next_page_token, next_token)
# Then check the metrics returned.
self.assertEqual(len(metrics), 1)
metric = metrics[0]
self.assertIsInstance(metric, Metric)
self.assertEqual(metric.name, self.METRIC_NAME)
self.assertEqual(metric.filter_, self.FILTER)
self.assertEqual(metric.description, self.DESCRIPTION)
self.assertIs(metric.client, client)
# Verify mocked transport.
called_with = client._connection._called_with
path = "/projects/%s/metrics" % (self.PROJECT,)
self.assertEqual(
called_with,
{
"method": "GET",
"path": path,
"query_params": {"pageSize": page_size, "pageToken": token},
},
)
def test_get_default_handler_app_engine(self):
import os
from google.cloud._testing import _Monkey
from google.cloud.logging_v2.handlers._monitored_resources import _GAE_ENV_VARS
from google.cloud.logging.handlers import AppEngineHandler
credentials = _make_credentials()
client = self._make_one(
project=self.PROJECT, credentials=credentials, _use_grpc=False
)
gae_env_vars = {var: "TRUE" for var in _GAE_ENV_VARS}
with _Monkey(os, environ=gae_env_vars):
handler = client.get_default_handler()
handler.transport.worker.stop()
self.assertIsInstance(handler, AppEngineHandler)
def test_get_default_handler_container_engine(self):
from google.cloud.logging.handlers import ContainerEngineHandler
credentials = _make_credentials()
client = self._make_one(
project=self.PROJECT, credentials=credentials, _use_grpc=False
)
patch = mock.patch(
"google.cloud.logging_v2.handlers._monitored_resources.retrieve_metadata_server",
return_value="test-gke-cluster",
)
with patch:
handler = client.get_default_handler()
self.assertIsInstance(handler, ContainerEngineHandler)
def test_get_default_handler_general(self):
import io
from google.cloud.logging.handlers import CloudLoggingHandler
from google.cloud.logging import Resource
name = "test-logger"
resource = Resource("resource_type", {"resource_label": "value"})
labels = {"handler_label": "value"}
stream = io.BytesIO()
credentials = _make_credentials()
client = self._make_one(
project=self.PROJECT, credentials=credentials, _use_grpc=False
)
handler = client.get_default_handler(
name=name, resource=resource, labels=labels, stream=stream
)
handler.transport.worker.stop()
self.assertIsInstance(handler, CloudLoggingHandler)
self.assertEqual(handler.name, name)
self.assertEqual(handler.resource, resource)
self.assertEqual(handler.labels, labels)
def test_setup_logging(self):
from google.cloud.logging.handlers import CloudLoggingHandler
credentials = _make_credentials()
client = self._make_one(
project=self.PROJECT, credentials=credentials, _use_grpc=False
)
with mock.patch("google.cloud.logging_v2.client.setup_logging") as mocked:
client.setup_logging()
self.assertEqual(len(mocked.mock_calls), 1)
_, args, kwargs = mocked.mock_calls[0]
(handler,) = args
self.assertIsInstance(handler, CloudLoggingHandler)
handler.transport.worker.stop()
expected_kwargs = {
"excluded_loggers": (
"google.cloud",
"google.auth",
"google_auth_httplib2",
"google.api_core.bidi",
"werkzeug",
),
"log_level": 20,
}
self.assertEqual(kwargs, expected_kwargs)
def test_setup_logging_w_extra_kwargs(self):
import io
from google.cloud.logging.handlers import CloudLoggingHandler
from google.cloud.logging import Resource
name = "test-logger"
resource = Resource("resource_type", {"resource_label": "value"})
labels = {"handler_label": "value"}
stream = io.BytesIO()
credentials = _make_credentials()
client = self._make_one(
project=self.PROJECT, credentials=credentials, _use_grpc=False
)
with mock.patch("google.cloud.logging_v2.client.setup_logging") as mocked:
client.setup_logging(
name=name, resource=resource, labels=labels, stream=stream
)
self.assertEqual(len(mocked.mock_calls), 1)
_, args, kwargs = mocked.mock_calls[0]
(handler,) = args
self.assertIsInstance(handler, CloudLoggingHandler)
self.assertEqual(handler.name, name)
self.assertEqual(handler.resource, resource)
self.assertEqual(handler.labels, labels)
handler.transport.worker.stop()
expected_kwargs = {
"excluded_loggers": (
"google.cloud",
"google.auth",
"google_auth_httplib2",
"google.api_core.bidi",
"werkzeug",
),
"log_level": 20,
}
self.assertEqual(kwargs, expected_kwargs)
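# _Connection below is a minimal test double: it records the keyword arguments
# of the most recent api_request() call in _called_with and replays the canned
# responses passed to its constructor, one per call.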
class _Connection(object):
_called_with = None
def __init__(self, *responses):
self._responses = responses
def api_request(self, **kw):
self._called_with = kw
response, self._responses = self._responses[0], self._responses[1:]
return response
|
apache-2.0
| 7,038,636,927,435,961,000 | 34.145329 | 93 | 0.597059 | false |
MarkusHackspacher/pyfootballmngr
|
tests/test_pep8.py
|
1
|
1858
|
# -*- coding: utf-8 -*-
# pyfootballmngr
# Copyright (C) <2015> Markus Hackspacher
# This file is part of pyfootballmngr.
# pyfootballmngr is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pyfootballmngr is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pyfootballmngr. If not, see <http://www.gnu.org/licenses/>.
import unittest
import pep8
class TestCodeFormat(unittest.TestCase):
"""
Test of the code format
"""
def test_pep8_conformance(self):
"""Test that code conform to PEP8."""
pep8style = pep8.StyleGuide(quiet=False)
result = pep8style.check_files(['pyfootballmngr.py',
'tests/test_pep8.py',
'modules/main.py',
'modules/datahandler.py',
'modules/gui/main_window.py',
'modules/gui/dialogs/new_match.py',
'modules/gui/dialogs/new_player.py',
'modules/gui/dialogs/update_match.py',
'modules/gui/dialogs/update_player.py',
])
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warnings).")
if __name__ == '__main__':
unittest.main()
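# Usage note (not part of the original file): the relative paths passed to
# check_files() above assume the check is run from the repository root, e.g.
# with `python -m unittest tests.test_pep8`.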
|
gpl-3.0
| -8,170,530,810,372,039,000 | 37.708333 | 79 | 0.571044 | false |
seribits/seriauth
|
seriauth/api/v1/superusers/views.py
|
1
|
3886
|
# -*- encoding: utf-8 -*-
from flask import make_response, request
from flask_restful import Api, Resource
from .. import blueprint_superusers
from ...lib.encrypt import encrypt_sha512
from ...lib.errors import error_410, error_422, error_500
from ...lib.regex_validators import validate_password
from .models import Superuser, SuperuserSchema
schema = SuperuserSchema()
api = Api(blueprint_superusers)
# Superusers resource
class SuperusersList(Resource):
"""Recibe las peticiones [GET] del recurso superusers."""
def get(self):
"""Obtiene un arreglo de Superusers."""
try:
# Consulta de todos los Superusers
query_set = Superuser.query.all()
# Serializamos el query set indicando con many que es un array
res = schema.dump(query_set, many=True).data
return res, 200
except Exception as e:
            # Exception if the connection fails
return error_500()
class SuperuserDetail(Resource):
"""Recibe las peticiones [GET,PUT,DELETE] del recurso superusers."""
def get(self, id):
"""Devuelve al Superuser con <id>.
Parametros:
id -- Entero
"""
try:
            # Query the Superuser with <id>
query_set = Superuser.query.get(id)
if query_set is None:
return error_410()
else:
                # Serialize the query set
res = schema.dump(query_set).data
return res, 200
except Exception as e:
            # Exception if the connection fails
return error_500()
def put(self, id):
"""Actualiza al Superuser con <id>.
Parametros:
id -- Entero
"""
        # Validate that the request is <application/json>
        if request.content_type != "application/json":
            err = {"content_type": ["Expected application/json"]}
return error_422(err)
else:
json_data = request.get_json(force=True)
            # Get the data from the request
            if not json_data:
                err = {"datos": ["Insufficient information."]}
                return error_422(err)
            # Validate and deserialize the request
data, errors = schema.load(json_data)
if errors:
return error_422(errors)
try:
superuser = Superuser.query.get(id)
if superuser is None:
return error_410()
username, email, password = (
data['username'], data['email'], data['password']
)
pw_validate = validate_password(password)
if not pw_validate:
err = {"password": ["La contraseña no es válida."]}
return error_422(err)
password_sha = encrypt_sha512(password, 10000, 10)
setattr(superuser, 'username', username)
setattr(superuser, 'email', email)
setattr(superuser, 'password', password_sha)
superuser.update()
return self.get(id)
except Exception as e:
return error_500()
def delete(self, id):
"""Elimina al Superuser con <id>.
Parametros:
id -- Entero
"""
try:
superuser = Superuser.query.get(id)
if superuser is None:
return error_410()
else:
superuser.delete(superuser)
res = make_response()
res.status_code = 204
return res
except Exception as e:
            # Exception if the connection fails
return error_500()
api.add_resource(SuperusersList, '/superusers')
api.add_resource(SuperuserDetail, '/superusers/<int:id>')
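# Illustrative summary (not part of the original module): with the two
# add_resource() calls above, and assuming the blueprint is registered without
# an extra URL prefix, the exposed routes are:
#   GET                /superusers           -> SuperusersList.get
#   GET / PUT / DELETE /superusers/<int:id>  -> SuperuserDetail.get/put/delete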
|
gpl-3.0
| 7,559,758,148,637,709,000 | 32.119658 | 74 | 0.547355 | false |
IamJeffG/geopandas
|
geopandas/io/tests/test_io.py
|
1
|
1794
|
from __future__ import absolute_import
import fiona
from geopandas import read_postgis, read_file
from geopandas.tests.util import download_nybb, connect, create_db, \
PANDAS_NEW_SQL_API, unittest, validate_boro_df
class TestIO(unittest.TestCase):
def setUp(self):
nybb_filename, nybb_zip_path = download_nybb()
vfs = 'zip://' + nybb_filename
self.df = read_file(nybb_zip_path, vfs=vfs)
with fiona.open(nybb_zip_path, vfs=vfs) as f:
self.crs = f.crs
def test_read_postgis_default(self):
con = connect('test_geopandas')
if con is None or not create_db(self.df):
raise unittest.case.SkipTest()
try:
sql = "SELECT * FROM nybb;"
df = read_postgis(sql, con)
finally:
if PANDAS_NEW_SQL_API:
# It's not really a connection, it's an engine
con = con.connect()
con.close()
validate_boro_df(self, df)
def test_read_postgis_custom_geom_col(self):
con = connect('test_geopandas')
if con is None or not create_db(self.df):
raise unittest.case.SkipTest()
try:
sql = """SELECT
borocode, boroname, shape_leng, shape_area,
geom AS __geometry__
FROM nybb;"""
df = read_postgis(sql, con, geom_col='__geometry__')
finally:
if PANDAS_NEW_SQL_API:
# It's not really a connection, it's an engine
con = con.connect()
con.close()
validate_boro_df(self, df)
def test_read_file(self):
df = self.df.rename(columns=lambda x: x.lower())
validate_boro_df(self, df)
self.assert_(df.crs == self.crs)
|
bsd-3-clause
| 5,256,483,910,449,221,000 | 31.035714 | 69 | 0.552954 | false |
fosfataza/protwis
|
ligand/models.py
|
1
|
14776
|
from django.db import models
from django.utils.text import slugify
from django.db import IntegrityError
from common.models import WebResource
from common.models import WebLink
from common.tools import fetch_from_web_api
from urllib.request import urlopen, quote
import json
import yaml
import logging
class Ligand(models.Model):
properities = models.ForeignKey('LigandProperities', on_delete=models.CASCADE)
name = models.TextField()
canonical = models.NullBooleanField()
ambigious_alias = models.NullBooleanField() #required to flag 'safe' alias, eg one parent
def __str__(self):
return self.name
class Meta():
db_table = 'ligand'
unique_together = ('name', 'canonical')
def load_by_gtop_id(self, ligand_name, gtop_id, ligand_type):
logger = logging.getLogger('build')
# get the data from cache or web services
cache_dir = ['guidetopharmacology', 'ligands']
url = 'http://www.guidetopharmacology.org/services/ligands/$index'
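        # fetch_from_web_api presumably substitutes '$index' in the URL with the
        # id passed as its second argument and caches the response under cache_dir.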
gtop = fetch_from_web_api(url, gtop_id, cache_dir)
if gtop:
# get name from response
ligand_name = gtop['name']
# does a ligand by this name already exists?
try:
existing_ligand = Ligand.objects.get(name=ligand_name, canonical=True)
return existing_ligand
except Ligand.DoesNotExist:
web_resource = False
if gtop_id:
# gtoplig webresource
web_resource = WebResource.objects.get(slug='gtoplig')
return self.update_ligand(ligand_name, {}, ligand_type, web_resource, gtop_id)
def load_from_pubchem(self, lookup_type, pubchem_id, ligand_type, ligand_title=False):
logger = logging.getLogger('build')
# if ligand title is specified, use that as the name
if ligand_title:
ligand_name = ligand_title
# otherwise, fetch ligand name from pubchem
else:
# check cache
cache_dir = ['pubchem', 'cid', 'synonyms']
url = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/{}/$index/synonyms/json'.format(lookup_type)
pubchem = fetch_from_web_api(url, pubchem_id, cache_dir)
##print (pubchem)
# get name from response
try:
ligand_name = pubchem['InformationList']['Information'][0]['Synonym'][0]
except:
## Some compounds do not have a name but are still a valid pubchem entry. (Peptides)
logger.warning('Ligand {} does not have a name in PubChem'.format(pubchem_id))
ligand_name = lookup_type + ' ' + pubchem_id
# return None
# fetch ligand properties from pubchem
properties = {}
# check cache
cache_dir = ['pubchem', 'cid', 'property']
url = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/{}/$index/property/CanonicalSMILES,InChIKey,MolecularWeight,HBondDonorCount,HBondAcceptorCount,XLogP,RotatableBondCount/json'.format(lookup_type)
pubchem = fetch_from_web_api(url, pubchem_id, cache_dir)
# get properties from response
if pubchem==False:
logger.warning('Ligand {} not found in PubChem'.format(pubchem_id))
return None
if pubchem['PropertyTable']['Properties'][0]:
if 'HBondAcceptorCount' in pubchem['PropertyTable']['Properties'][0] :
properties['hacc'] = pubchem['PropertyTable']['Properties'][0]['HBondAcceptorCount']
if 'HBondDonorCount' in pubchem['PropertyTable']['Properties'][0] :
properties['hdon'] = pubchem['PropertyTable']['Properties'][0]['HBondDonorCount']
if 'XLogP' in pubchem['PropertyTable']['Properties'][0] :
properties['logp'] = pubchem['PropertyTable']['Properties'][0]['XLogP']
if 'RotatableBondCount' in pubchem['PropertyTable']['Properties'][0] :
properties['rotatable_bonds'] = pubchem['PropertyTable']['Properties'][0]['RotatableBondCount']
if 'MolecularWeight' in pubchem['PropertyTable']['Properties'][0] :
properties['mw'] = pubchem['PropertyTable']['Properties'][0]['MolecularWeight']
try:
properties['smiles'] = pubchem['PropertyTable']['Properties'][0]['CanonicalSMILES']
properties['inchikey'] = pubchem['PropertyTable']['Properties'][0]['InChIKey']
except:
logger.warning('Ligand {} not found in PubChem'.format(pubchem_id))
return None
# pubchem webresource
web_resource = WebResource.objects.get(slug='pubchem')
#print (web_resource)
# does a ligand with this canonical name already exist
try:
return Ligand.objects.get(name=ligand_name, canonical=True)
# FIXME check inchikey
except Ligand.DoesNotExist:
pass # continue
# does a (canonical) ligand with this inchikey already exist?
try:
existing_lp = LigandProperities.objects.get(inchikey=properties['inchikey'])
self.properities = existing_lp
self.name = ligand_name
self.canonical = False
self.ambigious_alias = False
try:
self.save()
return self
except IntegrityError:
return Ligand.objects.get(name=ligand_name, canonical=False)
except LigandProperities.DoesNotExist:
return self.update_ligand(ligand_name, properties, ligand_type, web_resource, pubchem_id)
def update_ligand(self, ligand_name, properties, ligand_type, web_resource=False, web_resource_index=False):
lp = LigandProperities.objects.create(ligand_type=ligand_type)
# assign properties
for prop in properties:
setattr(lp, prop, properties[prop])
# assign web link
if web_resource and web_resource_index:
try:
wl, created = WebLink.objects.get_or_create(index=web_resource_index, web_resource=web_resource)
except IntegrityError:
                wl = WebLink.objects.get(index=web_resource_index, web_resource=web_resource)
lp.web_links.add(wl)
# try saving the properties, catch IntegrityErrors due to concurrent processing
try:
lp.save()
except IntegrityError:
lp = LigandProperities.objects.get(inchikey=properties['inchikey'])
self.name = ligand_name
self.canonical = True
self.ambigious_alias = False
self.properities = lp
try:
self.save()
return self
except IntegrityError:
return Ligand.objects.get(name=ligand_name, canonical=True)
def load_by_name(self, name):
logger = logging.getLogger('build')
# fetch ligand info from pubchem - start by getting name and 'canonical' name
pubchem_url = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/name/' + name + '/synonyms/TXT'
if self.properities.inchikey: #if inchikey has been added use this -- especially to avoid updating a wrong inchikey to a synonym.
pubchem_url = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/InchiKey/' + self.properities.inchikey + '/synonyms/TXT'
try:
req = urlopen(pubchem_url)
pubchem = req.read().decode('UTF-8').splitlines()
pubchem_name = pubchem[0]
except: #name not matched in pubchem
if self.properities.inchikey: #if inchikey has been added for check this
pubchem_url = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/InchiKey/' + self.properities.inchikey + '/synonyms/TXT'
try:
req = urlopen(pubchem_url)
pubchem = req.read().decode('UTF-8').splitlines()
pubchem_name = pubchem[0]
except: #name not matched in pubchem - exit cos something is wrong
logger.info('Ligand not found by InchiKey in pubchem: ' + str(self.properities.inchikey))
return
else: #if name not found and no inchikey, then no point in looking further
logger.info('Ligand not found in pubchem by name (Consider renaming): ' + str(name))
return
pubchem_url = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/name/' + quote(pubchem_name) + '/property/CanonicalSMILES,InChIKey/json'
if self.properities.inchikey:
pubchem_url = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/inChiKey/' + self.properities.inchikey + '/property/CanonicalSMILES,InChIKey/json'
try:
req = urlopen(pubchem_url)
pubchem = json.loads(req.read().decode('UTF-8'))
except: #JSON failed
return
# weblink
pubchem_id = pubchem['PropertyTable']['Properties'][0]['CID']
try:
web_resource = WebResource.objects.get(slug='pubchem')
except:
            # abort if the PubChem resource is not found
raise Exception('PubChem resource not found, aborting!')
pubchem_inchikey = ''
pubchem_smiles = ''
# SMILES
pubchem_smiles = pubchem['PropertyTable']['Properties'][0]['CanonicalSMILES']
# InChIKey
pubchem_inchikey = pubchem['PropertyTable']['Properties'][0]['InChIKey']
try: #now that we have inchikey, try and see if it exists in DB
existing_lp = LigandProperities.objects.get(inchikey=pubchem_inchikey)
self.properities = existing_lp
except:
wl, created = WebLink.objects.get_or_create(index=pubchem_id, web_resource=web_resource)
self.properities.web_links.add(wl)
# ligand type
self.properities.ligand_type, created = LigandType.objects.get_or_create(slug='sm', defaults={'name':'Small molecule'})
self.properities.inchikey = pubchem_inchikey
self.properities.smiles = pubchem_smiles
self.properities.save()
if pubchem_name.lower()!=name.lower(): #if not canonical name
logger.info("Updating canonical flag to Pubchem. PubChem canonical: "+pubchem_name +". DB canonical: "+ name)
self.canonical = False
try:
self.save()
except IntegrityError:
logger.error("FAILED SAVING LIGAND, duplicate?")
            canonical_entry = Ligand.objects.filter(name=pubchem_name, properities__inchikey=pubchem_inchikey) #NEED TO CHECK BY INCHIKEY - SOME CANONICAL NAMES HAVE MANY INCHIKEYS (DOXEPIN)
if canonical_entry.exists():
return
else: #insert the 'canonical' entry
try:
canonical_entry = Ligand()
canonical_entry.name = pubchem_name
canonical_entry.canonical = True
canonical_entry.properities = self.properities
canonical_entry.save()
except IntegrityError:
logger.error("FAILED SAVING CANONICAL LIGAND, duplicate? "+pubchem_name+" "+name)
print("FAILED SAVING CANONICAL LIGAND, duplicate? "+pubchem_name+" "+name)
class LigandProperities(models.Model):
ligand_type = models.ForeignKey('LigandType', null=True, on_delete=models.CASCADE)
web_links = models.ManyToManyField('common.WebLink')
#vendor_links = models.ManyToManyField('common.WebLink', related_name='vendors')
smiles = models.TextField(null=True)
inchikey = models.CharField(max_length=50, null=True, unique=True)
#vendors = models.ManyToManyField('LigandVenderLink')
mw = models.DecimalField(max_digits=15, decimal_places=3, null=True)
rotatable_bonds = models.SmallIntegerField(null=True)
hacc = models.SmallIntegerField( null=True)
hdon = models.SmallIntegerField( null=True)
logp = models.DecimalField(max_digits=10, decimal_places=3, null=True)
def __str__(self):
return self.inchikey
class Meta():
db_table = 'ligand_properities'
class LigandType(models.Model):
slug = models.SlugField(max_length=20, unique=True)
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Meta():
db_table = 'ligand_type'
class LigandRole(models.Model):
slug = models.SlugField(max_length=50, unique=True)
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Meta():
db_table = 'ligand_role'
class ChemblAssay(models.Model):
#slug = models.SlugField(max_length=50, unique=True)
web_links = models.ManyToManyField('common.WebLink')
assay_id = models.CharField(max_length=50, unique = True)
def __str__(self):
return self.assay_id
class Meta():
db_table = 'chembl_assays'
class AssayExperiment(models.Model):
ligand = models.ForeignKey('Ligand', on_delete=models.CASCADE)
protein = models.ForeignKey('protein.Protein', on_delete=models.CASCADE)
assay = models.ForeignKey('ChemblAssay', on_delete=models.CASCADE)
assay_type = models.CharField(max_length=10)
assay_description = models.TextField(max_length=1000)
pchembl_value = models.DecimalField(max_digits=9, decimal_places=3)
published_value = models.DecimalField(max_digits=9, decimal_places=3)
published_relation = models.CharField(max_length=10)
published_type = models.CharField(max_length=20)
published_units = models.CharField(max_length=20)
standard_value = models.DecimalField(max_digits=9, decimal_places=3)
standard_relation = models.CharField(max_length=10)
standard_type = models.CharField(max_length=20)
standard_units = models.CharField(max_length=20)
class Meta():
unique_together = ('ligand', 'protein', 'assay')
class LigandVendors(models.Model):
slug = models.SlugField(max_length=100, unique=True)
name = models.CharField(max_length=200, default='')
url = models.TextField(null=True)
class LigandVendorLink(models.Model):
vendor = models.ForeignKey('LigandVendors', on_delete=models.CASCADE)
lp = models.ForeignKey('LigandProperities', related_name='vendors', on_delete=models.CASCADE)
url = models.CharField(max_length=300) #SourceRecordURL
vendor_external_id = models.CharField(max_length=300) #RegistryID
sid = models.CharField(max_length=200, unique=True) #SID
|
apache-2.0
| -3,696,227,186,216,180,700 | 41.338109 | 211 | 0.623782 | false |
florianfesti/boxes
|
boxes/generators/folder.py
|
1
|
1599
|
#!/usr/bin/env python3
# Copyright (C) 2013-2014 Florian Festi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from boxes import *
import math
class Folder(Boxes):
"""Book cover with flex for the spine"""
def __init__(self):
Boxes.__init__(self)
self.addSettingsArgs(edges.FingerJointSettings)
self.buildArgParser("x", "y", "h")
self.argparser.add_argument(
"--r", action="store", type=float, default=10.0,
help="radius of the corners")
self.argparser.set_defaults(h=20)
def render(self):
x, y, r, h = self.x, self.y, self.r, self.h
c2 = math.pi * h
self.moveTo(r + self.thickness, self.thickness)
self.edge(x - r)
self.edges["X"](c2, y)
self.edge(x - r)
self.corner(90, r)
self.edge(y - 2 * r)
self.corner(90, r)
self.edge(2 * x - 2 * r + c2)
self.corner(90, r)
self.edge(y - 2 * r)
self.corner(90, r)
|
gpl-3.0
| 2,576,006,453,972,368,400 | 31.632653 | 73 | 0.623515 | false |
jkyeung/XlsxWriter
|
xlsxwriter/test/comparison/test_chart_stock01.py
|
1
|
2175
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_stock01.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {'xl/charts/chart1.xml': ['<c:formatCode']}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'stock'})
date_format = workbook.add_format({'num_format': 14})
chart.axis_ids = [40522880, 40524416]
data = [
[39083, 39084, 39085, 39086, 39087],
[27.2, 25.03, 19.05, 20.34, 18.5],
[23.49, 19.55, 15.12, 17.84, 16.34],
[25.45, 23.05, 17.32, 20.45, 17.34],
]
for row in range(5):
worksheet.write(row, 0, data[0][row], date_format)
worksheet.write(row, 1, data[1][row])
worksheet.write(row, 2, data[2][row])
worksheet.write(row, 3, data[3][row])
worksheet.set_column('A:D', 11)
chart.add_series({'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$B$1:$B$5',
})
chart.add_series({'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$C$1:$C$5',
})
chart.add_series({'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$D$1:$D$5',
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
bsd-2-clause
| -961,379,040,412,736,800 | 29.208333 | 79 | 0.511724 | false |
antoinevg/survival
|
ontology/human.py
|
1
|
1096
|
#import sys
#import cairo
#import math
import rsvg
from random import random # for myfn
import widgets
#from widgets.texteditor import TextEditor
#from core.mathlib import MathLib
#from core.world import TheWorld
from ontology.movingthing import MovingThing
SIZE = 0.05 # scaling constant
class Human(MovingThing):
def __init__(self):
MovingThing.__init__(self)
self.width = SIZE
self.height = SIZE
r = random()
if r < 0.5:
self.svg = rsvg.Handle('resources/girl.svg')
self.svg_rotate = 90.0
else:
self.svg = rsvg.Handle('resources/boy.svg')
self.svg_rotate = 90.0
self.code = """\
d = self.look()
if d:
self.right(d)
self.forward(0.008)
elif random() > 0.5:
self.left(5.0)
self.forward(0.012)
else:
self.right(5.0)
self.forward(0.012)"""
# default behaviour
def myfn(self, arg):
d = self.look()
if d:
self.right(d)
self.forward(0.009)
elif random() > 0.5:
self.left(4.0)
self.forward(0.010)
else:
self.right(4.0)
self.forward(0.010)
|
gpl-2.0
| 1,649,486,439,531,439,600 | 17.896552 | 50 | 0.614051 | false |
pombredanne/DjangoRestMultipleModels
|
drf_multiple_model/mixins.py
|
1
|
6074
|
from rest_framework.response import Response
class MultipleModelMixin(object):
"""
Create a list of objects from multiple models/serializers.
    This mixin expects the view to have a queryList attribute, which is
    a list/tuple of queryset/serializer pairs, as shown below:
queryList = [
(querysetA,serializerA),
(querysetB,serializerB),
(querysetC,serializerC),
.....
]
optionally, you can add a third element to the queryList,
a label to define that particular data type:
queryList = [
(querysetA,serializerA,'labelA'),
(querysetB,serializerB,'labelB'),
(querysetC,serializerC),
.....
]
"""
objectify = False
queryList = None
# Flag to determine whether to mix objects together or keep them distinct
flat = False
paginating_label = None
    # Optional keyword to sort flat lists by the given attribute;
    # note that the attribute must be shared by ALL models
sorting_field = None
# Flag to append the particular django model being used to the data
add_model_type = True
def get_queryList(self):
assert self.queryList is not None, (
"'%s' should either include a `queryList` attribute, "
"or override the `get_queryList()` method."
% self.__class__.__name__
)
queryList = self.queryList
qlist = []
for query in queryList:
if not isinstance(query, Query):
query = Query.new_from_tuple(query)
qs = query.queryset.all()
query.queryset = qs
qlist.append(query)
return qlist
def paginate_queryList(self, queryList):
"""
Wrapper for pagination function.
By default it just calls paginate_queryset, but can be overwritten for custom functionality
"""
return self.paginate_queryset(queryList)
def list(self, request, *args, **kwargs):
queryList = self.get_queryList()
results = {} if self.objectify else []
# Iterate through the queryList, run each queryset and serialize the data
for query in queryList:
if not isinstance(query, Query):
query = Query.new_from_tuple(query)
# Run the queryset through Django Rest Framework filters
queryset = self.filter_queryset(query.queryset)
# If there is a user-defined filter, run that too.
if query.filter_fn is not None:
queryset = query.filter_fn(queryset, request, *args, **kwargs)
# Run the paired serializer
context = self.get_serializer_context()
data = query.serializer(queryset, many=True, context=context).data
results = self.format_data(data, query, results)
if self.flat:
# Sort by given attribute, if sorting_attribute is provided
if self.sorting_field:
results = self.queryList_sort(results)
# Return paginated results if pagination is enabled
page = self.paginate_queryList(results)
if page is not None:
return self.get_paginated_response(page)
if request.accepted_renderer.format == 'html':
return Response({'data': results})
return Response(results)
# formats the serialized data based on various view properties (e.g. flat=True)
def format_data(self, new_data, query, results):
        # Get the label, unless add_model_type is not set
label = None
if query.label is not None:
label = query.label
else:
if self.add_model_type:
label = query.queryset.model.__name__.lower()
if self.flat and self.objectify:
raise RuntimeError("Cannot objectify data with flat=True. Try to use flat=False")
# if flat=True, Organize the data in a flat manner
elif self.flat:
for datum in new_data:
if label:
datum.update({'type': label})
results.append(datum)
# if objectify=True, Organize the data in an object
elif self.objectify:
if not label:
raise RuntimeError("Cannot objectify data. Try to use objectify=False")
# Get paginated data for selected label, if paginating_label is provided
if label == self.paginating_label:
paginated_results = self.get_paginated_response(new_data).data
paginated_results.pop("results", None)
results.update(paginated_results)
results[label] = new_data
# Otherwise, group the data by Model/Queryset
else:
if label:
new_data = {label: new_data}
results.append(new_data)
return results
# Sort based on the given sorting field property
def queryList_sort(self, results):
"""
        Determine whether the sort is ascending or descending
based on the presence of '-' at the beginning of the
sorting_field attribute
"""
sorting_field = self.sorting_field
sort_descending = self.sorting_field[0] == '-'
# Remove the '-' if sort descending
if sort_descending:
sorting_field = sorting_field[1:len(sorting_field)]
return sorted(
results,
reverse=sort_descending,
key=lambda datum: datum[sorting_field]
)
class Query(object):
def __init__(self, queryset, serializer, label=None, filter_fn=None, ):
self.queryset = queryset
self.serializer = serializer
self.filter_fn = filter_fn
self.label = label
@classmethod
def new_from_tuple(cls, tuple_):
try:
queryset, serializer, label = tuple_
except ValueError:
queryset, serializer = tuple_
label = None
query = Query(queryset, serializer, label)
return query
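# --- Illustrative sketch, not part of the original module ---
# A minimal example of how a view might combine MultipleModelMixin with a DRF
# generic view. The Play/Poem models and serializers are hypothetical
# placeholders; only the mixin attributes (queryList, flat, sorting_field) come
# from the code above.
#
# from rest_framework.generics import GenericAPIView
#
# class TextList(MultipleModelMixin, GenericAPIView):
#     flat = True                  # merge all results into one flat list
#     sorting_field = 'title'      # attribute shared by both models
#     queryList = [
#         (Play.objects.all(), PlaySerializer, 'play'),
#         (Poem.objects.all(), PoemSerializer, 'poem'),
#     ]
#
#     def get(self, request, *args, **kwargs):
#         return self.list(request, *args, **kwargs)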
|
mit
| 1,078,349,059,939,249,000 | 31.137566 | 99 | 0.594666 | false |
sumihai-tekindo/helpdesk_sicepat
|
helpdesk_waybill/__openerp__.py
|
1
|
1962
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2017 Sicepat Ekspres Indonesia (<http://www.sicepat.com>).
# @author: - Pambudi Satria <pambudi.satria@yahoo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Helpdesk Waybill',
'summary': 'Helpdesk Waybill',
'description': """
""",
'author': 'Pambudi Satria',
'website': "https://github.com/sumihai-tekindo/",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/master/openerp/addons/base/module/module_data.xml
# for the full list
'category': 'Helpdesk',
'version': '8.0.0.1.0',
# any module necessary for this one to work correctly
'depends': [
'base_external_dbsource',
'helpdesk_sicepat',
],
    'external_dependencies': {'python': ['requests']},
# always loaded
'data': [
'views/res_config_view.xml',
'views/helpdesk_views.xml',
],
# only loaded in demonstration mode
'demo': [],
'images': [],
'qweb': [],
'installable': True,
'application': False,
}
|
gpl-3.0
| 3,049,939,936,751,258,000 | 31.862069 | 95 | 0.575943 | false |
openstack/watcher-dashboard
|
watcher_dashboard/content/goals/views.py
|
1
|
4470
|
# Copyright (c) 2016 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.utils.translation import ugettext_lazy as _
import horizon.exceptions
import horizon.tables
import horizon.tabs
from horizon.utils import memoized
import horizon.workflows
from watcher_dashboard.api import watcher
from watcher_dashboard.content.goals import tables
from watcher_dashboard.content.goals import tabs as wtabs
from watcher_dashboard.content.strategies import tables as strategies_tables
LOG = logging.getLogger(__name__)
class IndexView(horizon.tables.DataTableView):
table_class = tables.GoalsTable
template_name = 'infra_optim/goals/index.html'
page_title = _("Goals")
def get_context_data(self, **kwargs):
context = super(IndexView, self).get_context_data(**kwargs)
context['goals_count'] = self.get_goals_count()
return context
def get_data(self):
goals = []
search_opts = self.get_filters()
try:
goals = watcher.Goal.list(self.request, **search_opts)
except Exception:
horizon.exceptions.handle(
self.request,
_("Unable to retrieve goal information."))
return goals
def get_goals_count(self):
return len(self.get_data())
def get_filters(self):
filters = {}
filter_action = self.table._meta._filter_action
if filter_action:
filter_field = self.table.get_filter_field()
if filter_action.is_api_filter(filter_field):
filter_string = self.table.get_filter_string()
if filter_field and filter_string:
filters[filter_field] = filter_string
return filters
class DetailView(horizon.tables.MultiTableView):
table_classes = (tables.EfficacySpecificationTable,
strategies_tables.RelatedStrategiesTable)
tab_group_class = wtabs.GoalDetailTabs
template_name = 'infra_optim/goals/details.html'
redirect_url = 'horizon:admin:goals:index'
page_title = _("Goal Details: {{ goal.name }}")
@memoized.memoized_method
def _get_data(self):
goal_uuid = None
try:
goal_uuid = self.kwargs['goal_uuid']
goal = watcher.Goal.get(self.request, goal_uuid)
except Exception as exc:
LOG.exception(exc)
msg = _('Unable to retrieve details for goal "%s".') \
% goal_uuid
horizon.exceptions.handle(
self.request, msg,
redirect=self.redirect_url)
return goal
def get_related_strategies_data(self):
try:
goal = self._get_data()
strategies = watcher.Strategy.list(self.request, goal=goal.uuid)
except Exception as exc:
LOG.exception(exc)
strategies = []
msg = _('Strategy list cannot be retrieved.')
horizon.exceptions.handle(self.request, msg)
return strategies
def get_efficacy_specification_data(self):
try:
goal = self._get_data()
indicators_spec = [watcher.EfficacyIndicatorSpec(spec)
for spec in goal.efficacy_specification]
except Exception as exc:
LOG.exception(exc)
indicators_spec = []
msg = _('Efficacy specification cannot be retrieved.')
horizon.exceptions.handle(self.request, msg)
return indicators_spec
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
goal = self._get_data()
context["goal"] = goal
return context
def get_tabs(self, request, *args, **kwargs):
goal = self._get_data()
# ports = self._get_ports()
return self.tab_group_class(request, goal=goal,
# ports=ports,
**kwargs)
|
apache-2.0
| -7,534,486,794,679,372,000 | 33.651163 | 76 | 0.623714 | false |
JulyKikuAkita/PythonPrac
|
cs15211/NumbersWithSameConsecutiveDifferences.py
|
1
|
3415
|
__source__ = 'https://leetcode.com/problems/numbers-with-same-consecutive-differences/'
# Time: O(2^N)
# Space: O(2^N)
#
# Description: Leetcode # 967. Numbers With Same Consecutive Differences
#
# Return all non-negative integers of length N such that
# the absolute difference between every two consecutive digits is K.
#
# Note that every number in the answer must not have leading zeros except for the number 0 itself.
# For example, 01 has one leading zero and is invalid, but 0 is valid.
#
# You may return the answer in any order.
#
# Example 1:
#
# Input: N = 3, K = 7
# Output: [181,292,707,818,929]
# Explanation: Note that 070 is not a valid number, because it has leading zeroes.
#
# Example 2:
#
# Input: N = 2, K = 1
# Output: [10,12,21,23,32,34,43,45,54,56,65,67,76,78,87,89,98]
#
# Note:
# 1 <= N <= 9
# 0 <= K <= 9
#
import unittest
# 76ms 25.71%
class Solution(object):
def numsSameConsecDiff(self, N, K):
"""
:type N: int
:type K: int
:rtype: List[int]
"""
ans = {x for x in range(1, 10)}
for _ in xrange(N-1):
ans2 = set()
for x in ans:
d = x % 10
if d - K >= 0:
ans2.add(10*x + d-K)
if d + K <= 9:
ans2.add(10*x + d+K)
ans = ans2
if N == 1:
ans.add(0)
return list(ans)
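# Illustrative check (not in the original file), matching Example 1 from the
# problem statement above:
# >>> sorted(Solution().numsSameConsecDiff(3, 7))
# [181, 292, 707, 818, 929]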
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/numbers-with-same-consecutive-differences/solution/
#
Approach 1: Brute Force
Complexity Analysis
Time Complexity: O(2^N)
Space Complexity: O(2^N)
# 14ms 44.79%
class Solution {
public int[] numsSameConsecDiff(int N, int K) {
Set<Integer> cur = new HashSet();
for (int i = 1; i <= 9; ++i) cur.add(i);
for (int steps = 1; steps <= N-1; ++steps) {
Set<Integer> cur2 = new HashSet();
for (int x: cur) {
int d = x % 10;
if (d - K >= 0)
cur2.add(10 * x + (d - K));
if (d + K <= 9)
cur2.add(10 * x + (d + K));
}
cur = cur2;
}
if (N == 1)
cur.add(0);
int[] ans = new int[cur.size()];
int t = 0;
for (int x: cur)
ans[t++] = x;
return ans;
}
}
# 8ms 93.55%
class Solution {
public int[] numsSameConsecDiff(int N, int K) {
if (N == 1) return new int[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
List<Integer> result = new ArrayList<>();
for (int i = 1; i < 10; i++)
dfs(N, K, 1, i, result);
int[] ans = new int[result.size()];
for (int i = 0; i < ans.length; i++)
ans[i] = result.get(i);
return ans;
}
private void dfs(int N, int K, int index, int num, List<Integer> result) {
if (index == N) {
result.add(num);
return;
}
int pre = num % 10;
if (pre + K >= 0 && pre + K <= 9) {
dfs(N, K, index + 1, num * 10 + pre + K, result);
}
if (pre - K >= 0 && pre - K <= 9) {
if (K == 0) return;
dfs(N, K, index + 1, num * 10 + pre - K, result);
}
return;
}
}
'''
|
apache-2.0
| 8,838,016,209,958,667,000 | 25.679688 | 98 | 0.487848 | false |
vortex-ape/scikit-learn
|
sklearn/kernel_approximation.py
|
4
|
23032
|
"""
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels, KERNEL_PARAMS
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
It implements a variant of Random Kitchen Sinks.[1]
Read more in the :ref:`User Guide <rbf_kernel_approx>`.
Parameters
----------
gamma : float
Parameter of RBF kernel: exp(-gamma * x^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Examples
--------
>>> from sklearn.kernel_approximation import RBFSampler
>>> from sklearn.linear_model import SGDClassifier
>>> X = [[0, 0], [1, 1], [1, 0], [0, 1]]
>>> y = [0, 0, 1, 1]
>>> rbf_feature = RBFSampler(gamma=1, random_state=1)
>>> X_features = rbf_feature.fit_transform(X)
>>> clf = SGDClassifier(max_iter=5)
>>> clf.fit(X_features, y)
... # doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None,
early_stopping=False, epsilon=0.1, eta0=0.0, fit_intercept=True,
l1_ratio=0.15, learning_rate='optimal', loss='hinge', max_iter=5,
n_iter=None, n_iter_no_change=5, n_jobs=None, penalty='l2',
power_t=0.5, random_state=None, shuffle=True, tol=None,
validation_fraction=0.1, verbose=0, warm_start=False)
>>> clf.score(X_features, y)
1.0
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
[1] "Weighted Sums of Random Kitchen Sinks: Replacing
minimization with randomization in learning" by A. Rahimi and
Benjamin Recht.
(http://people.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
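        # The Fourier transform of the RBF kernel is a Gaussian, so the random
        # projection directions are drawn from a normal with variance 2*gamma
        # and the random phase offsets uniformly from [0, 2*pi).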
self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = check_array(X, accept_sparse='csr')
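        # Random Fourier features: project onto the random directions, shift by
        # the random phases, take the cosine and rescale so that dot products
        # of transformed samples approximate the RBF kernel value.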
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Examples
--------
>>> from sklearn.kernel_approximation import SkewedChi2Sampler
>>> from sklearn.linear_model import SGDClassifier
>>> X = [[0, 0], [1, 1], [1, 0], [0, 1]]
>>> y = [0, 0, 1, 1]
>>> chi2_feature = SkewedChi2Sampler(skewedness=.01,
... n_components=10,
... random_state=0)
>>> X_features = chi2_feature.fit_transform(X, y)
>>> clf = SGDClassifier(max_iter=10)
>>> clf.fit(X_features, y)
SGDClassifier(alpha=0.0001, average=False, class_weight=None,
early_stopping=False, epsilon=0.1, eta0=0.0, fit_intercept=True,
l1_ratio=0.15, learning_rate='optimal', loss='hinge', max_iter=10,
n_iter=None, n_iter_no_change=5, n_jobs=None, penalty='l2',
power_t=0.5, random_state=None, shuffle=True, tol=None,
validation_fraction=0.1, verbose=0, warm_start=False)
>>> clf.score(X_features, y)
1.0
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features. All values of X must be
strictly greater than "-skewedness".
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = as_float_array(X, copy=True)
X = check_array(X, copy=False)
if (X <= -self.skewedness).any():
raise ValueError("X may not contain entries smaller than"
" -skewedness.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
    Samples the Fourier transform of the kernel characteristic function
    at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
    space is transformed into 2*sample_steps-1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.linear_model import SGDClassifier
>>> from sklearn.kernel_approximation import AdditiveChi2Sampler
>>> X, y = load_digits(return_X_y=True)
>>> chi2sampler = AdditiveChi2Sampler(sample_steps=2)
>>> X_transformed = chi2sampler.fit_transform(X, y)
>>> clf = SGDClassifier(max_iter=5, random_state=0)
>>> clf.fit(X_transformed, y)
SGDClassifier(alpha=0.0001, average=False, class_weight=None,
early_stopping=False, epsilon=0.1, eta0=0.0, fit_intercept=True,
l1_ratio=0.15, learning_rate='optimal', loss='hinge', max_iter=5,
n_iter=None, n_iter_no_change=5, n_jobs=None, penalty='l2',
power_t=0.5, random_state=0, shuffle=True, tol=None,
validation_fraction=0.1, verbose=0, warm_start=False)
>>> clf.score(X_transformed, y) # doctest: +ELLIPSIS
0.9543...
Notes
-----
This estimator approximates a slightly different version of the additive
    chi squared kernel than ``metric.additive_chi2`` computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
2011
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set the parameters
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
X_new : {array, sparse matrix}, \
               shape = (n_samples, n_features * (2*sample_steps - 1))
            Whether the return value is an array or a sparse matrix depends on
the type of the input X.
"""
msg = ("%(name)s is not fitted. Call fit to set the parameters before"
" calling transform")
check_is_fitted(self, "sample_interval_", msg=msg)
X = check_array(X, accept_sparse='csr')
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
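        # so the zeroth feature reduces to sqrt(X * sample_interval_); it is
        # built, together with the cos/sin harmonics, in the helpers below.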
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
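        # Every output block keeps X's sparsity pattern, so the copied
        # indices/indptr arrays are shared by all csr_matrix blocks below.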
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, laplacian, polynomial, exponential chi2
and sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
coef0 : float, default=None
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
degree : float, default=None
Degree of the polynomial kernel. Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
components_ : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : array, shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
Examples
--------
>>> from sklearn import datasets, svm
>>> from sklearn.kernel_approximation import Nystroem
>>> digits = datasets.load_digits(n_class=9)
>>> data = digits.data / 16.
>>> clf = svm.LinearSVC()
>>> feature_map_nystroem = Nystroem(gamma=.2,
... random_state=1,
... n_components=300)
>>> data_transformed = feature_map_nystroem.fit_transform(data)
>>> clf.fit(data_transformed, digits.target)
... # doctest: +NORMALIZE_WHITESPACE
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> clf.score(data_transformed, digits.target) # doctest: +ELLIPSIS
0.9987...
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
"""
def __init__(self, kernel="rbf", gamma=None, coef0=None, degree=None,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
        Samples a subset of the training points, computes the kernel on this
        subset and computes the normalization matrix.
Parameters
----------
        X : array-like, shape=(n_samples, n_features)
Training data.
"""
X = check_array(X, accept_sparse='csr')
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(basis, metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U / np.sqrt(S), V)
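        # normalization_ is (approximately) the inverse square root of the
        # basis kernel matrix; clipping S at 1e-12 keeps it numerically stable.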
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, 'components_')
X = check_array(X, accept_sparse='csr')
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(X, self.components_,
metric=self.kernel,
filter_params=True,
**kernel_params)
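        # Project onto the sampled basis and apply the normalization from
        # fit(); inner products of the returned rows approximate the kernel.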
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
for param in (KERNEL_PARAMS[self.kernel]):
if getattr(self, param) is not None:
params[param] = getattr(self, param)
else:
if (self.gamma is not None or
self.coef0 is not None or
self.degree is not None):
warnings.warn(
"Passing gamma, coef0 or degree to Nystroem when using a"
" callable kernel is deprecated in version 0.19 and will"
" raise an error in 0.21, as they are ignored. Use "
"kernel_params instead.", DeprecationWarning)
return params
|
bsd-3-clause
| 5,673,052,215,641,753,000 | 35.675159 | 78 | 0.598862 | false |
CitrineInformatics/python-citrination-client
|
citrination_client/models/design/constraints/elemental_inclusion.py
|
1
|
1357
|
from citrination_client.models.design.constraints.base import BaseConstraint
from citrination_client.base.errors import CitrinationClientError
class ElementalInclusionConstraint(BaseConstraint):
"""
Constraint class which allows the assertion that a set of
elements is included in the candidate compositions
"""
def __init__(self, name, elements, logic):
"""
Constructor.
:param name: The name of the column in the data
view to which this constraint should be applied
:type name: str
:param elements: An array of element abbreviations as
strings, e.g. ["Mg", "C"]
:type elements: list of str
:param logic: The logic to apply to the constraint; either
"must", "should", or "exclude"
:type logic: str
"""
bad_logic_msg = "ElementalInclusionConstraint must be initialized with the logic parameter equal to \"must\", \"should\", or \"exclude\""
if logic not in ["must", "should", "exclude"]:
raise CitrinationClientError(bad_logic_msg)
self._name = name
self._type = "elementalInclusionConstraint"
self._elements = elements
self._logic = logic
def options(self):
return {
"elements": self._elements,
"logic": self._logic
}
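if __name__ == "__main__":
    # Minimal usage sketch (illustrative only; the column name below is
    # hypothetical and depends on the data view being queried).
    constraint = ElementalInclusionConstraint(
        name="Chemical formula", elements=["Mg", "C"], logic="must")
    print(constraint.options())  # {'elements': ['Mg', 'C'], 'logic': 'must'}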
|
apache-2.0
| -7,337,785,072,410,175,000 | 34.736842 | 145 | 0.627856 | false |